problem_id (stringlengths 18-22) | source (stringclasses, 1 value) | task_type (stringclasses, 1 value) | in_source_id (stringlengths 13-58) | prompt (stringlengths 1.1k-25.4k) | golden_diff (stringlengths 145-5.13k) | verification_info (stringlengths 582-39.1k) | num_tokens (int64 271-4.1k) | num_tokens_diff (int64 47-1.02k) |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_4048
|
rasdani/github-patches
|
git_diff
|
lutris__lutris-1136
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
"Manage Versions` window is too narrow
Recent improvements to Wine version managing are currently a bit pointless due to Manage Versions windows being too narrow and not being able to display the status of the downloading process:

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lutris/gui/runnerinstalldialog.py`
Content:
```
1 # pylint: disable=missing-docstring
2 import os
3 import random
4
5 from gi.repository import GLib, GObject, Gtk
6 from lutris import api, settings
7 from lutris.gui.dialogs import ErrorDialog, QuestionDialog
8 from lutris.gui.widgets.dialogs import Dialog
9 from lutris.util import jobs, system
10 from lutris.util.downloader import Downloader
11 from lutris.util.extract import extract_archive
12 from lutris.util.log import logger
13
14
15 class RunnerInstallDialog(Dialog):
16 COL_VER = 0
17 COL_ARCH = 1
18 COL_URL = 2
19 COL_INSTALLED = 3
20 COL_PROGRESS = 4
21
22 def __init__(self, title, parent, runner):
23 super(RunnerInstallDialog, self).__init__(
24 title, parent, 0, ('_OK', Gtk.ResponseType.OK)
25 )
26 width, height = (340, 380)
27 self.dialog_size = (width, height)
28 self.set_default_size(width, height)
29
30 self.runner = runner
31 self.runner_info = api.get_runners(self.runner)
32 if not self.runner_info:
33 ErrorDialog('Unable to get runner versions, check your internet connection',
34 parent=parent)
35 return
36 label = Gtk.Label("%s version management" % self.runner_info['name'])
37 self.vbox.add(label)
38 self.runner_store = self.get_store()
39 scrolled_window = Gtk.ScrolledWindow()
40 self.treeview = self.get_treeview(self.runner_store)
41 self.installing = {}
42 self.connect('response', self.on_response)
43
44 scrolled_window.set_policy(Gtk.PolicyType.AUTOMATIC,
45 Gtk.PolicyType.AUTOMATIC)
46 scrolled_window.set_shadow_type(Gtk.ShadowType.ETCHED_OUT)
47 scrolled_window.add(self.treeview)
48
49 self.vbox.pack_start(scrolled_window, True, True, 14)
50 self.show_all()
51
52 def get_treeview(self, model):
53 treeview = Gtk.TreeView(model=model)
54 treeview.set_headers_visible(False)
55
56 renderer_toggle = Gtk.CellRendererToggle()
57 renderer_text = Gtk.CellRendererText()
58 self.renderer_progress = Gtk.CellRendererProgress()
59
60 installed_column = Gtk.TreeViewColumn(None, renderer_toggle, active=3)
61 renderer_toggle.connect("toggled", self.on_installed_toggled)
62 treeview.append_column(installed_column)
63
64 version_column = Gtk.TreeViewColumn(None, renderer_text)
65 version_column.add_attribute(renderer_text, 'text', self.COL_VER)
66 version_column.set_property('min-width', 80)
67 treeview.append_column(version_column)
68
69 arch_column = Gtk.TreeViewColumn(None, renderer_text,
70 text=self.COL_ARCH)
71 arch_column.set_property('min-width', 50)
72 treeview.append_column(arch_column)
73
74 progress_column = Gtk.TreeViewColumn(None, self.renderer_progress,
75 value=self.COL_PROGRESS,
76 visible=self.COL_PROGRESS)
77 progress_column.set_property('fixed-width', 60)
78 progress_column.set_property('min-width', 60)
79 progress_column.set_property('resizable', False)
80 treeview.append_column(progress_column)
81
82 return treeview
83
84 def get_store(self):
85 liststore = Gtk.ListStore(str, str, str, bool, int)
86 for version_info in reversed(self.get_versions()):
87 version = version_info['version']
88 architecture = version_info['architecture']
89 progress = 0
90 is_installed = os.path.exists(
91 self.get_runner_path(version, architecture)
92 )
93 liststore.append(
94 [version_info['version'],
95 version_info['architecture'],
96 version_info['url'],
97 is_installed,
98 progress]
99 )
100 return liststore
101
102 def get_versions(self):
103 return self.runner_info['versions']
104
105 def get_runner_path(self, version, arch):
106 return os.path.join(settings.RUNNER_DIR, self.runner,
107 "{}-{}".format(version, arch))
108
109 def get_dest_path(self, row):
110 url = row[2]
111 filename = os.path.basename(url)
112 return os.path.join(settings.CACHE_DIR, filename)
113
114 def on_installed_toggled(self, widget, path):
115 row = self.runner_store[path]
116 if row[self.COL_VER] in self.installing:
117 confirm_dlg = QuestionDialog({
118 "question": "Do you want to cancel the download?",
119 "title": "Download starting"
120 })
121 if confirm_dlg.result == confirm_dlg.YES:
122 self.cancel_install(row)
123 elif row[self.COL_INSTALLED]:
124 self.uninstall_runner(row)
125 else:
126 self.install_runner(row)
127
128 def cancel_install(self, row):
129 self.installing[row[self.COL_VER]].cancel()
130 self.uninstall_runner(row)
131 row[self.COL_PROGRESS] = 0
132 self.installing.pop(row[self.COL_VER])
133
134 def uninstall_runner(self, row):
135 version = row[self.COL_VER]
136 arch = row[self.COL_ARCH]
137 system.remove_folder(self.get_runner_path(version, arch))
138 row[self.COL_INSTALLED] = False
139
140 def install_runner(self, row):
141 url = row[2]
142 logger.debug("Downloading %s", url)
143 dest_path = self.get_dest_path(row)
144 downloader = Downloader(url, dest_path, overwrite=True)
145 GLib.timeout_add(100, self.get_progress, downloader, row)
146 self.installing[row[self.COL_VER]] = downloader
147 downloader.start()
148
149 def get_progress(self, downloader, row):
150 if downloader.state == downloader.CANCELLED:
151 return False
152 if downloader.state == downloader.ERROR:
153 self.cancel_install(row)
154 return False
155 downloader.check_progress()
156 percent_downloaded = downloader.progress_percentage
157 if percent_downloaded >= 1:
158 row[4] = percent_downloaded
159 self.renderer_progress.props.pulse = -1
160 self.renderer_progress.props.text = "%d %%" % int(percent_downloaded)
161 else:
162 row[4] = 1
163 self.renderer_progress.props.pulse = random.randint(1, 100)
164 self.renderer_progress.props.text = "Downloading…"
165 if downloader.state == downloader.COMPLETED:
166 row[4] = 99
167 self.renderer_progress.props.text = "Extracting…"
168 self.on_runner_downloaded(row)
169 return False
170 return True
171
172 def on_runner_downloaded(self, row):
173 version = row[0]
174 architecture = row[1]
175 src = self.get_dest_path(row)
176 dst = self.get_runner_path(version, architecture)
177 jobs.AsyncCall(self.extract, self.on_extracted, src, dst, row)
178
179 def extract(self, src, dst, row):
180 extract_archive(src, dst)
181 return src, row
182
183 def on_extracted(self, xxx_todo_changeme, error):
184 (src, row) = xxx_todo_changeme
185 os.remove(src)
186 row[self.COL_PROGRESS] = 0
187 row[self.COL_INSTALLED] = True
188 self.renderer_progress.props.text = ""
189 self.installing.pop(row[self.COL_VER])
190
191 def on_response(self, dialog, response):
192 self.destroy()
193
194
195 if __name__ == "__main__":
196 import signal
197 signal.signal(signal.SIGINT, signal.SIG_DFL)
198 RunnerInstallDialog("test", None, "wine")
199 GObject.threads_init()
200 Gtk.main()
201
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lutris/gui/runnerinstalldialog.py b/lutris/gui/runnerinstalldialog.py
--- a/lutris/gui/runnerinstalldialog.py
+++ b/lutris/gui/runnerinstalldialog.py
@@ -23,7 +23,7 @@
super(RunnerInstallDialog, self).__init__(
title, parent, 0, ('_OK', Gtk.ResponseType.OK)
)
- width, height = (340, 380)
+ width, height = (460, 380)
self.dialog_size = (width, height)
self.set_default_size(width, height)
|
{"golden_diff": "diff --git a/lutris/gui/runnerinstalldialog.py b/lutris/gui/runnerinstalldialog.py\n--- a/lutris/gui/runnerinstalldialog.py\n+++ b/lutris/gui/runnerinstalldialog.py\n@@ -23,7 +23,7 @@\n super(RunnerInstallDialog, self).__init__(\n title, parent, 0, ('_OK', Gtk.ResponseType.OK)\n )\n- width, height = (340, 380)\n+ width, height = (460, 380)\n self.dialog_size = (width, height)\n self.set_default_size(width, height)\n", "issue": "\"Manage Versions` window is too narrow\nRecent improvements to Wine version managing are currently a bit pointless due to Manage Versions windows being too narrow and not being able to display the status of the downloading process:\r\n\r\n\r\n\n", "before_files": [{"content": "# pylint: disable=missing-docstring\nimport os\nimport random\n\nfrom gi.repository import GLib, GObject, Gtk\nfrom lutris import api, settings\nfrom lutris.gui.dialogs import ErrorDialog, QuestionDialog\nfrom lutris.gui.widgets.dialogs import Dialog\nfrom lutris.util import jobs, system\nfrom lutris.util.downloader import Downloader\nfrom lutris.util.extract import extract_archive\nfrom lutris.util.log import logger\n\n\nclass RunnerInstallDialog(Dialog):\n COL_VER = 0\n COL_ARCH = 1\n COL_URL = 2\n COL_INSTALLED = 3\n COL_PROGRESS = 4\n\n def __init__(self, title, parent, runner):\n super(RunnerInstallDialog, self).__init__(\n title, parent, 0, ('_OK', Gtk.ResponseType.OK)\n )\n width, height = (340, 380)\n self.dialog_size = (width, height)\n self.set_default_size(width, height)\n\n self.runner = runner\n self.runner_info = api.get_runners(self.runner)\n if not self.runner_info:\n ErrorDialog('Unable to get runner versions, check your internet connection',\n parent=parent)\n return\n label = Gtk.Label(\"%s version management\" % self.runner_info['name'])\n self.vbox.add(label)\n self.runner_store = self.get_store()\n scrolled_window = Gtk.ScrolledWindow()\n self.treeview = self.get_treeview(self.runner_store)\n self.installing = {}\n self.connect('response', self.on_response)\n\n scrolled_window.set_policy(Gtk.PolicyType.AUTOMATIC,\n Gtk.PolicyType.AUTOMATIC)\n scrolled_window.set_shadow_type(Gtk.ShadowType.ETCHED_OUT)\n scrolled_window.add(self.treeview)\n\n self.vbox.pack_start(scrolled_window, True, True, 14)\n self.show_all()\n\n def get_treeview(self, model):\n treeview = Gtk.TreeView(model=model)\n treeview.set_headers_visible(False)\n\n renderer_toggle = Gtk.CellRendererToggle()\n renderer_text = Gtk.CellRendererText()\n self.renderer_progress = Gtk.CellRendererProgress()\n\n installed_column = Gtk.TreeViewColumn(None, renderer_toggle, active=3)\n renderer_toggle.connect(\"toggled\", self.on_installed_toggled)\n treeview.append_column(installed_column)\n\n version_column = Gtk.TreeViewColumn(None, renderer_text)\n version_column.add_attribute(renderer_text, 'text', self.COL_VER)\n version_column.set_property('min-width', 80)\n treeview.append_column(version_column)\n\n arch_column = Gtk.TreeViewColumn(None, renderer_text,\n text=self.COL_ARCH)\n arch_column.set_property('min-width', 50)\n treeview.append_column(arch_column)\n\n progress_column = Gtk.TreeViewColumn(None, self.renderer_progress,\n value=self.COL_PROGRESS,\n visible=self.COL_PROGRESS)\n progress_column.set_property('fixed-width', 60)\n progress_column.set_property('min-width', 60)\n progress_column.set_property('resizable', False)\n treeview.append_column(progress_column)\n\n return treeview\n\n def get_store(self):\n liststore = Gtk.ListStore(str, str, str, bool, int)\n for version_info in 
reversed(self.get_versions()):\n version = version_info['version']\n architecture = version_info['architecture']\n progress = 0\n is_installed = os.path.exists(\n self.get_runner_path(version, architecture)\n )\n liststore.append(\n [version_info['version'],\n version_info['architecture'],\n version_info['url'],\n is_installed,\n progress]\n )\n return liststore\n\n def get_versions(self):\n return self.runner_info['versions']\n\n def get_runner_path(self, version, arch):\n return os.path.join(settings.RUNNER_DIR, self.runner,\n \"{}-{}\".format(version, arch))\n\n def get_dest_path(self, row):\n url = row[2]\n filename = os.path.basename(url)\n return os.path.join(settings.CACHE_DIR, filename)\n\n def on_installed_toggled(self, widget, path):\n row = self.runner_store[path]\n if row[self.COL_VER] in self.installing:\n confirm_dlg = QuestionDialog({\n \"question\": \"Do you want to cancel the download?\",\n \"title\": \"Download starting\"\n })\n if confirm_dlg.result == confirm_dlg.YES:\n self.cancel_install(row)\n elif row[self.COL_INSTALLED]:\n self.uninstall_runner(row)\n else:\n self.install_runner(row)\n\n def cancel_install(self, row):\n self.installing[row[self.COL_VER]].cancel()\n self.uninstall_runner(row)\n row[self.COL_PROGRESS] = 0\n self.installing.pop(row[self.COL_VER])\n\n def uninstall_runner(self, row):\n version = row[self.COL_VER]\n arch = row[self.COL_ARCH]\n system.remove_folder(self.get_runner_path(version, arch))\n row[self.COL_INSTALLED] = False\n\n def install_runner(self, row):\n url = row[2]\n logger.debug(\"Downloading %s\", url)\n dest_path = self.get_dest_path(row)\n downloader = Downloader(url, dest_path, overwrite=True)\n GLib.timeout_add(100, self.get_progress, downloader, row)\n self.installing[row[self.COL_VER]] = downloader\n downloader.start()\n\n def get_progress(self, downloader, row):\n if downloader.state == downloader.CANCELLED:\n return False\n if downloader.state == downloader.ERROR:\n self.cancel_install(row)\n return False\n downloader.check_progress()\n percent_downloaded = downloader.progress_percentage\n if percent_downloaded >= 1:\n row[4] = percent_downloaded\n self.renderer_progress.props.pulse = -1\n self.renderer_progress.props.text = \"%d %%\" % int(percent_downloaded)\n else:\n row[4] = 1\n self.renderer_progress.props.pulse = random.randint(1, 100)\n self.renderer_progress.props.text = \"Downloading\u2026\"\n if downloader.state == downloader.COMPLETED:\n row[4] = 99\n self.renderer_progress.props.text = \"Extracting\u2026\"\n self.on_runner_downloaded(row)\n return False\n return True\n\n def on_runner_downloaded(self, row):\n version = row[0]\n architecture = row[1]\n src = self.get_dest_path(row)\n dst = self.get_runner_path(version, architecture)\n jobs.AsyncCall(self.extract, self.on_extracted, src, dst, row)\n\n def extract(self, src, dst, row):\n extract_archive(src, dst)\n return src, row\n\n def on_extracted(self, xxx_todo_changeme, error):\n (src, row) = xxx_todo_changeme\n os.remove(src)\n row[self.COL_PROGRESS] = 0\n row[self.COL_INSTALLED] = True\n self.renderer_progress.props.text = \"\"\n self.installing.pop(row[self.COL_VER])\n\n def on_response(self, dialog, response):\n self.destroy()\n\n\nif __name__ == \"__main__\":\n import signal\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n RunnerInstallDialog(\"test\", None, \"wine\")\n GObject.threads_init()\n Gtk.main()\n", "path": "lutris/gui/runnerinstalldialog.py"}], "after_files": [{"content": "# pylint: disable=missing-docstring\nimport os\nimport random\n\nfrom 
gi.repository import GLib, GObject, Gtk\nfrom lutris import api, settings\nfrom lutris.gui.dialogs import ErrorDialog, QuestionDialog\nfrom lutris.gui.widgets.dialogs import Dialog\nfrom lutris.util import jobs, system\nfrom lutris.util.downloader import Downloader\nfrom lutris.util.extract import extract_archive\nfrom lutris.util.log import logger\n\n\nclass RunnerInstallDialog(Dialog):\n COL_VER = 0\n COL_ARCH = 1\n COL_URL = 2\n COL_INSTALLED = 3\n COL_PROGRESS = 4\n\n def __init__(self, title, parent, runner):\n super(RunnerInstallDialog, self).__init__(\n title, parent, 0, ('_OK', Gtk.ResponseType.OK)\n )\n width, height = (460, 380)\n self.dialog_size = (width, height)\n self.set_default_size(width, height)\n\n self.runner = runner\n self.runner_info = api.get_runners(self.runner)\n if not self.runner_info:\n ErrorDialog('Unable to get runner versions, check your internet connection',\n parent=parent)\n return\n label = Gtk.Label(\"%s version management\" % self.runner_info['name'])\n self.vbox.add(label)\n self.runner_store = self.get_store()\n scrolled_window = Gtk.ScrolledWindow()\n self.treeview = self.get_treeview(self.runner_store)\n self.installing = {}\n self.connect('response', self.on_response)\n\n scrolled_window.set_policy(Gtk.PolicyType.AUTOMATIC,\n Gtk.PolicyType.AUTOMATIC)\n scrolled_window.set_shadow_type(Gtk.ShadowType.ETCHED_OUT)\n scrolled_window.add(self.treeview)\n\n self.vbox.pack_start(scrolled_window, True, True, 14)\n self.show_all()\n\n def get_treeview(self, model):\n treeview = Gtk.TreeView(model=model)\n treeview.set_headers_visible(False)\n\n renderer_toggle = Gtk.CellRendererToggle()\n renderer_text = Gtk.CellRendererText()\n self.renderer_progress = Gtk.CellRendererProgress()\n\n installed_column = Gtk.TreeViewColumn(None, renderer_toggle, active=3)\n renderer_toggle.connect(\"toggled\", self.on_installed_toggled)\n treeview.append_column(installed_column)\n\n version_column = Gtk.TreeViewColumn(None, renderer_text)\n version_column.add_attribute(renderer_text, 'text', self.COL_VER)\n version_column.set_property('min-width', 80)\n treeview.append_column(version_column)\n\n arch_column = Gtk.TreeViewColumn(None, renderer_text,\n text=self.COL_ARCH)\n arch_column.set_property('min-width', 50)\n treeview.append_column(arch_column)\n\n progress_column = Gtk.TreeViewColumn(None, self.renderer_progress,\n value=self.COL_PROGRESS,\n visible=self.COL_PROGRESS)\n progress_column.set_property('fixed-width', 60)\n progress_column.set_property('min-width', 60)\n progress_column.set_property('resizable', False)\n treeview.append_column(progress_column)\n\n return treeview\n\n def get_store(self):\n liststore = Gtk.ListStore(str, str, str, bool, int)\n for version_info in reversed(self.get_versions()):\n version = version_info['version']\n architecture = version_info['architecture']\n progress = 0\n is_installed = os.path.exists(\n self.get_runner_path(version, architecture)\n )\n liststore.append(\n [version_info['version'],\n version_info['architecture'],\n version_info['url'],\n is_installed,\n progress]\n )\n return liststore\n\n def get_versions(self):\n return self.runner_info['versions']\n\n def get_runner_path(self, version, arch):\n return os.path.join(settings.RUNNER_DIR, self.runner,\n \"{}-{}\".format(version, arch))\n\n def get_dest_path(self, row):\n url = row[2]\n filename = os.path.basename(url)\n return os.path.join(settings.CACHE_DIR, filename)\n\n def on_installed_toggled(self, widget, path):\n row = self.runner_store[path]\n if row[self.COL_VER] 
in self.installing:\n confirm_dlg = QuestionDialog({\n \"question\": \"Do you want to cancel the download?\",\n \"title\": \"Download starting\"\n })\n if confirm_dlg.result == confirm_dlg.YES:\n self.cancel_install(row)\n elif row[self.COL_INSTALLED]:\n self.uninstall_runner(row)\n else:\n self.install_runner(row)\n\n def cancel_install(self, row):\n self.installing[row[self.COL_VER]].cancel()\n self.uninstall_runner(row)\n row[self.COL_PROGRESS] = 0\n self.installing.pop(row[self.COL_VER])\n\n def uninstall_runner(self, row):\n version = row[self.COL_VER]\n arch = row[self.COL_ARCH]\n system.remove_folder(self.get_runner_path(version, arch))\n row[self.COL_INSTALLED] = False\n\n def install_runner(self, row):\n url = row[2]\n logger.debug(\"Downloading %s\", url)\n dest_path = self.get_dest_path(row)\n downloader = Downloader(url, dest_path, overwrite=True)\n GLib.timeout_add(100, self.get_progress, downloader, row)\n self.installing[row[self.COL_VER]] = downloader\n downloader.start()\n\n def get_progress(self, downloader, row):\n if downloader.state == downloader.CANCELLED:\n return False\n if downloader.state == downloader.ERROR:\n self.cancel_install(row)\n return False\n downloader.check_progress()\n percent_downloaded = downloader.progress_percentage\n if percent_downloaded >= 1:\n row[4] = percent_downloaded\n self.renderer_progress.props.pulse = -1\n self.renderer_progress.props.text = \"%d %%\" % int(percent_downloaded)\n else:\n row[4] = 1\n self.renderer_progress.props.pulse = random.randint(1, 100)\n self.renderer_progress.props.text = \"Downloading\u2026\"\n if downloader.state == downloader.COMPLETED:\n row[4] = 99\n self.renderer_progress.props.text = \"Extracting\u2026\"\n self.on_runner_downloaded(row)\n return False\n return True\n\n def on_runner_downloaded(self, row):\n version = row[0]\n architecture = row[1]\n src = self.get_dest_path(row)\n dst = self.get_runner_path(version, architecture)\n jobs.AsyncCall(self.extract, self.on_extracted, src, dst, row)\n\n def extract(self, src, dst, row):\n extract_archive(src, dst)\n return src, row\n\n def on_extracted(self, xxx_todo_changeme, error):\n (src, row) = xxx_todo_changeme\n os.remove(src)\n row[self.COL_PROGRESS] = 0\n row[self.COL_INSTALLED] = True\n self.renderer_progress.props.text = \"\"\n self.installing.pop(row[self.COL_VER])\n\n def on_response(self, dialog, response):\n self.destroy()\n\n\nif __name__ == \"__main__\":\n import signal\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n RunnerInstallDialog(\"test\", None, \"wine\")\n GObject.threads_init()\n Gtk.main()\n", "path": "lutris/gui/runnerinstalldialog.py"}]}
| 2,454 | 149 |
gh_patches_debug_37740
|
rasdani/github-patches
|
git_diff
|
mitmproxy__mitmproxy-6719
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The mitmproxy program failed to start because the default port 8080 was occupied.
#### Problem Description
Because the default port 8080 is occupied, the mitmproxy program fails to start, and there is no output reason for the failure.
#### Steps to reproduce the behavior:
1. Listen on port 8080 using the nc command in a terminal window.
2. Start the mitmproxy program in another terminal window.
3. The mitmproxy program failed to start, and there was no output reason for the failure, and the normal terminal configuration was not restored.

#### System Information
Mitmproxy: 10.2.3 binary
Python: 3.12.2
OpenSSL: OpenSSL 3.2.1 30 Jan 2024
Platform: macOS-14.2.1-arm64-arm-64bit
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mitmproxy/master.py`
Content:
```
1 import asyncio
2 import logging
3
4 from . import ctx as mitmproxy_ctx
5 from .addons import termlog
6 from .proxy.mode_specs import ReverseMode
7 from .utils import asyncio_utils
8 from mitmproxy import addonmanager
9 from mitmproxy import command
10 from mitmproxy import eventsequence
11 from mitmproxy import hooks
12 from mitmproxy import http
13 from mitmproxy import log
14 from mitmproxy import options
15
16 logger = logging.getLogger(__name__)
17
18
19 class Master:
20 """
21 The master handles mitmproxy's main event loop.
22 """
23
24 event_loop: asyncio.AbstractEventLoop
25 _termlog_addon: termlog.TermLog | None = None
26
27 def __init__(
28 self,
29 opts: options.Options,
30 event_loop: asyncio.AbstractEventLoop | None = None,
31 with_termlog: bool = False,
32 ):
33 self.options: options.Options = opts or options.Options()
34 self.commands = command.CommandManager(self)
35 self.addons = addonmanager.AddonManager(self)
36
37 if with_termlog:
38 self._termlog_addon = termlog.TermLog()
39 self.addons.add(self._termlog_addon)
40
41 self.log = log.Log(self) # deprecated, do not use.
42 self._legacy_log_events = log.LegacyLogEvents(self)
43 self._legacy_log_events.install()
44
45 # We expect an active event loop here already because some addons
46 # may want to spawn tasks during the initial configuration phase,
47 # which happens before run().
48 self.event_loop = event_loop or asyncio.get_running_loop()
49 self.should_exit = asyncio.Event()
50 mitmproxy_ctx.master = self
51 mitmproxy_ctx.log = self.log # deprecated, do not use.
52 mitmproxy_ctx.options = self.options
53
54 async def run(self) -> None:
55 with (
56 asyncio_utils.install_exception_handler(self._asyncio_exception_handler),
57 asyncio_utils.set_eager_task_factory(),
58 ):
59 self.should_exit.clear()
60
61 if ec := self.addons.get("errorcheck"):
62 await ec.shutdown_if_errored()
63 if ps := self.addons.get("proxyserver"):
64 # This may block for some proxy modes, so we also monitor should_exit.
65 await asyncio.wait(
66 [
67 asyncio.create_task(ps.setup_servers()),
68 asyncio.create_task(self.should_exit.wait()),
69 ],
70 return_when=asyncio.FIRST_COMPLETED,
71 )
72 await self.running()
73 if ec := self.addons.get("errorcheck"):
74 await ec.shutdown_if_errored()
75 ec.finish()
76 try:
77 await self.should_exit.wait()
78 finally:
79 # .wait might be cancelled (e.g. by sys.exit)
80 await self.done()
81
82 def shutdown(self):
83 """
84 Shut down the proxy. This method is thread-safe.
85 """
86 # We may add an exception argument here.
87 self.event_loop.call_soon_threadsafe(self.should_exit.set)
88
89 async def running(self) -> None:
90 await self.addons.trigger_event(hooks.RunningHook())
91
92 async def done(self) -> None:
93 await self.addons.trigger_event(hooks.DoneHook())
94 self._legacy_log_events.uninstall()
95 if self._termlog_addon is not None:
96 self._termlog_addon.uninstall()
97
98 def _asyncio_exception_handler(self, loop, context) -> None:
99 try:
100 exc: Exception = context["exception"]
101 except KeyError:
102 logger.error(f"Unhandled asyncio error: {context}")
103 else:
104 if isinstance(exc, OSError) and exc.errno == 10038:
105 return # suppress https://bugs.python.org/issue43253
106 logger.error(
107 "Unhandled error in task.",
108 exc_info=(type(exc), exc, exc.__traceback__),
109 )
110
111 async def load_flow(self, f):
112 """
113 Loads a flow
114 """
115
116 if (
117 isinstance(f, http.HTTPFlow)
118 and len(self.options.mode) == 1
119 and self.options.mode[0].startswith("reverse:")
120 ):
121 # When we load flows in reverse proxy mode, we adjust the target host to
122 # the reverse proxy destination for all flows we load. This makes it very
123 # easy to replay saved flows against a different host.
124 # We may change this in the future so that clientplayback always replays to the first mode.
125 mode = ReverseMode.parse(self.options.mode[0])
126 assert isinstance(mode, ReverseMode)
127 f.request.host, f.request.port, *_ = mode.address
128 f.request.scheme = mode.scheme
129
130 for e in eventsequence.iterate(f):
131 await self.addons.handle_lifecycle(e)
132
```
Path: `mitmproxy/addons/errorcheck.py`
Content:
```
1 import asyncio
2 import logging
3 import sys
4
5 from mitmproxy import log
6
7
8 class ErrorCheck:
9 """Monitor startup for error log entries, and terminate immediately if there are some."""
10
11 repeat_errors_on_stderr: bool
12 """
13 Repeat all errors on stderr before exiting.
14 This is useful for the console UI, which otherwise swallows all output.
15 """
16
17 def __init__(self, repeat_errors_on_stderr: bool = False) -> None:
18 self.repeat_errors_on_stderr = repeat_errors_on_stderr
19
20 self.logger = ErrorCheckHandler()
21 self.logger.install()
22
23 def finish(self):
24 self.logger.uninstall()
25
26 async def shutdown_if_errored(self):
27 # don't run immediately, wait for all logging tasks to finish.
28 await asyncio.sleep(0)
29 if self.logger.has_errored:
30 plural = "s" if len(self.logger.has_errored) > 1 else ""
31 if self.repeat_errors_on_stderr:
32 msg = "\n".join(self.logger.format(r) for r in self.logger.has_errored)
33 print(f"Error{plural} logged during startup:\n{msg}", file=sys.stderr)
34 else:
35 print(
36 f"Error{plural} logged during startup, exiting...", file=sys.stderr
37 )
38
39 sys.exit(1)
40
41
42 class ErrorCheckHandler(log.MitmLogHandler):
43 def __init__(self) -> None:
44 super().__init__(logging.ERROR)
45 self.has_errored: list[logging.LogRecord] = []
46
47 def emit(self, record: logging.LogRecord) -> None:
48 self.has_errored.append(record)
49
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mitmproxy/addons/errorcheck.py b/mitmproxy/addons/errorcheck.py
--- a/mitmproxy/addons/errorcheck.py
+++ b/mitmproxy/addons/errorcheck.py
@@ -3,6 +3,8 @@
import sys
from mitmproxy import log
+from mitmproxy.contrib import click as miniclick
+from mitmproxy.utils import vt_codes
class ErrorCheck:
@@ -29,8 +31,13 @@
if self.logger.has_errored:
plural = "s" if len(self.logger.has_errored) > 1 else ""
if self.repeat_errors_on_stderr:
- msg = "\n".join(self.logger.format(r) for r in self.logger.has_errored)
- print(f"Error{plural} logged during startup:\n{msg}", file=sys.stderr)
+ message = f"Error{plural} logged during startup:"
+ if vt_codes.ensure_supported(sys.stderr): # pragma: no cover
+ message = miniclick.style(message, fg="red")
+ details = "\n".join(
+ self.logger.format(r) for r in self.logger.has_errored
+ )
+ print(f"{message}\n{details}", file=sys.stderr)
else:
print(
f"Error{plural} logged during startup, exiting...", file=sys.stderr
diff --git a/mitmproxy/master.py b/mitmproxy/master.py
--- a/mitmproxy/master.py
+++ b/mitmproxy/master.py
@@ -58,6 +58,7 @@
):
self.should_exit.clear()
+ # Can we exit before even bringing up servers?
if ec := self.addons.get("errorcheck"):
await ec.shutdown_if_errored()
if ps := self.addons.get("proxyserver"):
@@ -69,14 +70,23 @@
],
return_when=asyncio.FIRST_COMPLETED,
)
- await self.running()
- if ec := self.addons.get("errorcheck"):
- await ec.shutdown_if_errored()
- ec.finish()
+ if self.should_exit.is_set():
+ return
+ # Did bringing up servers fail?
+ if ec := self.addons.get("errorcheck"):
+ await ec.shutdown_if_errored()
+
try:
+ await self.running()
+ # Any errors in the final part of startup?
+ if ec := self.addons.get("errorcheck"):
+ await ec.shutdown_if_errored()
+ ec.finish()
+
await self.should_exit.wait()
finally:
- # .wait might be cancelled (e.g. by sys.exit)
+ # if running() was called, we also always want to call done().
+ # .wait might be cancelled (e.g. by sys.exit), so this needs to be in a finally block.
await self.done()
def shutdown(self):
|
{"golden_diff": "diff --git a/mitmproxy/addons/errorcheck.py b/mitmproxy/addons/errorcheck.py\n--- a/mitmproxy/addons/errorcheck.py\n+++ b/mitmproxy/addons/errorcheck.py\n@@ -3,6 +3,8 @@\n import sys\n \n from mitmproxy import log\n+from mitmproxy.contrib import click as miniclick\n+from mitmproxy.utils import vt_codes\n \n \n class ErrorCheck:\n@@ -29,8 +31,13 @@\n if self.logger.has_errored:\n plural = \"s\" if len(self.logger.has_errored) > 1 else \"\"\n if self.repeat_errors_on_stderr:\n- msg = \"\\n\".join(self.logger.format(r) for r in self.logger.has_errored)\n- print(f\"Error{plural} logged during startup:\\n{msg}\", file=sys.stderr)\n+ message = f\"Error{plural} logged during startup:\"\n+ if vt_codes.ensure_supported(sys.stderr): # pragma: no cover\n+ message = miniclick.style(message, fg=\"red\")\n+ details = \"\\n\".join(\n+ self.logger.format(r) for r in self.logger.has_errored\n+ )\n+ print(f\"{message}\\n{details}\", file=sys.stderr)\n else:\n print(\n f\"Error{plural} logged during startup, exiting...\", file=sys.stderr\ndiff --git a/mitmproxy/master.py b/mitmproxy/master.py\n--- a/mitmproxy/master.py\n+++ b/mitmproxy/master.py\n@@ -58,6 +58,7 @@\n ):\n self.should_exit.clear()\n \n+ # Can we exit before even bringing up servers?\n if ec := self.addons.get(\"errorcheck\"):\n await ec.shutdown_if_errored()\n if ps := self.addons.get(\"proxyserver\"):\n@@ -69,14 +70,23 @@\n ],\n return_when=asyncio.FIRST_COMPLETED,\n )\n- await self.running()\n- if ec := self.addons.get(\"errorcheck\"):\n- await ec.shutdown_if_errored()\n- ec.finish()\n+ if self.should_exit.is_set():\n+ return\n+ # Did bringing up servers fail?\n+ if ec := self.addons.get(\"errorcheck\"):\n+ await ec.shutdown_if_errored()\n+\n try:\n+ await self.running()\n+ # Any errors in the final part of startup?\n+ if ec := self.addons.get(\"errorcheck\"):\n+ await ec.shutdown_if_errored()\n+ ec.finish()\n+\n await self.should_exit.wait()\n finally:\n- # .wait might be cancelled (e.g. by sys.exit)\n+ # if running() was called, we also always want to call done().\n+ # .wait might be cancelled (e.g. by sys.exit), so this needs to be in a finally block.\n await self.done()\n \n def shutdown(self):\n", "issue": "The mitmproxy program failed to start because the default port 8080 was occupied.\n#### Problem Description\r\nBecause the default port 8080 is occupied, the mitmproxy program fails to start, and there is no output reason for the failure.\r\n\r\n#### Steps to reproduce the behavior:\r\n1. Listen on port 8080 using the nc command in a terminal window.\r\n2. Start the mitmproxy program in another terminal window.\r\n3. The mitmproxy program failed to start, and there was no output reason for the failure, and the normal terminal configuration was not restored.\r\n\r\n\r\n\r\n\r\n#### System Information\r\nMitmproxy: 10.2.3 binary\r\nPython: 3.12.2\r\nOpenSSL: OpenSSL 3.2.1 30 Jan 2024\r\nPlatform: macOS-14.2.1-arm64-arm-64bit\r\n\n", "before_files": [{"content": "import asyncio\nimport logging\n\nfrom . 
import ctx as mitmproxy_ctx\nfrom .addons import termlog\nfrom .proxy.mode_specs import ReverseMode\nfrom .utils import asyncio_utils\nfrom mitmproxy import addonmanager\nfrom mitmproxy import command\nfrom mitmproxy import eventsequence\nfrom mitmproxy import hooks\nfrom mitmproxy import http\nfrom mitmproxy import log\nfrom mitmproxy import options\n\nlogger = logging.getLogger(__name__)\n\n\nclass Master:\n \"\"\"\n The master handles mitmproxy's main event loop.\n \"\"\"\n\n event_loop: asyncio.AbstractEventLoop\n _termlog_addon: termlog.TermLog | None = None\n\n def __init__(\n self,\n opts: options.Options,\n event_loop: asyncio.AbstractEventLoop | None = None,\n with_termlog: bool = False,\n ):\n self.options: options.Options = opts or options.Options()\n self.commands = command.CommandManager(self)\n self.addons = addonmanager.AddonManager(self)\n\n if with_termlog:\n self._termlog_addon = termlog.TermLog()\n self.addons.add(self._termlog_addon)\n\n self.log = log.Log(self) # deprecated, do not use.\n self._legacy_log_events = log.LegacyLogEvents(self)\n self._legacy_log_events.install()\n\n # We expect an active event loop here already because some addons\n # may want to spawn tasks during the initial configuration phase,\n # which happens before run().\n self.event_loop = event_loop or asyncio.get_running_loop()\n self.should_exit = asyncio.Event()\n mitmproxy_ctx.master = self\n mitmproxy_ctx.log = self.log # deprecated, do not use.\n mitmproxy_ctx.options = self.options\n\n async def run(self) -> None:\n with (\n asyncio_utils.install_exception_handler(self._asyncio_exception_handler),\n asyncio_utils.set_eager_task_factory(),\n ):\n self.should_exit.clear()\n\n if ec := self.addons.get(\"errorcheck\"):\n await ec.shutdown_if_errored()\n if ps := self.addons.get(\"proxyserver\"):\n # This may block for some proxy modes, so we also monitor should_exit.\n await asyncio.wait(\n [\n asyncio.create_task(ps.setup_servers()),\n asyncio.create_task(self.should_exit.wait()),\n ],\n return_when=asyncio.FIRST_COMPLETED,\n )\n await self.running()\n if ec := self.addons.get(\"errorcheck\"):\n await ec.shutdown_if_errored()\n ec.finish()\n try:\n await self.should_exit.wait()\n finally:\n # .wait might be cancelled (e.g. by sys.exit)\n await self.done()\n\n def shutdown(self):\n \"\"\"\n Shut down the proxy. This method is thread-safe.\n \"\"\"\n # We may add an exception argument here.\n self.event_loop.call_soon_threadsafe(self.should_exit.set)\n\n async def running(self) -> None:\n await self.addons.trigger_event(hooks.RunningHook())\n\n async def done(self) -> None:\n await self.addons.trigger_event(hooks.DoneHook())\n self._legacy_log_events.uninstall()\n if self._termlog_addon is not None:\n self._termlog_addon.uninstall()\n\n def _asyncio_exception_handler(self, loop, context) -> None:\n try:\n exc: Exception = context[\"exception\"]\n except KeyError:\n logger.error(f\"Unhandled asyncio error: {context}\")\n else:\n if isinstance(exc, OSError) and exc.errno == 10038:\n return # suppress https://bugs.python.org/issue43253\n logger.error(\n \"Unhandled error in task.\",\n exc_info=(type(exc), exc, exc.__traceback__),\n )\n\n async def load_flow(self, f):\n \"\"\"\n Loads a flow\n \"\"\"\n\n if (\n isinstance(f, http.HTTPFlow)\n and len(self.options.mode) == 1\n and self.options.mode[0].startswith(\"reverse:\")\n ):\n # When we load flows in reverse proxy mode, we adjust the target host to\n # the reverse proxy destination for all flows we load. 
This makes it very\n # easy to replay saved flows against a different host.\n # We may change this in the future so that clientplayback always replays to the first mode.\n mode = ReverseMode.parse(self.options.mode[0])\n assert isinstance(mode, ReverseMode)\n f.request.host, f.request.port, *_ = mode.address\n f.request.scheme = mode.scheme\n\n for e in eventsequence.iterate(f):\n await self.addons.handle_lifecycle(e)\n", "path": "mitmproxy/master.py"}, {"content": "import asyncio\nimport logging\nimport sys\n\nfrom mitmproxy import log\n\n\nclass ErrorCheck:\n \"\"\"Monitor startup for error log entries, and terminate immediately if there are some.\"\"\"\n\n repeat_errors_on_stderr: bool\n \"\"\"\n Repeat all errors on stderr before exiting.\n This is useful for the console UI, which otherwise swallows all output.\n \"\"\"\n\n def __init__(self, repeat_errors_on_stderr: bool = False) -> None:\n self.repeat_errors_on_stderr = repeat_errors_on_stderr\n\n self.logger = ErrorCheckHandler()\n self.logger.install()\n\n def finish(self):\n self.logger.uninstall()\n\n async def shutdown_if_errored(self):\n # don't run immediately, wait for all logging tasks to finish.\n await asyncio.sleep(0)\n if self.logger.has_errored:\n plural = \"s\" if len(self.logger.has_errored) > 1 else \"\"\n if self.repeat_errors_on_stderr:\n msg = \"\\n\".join(self.logger.format(r) for r in self.logger.has_errored)\n print(f\"Error{plural} logged during startup:\\n{msg}\", file=sys.stderr)\n else:\n print(\n f\"Error{plural} logged during startup, exiting...\", file=sys.stderr\n )\n\n sys.exit(1)\n\n\nclass ErrorCheckHandler(log.MitmLogHandler):\n def __init__(self) -> None:\n super().__init__(logging.ERROR)\n self.has_errored: list[logging.LogRecord] = []\n\n def emit(self, record: logging.LogRecord) -> None:\n self.has_errored.append(record)\n", "path": "mitmproxy/addons/errorcheck.py"}], "after_files": [{"content": "import asyncio\nimport logging\n\nfrom . 
import ctx as mitmproxy_ctx\nfrom .addons import termlog\nfrom .proxy.mode_specs import ReverseMode\nfrom .utils import asyncio_utils\nfrom mitmproxy import addonmanager\nfrom mitmproxy import command\nfrom mitmproxy import eventsequence\nfrom mitmproxy import hooks\nfrom mitmproxy import http\nfrom mitmproxy import log\nfrom mitmproxy import options\n\nlogger = logging.getLogger(__name__)\n\n\nclass Master:\n \"\"\"\n The master handles mitmproxy's main event loop.\n \"\"\"\n\n event_loop: asyncio.AbstractEventLoop\n _termlog_addon: termlog.TermLog | None = None\n\n def __init__(\n self,\n opts: options.Options,\n event_loop: asyncio.AbstractEventLoop | None = None,\n with_termlog: bool = False,\n ):\n self.options: options.Options = opts or options.Options()\n self.commands = command.CommandManager(self)\n self.addons = addonmanager.AddonManager(self)\n\n if with_termlog:\n self._termlog_addon = termlog.TermLog()\n self.addons.add(self._termlog_addon)\n\n self.log = log.Log(self) # deprecated, do not use.\n self._legacy_log_events = log.LegacyLogEvents(self)\n self._legacy_log_events.install()\n\n # We expect an active event loop here already because some addons\n # may want to spawn tasks during the initial configuration phase,\n # which happens before run().\n self.event_loop = event_loop or asyncio.get_running_loop()\n self.should_exit = asyncio.Event()\n mitmproxy_ctx.master = self\n mitmproxy_ctx.log = self.log # deprecated, do not use.\n mitmproxy_ctx.options = self.options\n\n async def run(self) -> None:\n with (\n asyncio_utils.install_exception_handler(self._asyncio_exception_handler),\n asyncio_utils.set_eager_task_factory(),\n ):\n self.should_exit.clear()\n\n # Can we exit before even bringing up servers?\n if ec := self.addons.get(\"errorcheck\"):\n await ec.shutdown_if_errored()\n if ps := self.addons.get(\"proxyserver\"):\n # This may block for some proxy modes, so we also monitor should_exit.\n await asyncio.wait(\n [\n asyncio.create_task(ps.setup_servers()),\n asyncio.create_task(self.should_exit.wait()),\n ],\n return_when=asyncio.FIRST_COMPLETED,\n )\n if self.should_exit.is_set():\n return\n # Did bringing up servers fail?\n if ec := self.addons.get(\"errorcheck\"):\n await ec.shutdown_if_errored()\n\n try:\n await self.running()\n # Any errors in the final part of startup?\n if ec := self.addons.get(\"errorcheck\"):\n await ec.shutdown_if_errored()\n ec.finish()\n\n await self.should_exit.wait()\n finally:\n # if running() was called, we also always want to call done().\n # .wait might be cancelled (e.g. by sys.exit), so this needs to be in a finally block.\n await self.done()\n\n def shutdown(self):\n \"\"\"\n Shut down the proxy. 
This method is thread-safe.\n \"\"\"\n # We may add an exception argument here.\n self.event_loop.call_soon_threadsafe(self.should_exit.set)\n\n async def running(self) -> None:\n await self.addons.trigger_event(hooks.RunningHook())\n\n async def done(self) -> None:\n await self.addons.trigger_event(hooks.DoneHook())\n self._legacy_log_events.uninstall()\n if self._termlog_addon is not None:\n self._termlog_addon.uninstall()\n\n def _asyncio_exception_handler(self, loop, context) -> None:\n try:\n exc: Exception = context[\"exception\"]\n except KeyError:\n logger.error(f\"Unhandled asyncio error: {context}\")\n else:\n if isinstance(exc, OSError) and exc.errno == 10038:\n return # suppress https://bugs.python.org/issue43253\n logger.error(\n \"Unhandled error in task.\",\n exc_info=(type(exc), exc, exc.__traceback__),\n )\n\n async def load_flow(self, f):\n \"\"\"\n Loads a flow\n \"\"\"\n\n if (\n isinstance(f, http.HTTPFlow)\n and len(self.options.mode) == 1\n and self.options.mode[0].startswith(\"reverse:\")\n ):\n # When we load flows in reverse proxy mode, we adjust the target host to\n # the reverse proxy destination for all flows we load. This makes it very\n # easy to replay saved flows against a different host.\n # We may change this in the future so that clientplayback always replays to the first mode.\n mode = ReverseMode.parse(self.options.mode[0])\n assert isinstance(mode, ReverseMode)\n f.request.host, f.request.port, *_ = mode.address\n f.request.scheme = mode.scheme\n\n for e in eventsequence.iterate(f):\n await self.addons.handle_lifecycle(e)\n", "path": "mitmproxy/master.py"}, {"content": "import asyncio\nimport logging\nimport sys\n\nfrom mitmproxy import log\nfrom mitmproxy.contrib import click as miniclick\nfrom mitmproxy.utils import vt_codes\n\n\nclass ErrorCheck:\n \"\"\"Monitor startup for error log entries, and terminate immediately if there are some.\"\"\"\n\n repeat_errors_on_stderr: bool\n \"\"\"\n Repeat all errors on stderr before exiting.\n This is useful for the console UI, which otherwise swallows all output.\n \"\"\"\n\n def __init__(self, repeat_errors_on_stderr: bool = False) -> None:\n self.repeat_errors_on_stderr = repeat_errors_on_stderr\n\n self.logger = ErrorCheckHandler()\n self.logger.install()\n\n def finish(self):\n self.logger.uninstall()\n\n async def shutdown_if_errored(self):\n # don't run immediately, wait for all logging tasks to finish.\n await asyncio.sleep(0)\n if self.logger.has_errored:\n plural = \"s\" if len(self.logger.has_errored) > 1 else \"\"\n if self.repeat_errors_on_stderr:\n message = f\"Error{plural} logged during startup:\"\n if vt_codes.ensure_supported(sys.stderr): # pragma: no cover\n message = miniclick.style(message, fg=\"red\")\n details = \"\\n\".join(\n self.logger.format(r) for r in self.logger.has_errored\n )\n print(f\"{message}\\n{details}\", file=sys.stderr)\n else:\n print(\n f\"Error{plural} logged during startup, exiting...\", file=sys.stderr\n )\n\n sys.exit(1)\n\n\nclass ErrorCheckHandler(log.MitmLogHandler):\n def __init__(self) -> None:\n super().__init__(logging.ERROR)\n self.has_errored: list[logging.LogRecord] = []\n\n def emit(self, record: logging.LogRecord) -> None:\n self.has_errored.append(record)\n", "path": "mitmproxy/addons/errorcheck.py"}]}
| 2,252 | 635 |
gh_patches_debug_214
|
rasdani/github-patches
|
git_diff
|
DDMAL__CantusDB-328
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Representation of Notation objects
When I recently tried to edit a source, I was presented with an error message, and found that I was missing several required fields, including this one: 
Notation objects are currently pretty inscrutable. They should be represented such that at least their `name` property is visible.
Larger question: why do we have notation objects at all? Currently, the notation model has only one property: `name`. Could this information in Source objects not be more simply represented by a CharField? Is using Notation objects simply the way things were done in OldCantus? Are we using them to ensure standardization among multiple Sources?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `django/cantusdb_project/main_app/models/notation.py`
Content:
```
1 from django.db import models
2 from main_app.models import BaseModel
3
4
5 class Notation(BaseModel):
6 name = models.CharField(max_length=63)
7
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/django/cantusdb_project/main_app/models/notation.py b/django/cantusdb_project/main_app/models/notation.py
--- a/django/cantusdb_project/main_app/models/notation.py
+++ b/django/cantusdb_project/main_app/models/notation.py
@@ -4,3 +4,5 @@
class Notation(BaseModel):
name = models.CharField(max_length=63)
+ def __str__(self):
+ return f"{self.name} ({self.id})"
\ No newline at end of file
|
{"golden_diff": "diff --git a/django/cantusdb_project/main_app/models/notation.py b/django/cantusdb_project/main_app/models/notation.py\n--- a/django/cantusdb_project/main_app/models/notation.py\n+++ b/django/cantusdb_project/main_app/models/notation.py\n@@ -4,3 +4,5 @@\n \n class Notation(BaseModel):\n name = models.CharField(max_length=63)\n+ def __str__(self):\n+ return f\"{self.name} ({self.id})\"\n\\ No newline at end of file\n", "issue": "Representation of Notation objects\nWhen I recently tried to edit a source, I was presented with an error message, and found that I was missing several required fields, including this one: \r\nNotation objects are currently pretty inscrutable. They should be represented such that at least their `name` property is visible.\r\n\r\nLarger question: why do we have notation objects at all? Currently, the notation model has only one property: `name`. Could this information in Source objects not be more simply represented by a CharField? Is using Notation objects simply the way things were done in OldCantus? Are we using them to ensure standardization among multiple Sources?\n", "before_files": [{"content": "from django.db import models\nfrom main_app.models import BaseModel\n\n\nclass Notation(BaseModel):\n name = models.CharField(max_length=63)\n", "path": "django/cantusdb_project/main_app/models/notation.py"}], "after_files": [{"content": "from django.db import models\nfrom main_app.models import BaseModel\n\n\nclass Notation(BaseModel):\n name = models.CharField(max_length=63)\n def __str__(self):\n return f\"{self.name} ({self.id})\"", "path": "django/cantusdb_project/main_app/models/notation.py"}]}
| 523 | 120 |
gh_patches_debug_19368
|
rasdani/github-patches
|
git_diff
|
sopel-irc__sopel-610
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remind is case sensitive
[22:20] <Eyore> .in 3SEC stuff
[22:20] @Willie Eyore: Sorry, didn't understand the input.
[22:20] <Eyore> .in 3 SEC stuff
[22:20] @Willie Eyore: Sorry, didn't understand the input.
[22:20] <Eyore> .in 3 S stuff
[22:20] @Willie Eyore: Sorry, didn't understand the input.
[22:20] <Eyore> .in 3 MIN stuff
[22:20] @Willie Eyore: Sorry, didn't understand the input.
[22:20] <Eyore> .in 3 Min stuff
[22:20] @Willie Eyore: Sorry, didn't understand the input.
I'd fix it myself, but looking at the code, I'm not sure where to put the .lower()
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `willie/modules/remind.py`
Content:
```
1 # coding=utf8
2 """
3 remind.py - Willie Reminder Module
4 Copyright 2011, Sean B. Palmer, inamidst.com
5 Licensed under the Eiffel Forum License 2.
6
7 http://willie.dftba.net
8 """
9 from __future__ import unicode_literals
10
11 import os
12 import re
13 import time
14 import threading
15 import collections
16 import codecs
17 from datetime import datetime
18 from willie.module import commands, example, NOLIMIT
19 import willie.tools
20
21 try:
22 import pytz
23 except:
24 pytz = None
25
26 def filename(self):
27 name = self.nick + '-' + self.config.host + '.reminders.db'
28 return os.path.join(self.config.dotdir, name)
29
30
31 def load_database(name):
32 data = {}
33 if os.path.isfile(name):
34 f = codecs.open(name, 'r', encoding='utf-8')
35 for line in f:
36 unixtime, channel, nick, message = line.split('\t')
37 message = message.rstrip('\n')
38 t = int(float(unixtime)) # WTFs going on here?
39 reminder = (channel, nick, message)
40 try:
41 data[t].append(reminder)
42 except KeyError:
43 data[t] = [reminder]
44 f.close()
45 return data
46
47
48 def dump_database(name, data):
49 f = codecs.open(name, 'w', encoding='utf-8')
50 for unixtime, reminders in willie.tools.iteritems(data):
51 for channel, nick, message in reminders:
52 f.write('%s\t%s\t%s\t%s\n' % (unixtime, channel, nick, message))
53 f.close()
54
55
56 def setup(bot):
57 bot.rfn = filename(bot)
58 bot.rdb = load_database(bot.rfn)
59
60 def monitor(bot):
61 time.sleep(5)
62 while True:
63 now = int(time.time())
64 unixtimes = [int(key) for key in bot.rdb]
65 oldtimes = [t for t in unixtimes if t <= now]
66 if oldtimes:
67 for oldtime in oldtimes:
68 for (channel, nick, message) in bot.rdb[oldtime]:
69 if message:
70 bot.msg(channel, nick + ': ' + message)
71 else:
72 bot.msg(channel, nick + '!')
73 del bot.rdb[oldtime]
74 dump_database(bot.rfn, bot.rdb)
75 time.sleep(2.5)
76
77 targs = (bot,)
78 t = threading.Thread(target=monitor, args=targs)
79 t.start()
80
81 scaling = collections.OrderedDict([
82 ('years', 365.25 * 24 * 3600),
83 ('year', 365.25 * 24 * 3600),
84 ('yrs', 365.25 * 24 * 3600),
85 ('y', 365.25 * 24 * 3600),
86
87 ('months', 29.53059 * 24 * 3600),
88 ('month', 29.53059 * 24 * 3600),
89 ('mo', 29.53059 * 24 * 3600),
90
91 ('weeks', 7 * 24 * 3600),
92 ('week', 7 * 24 * 3600),
93 ('wks', 7 * 24 * 3600),
94 ('wk', 7 * 24 * 3600),
95 ('w', 7 * 24 * 3600),
96
97 ('days', 24 * 3600),
98 ('day', 24 * 3600),
99 ('d', 24 * 3600),
100
101 ('hours', 3600),
102 ('hour', 3600),
103 ('hrs', 3600),
104 ('hr', 3600),
105 ('h', 3600),
106
107 ('minutes', 60),
108 ('minute', 60),
109 ('mins', 60),
110 ('min', 60),
111 ('m', 60),
112
113 ('seconds', 1),
114 ('second', 1),
115 ('secs', 1),
116 ('sec', 1),
117 ('s', 1),
118 ])
119
120 periods = '|'.join(scaling.keys())
121
122
123 @commands('in')
124 @example('.in 3h45m Go to class')
125 def remind(bot, trigger):
126 """Gives you a reminder in the given amount of time."""
127 duration = 0
128 message = filter(None, re.split('(\d+(?:\.\d+)? ?(?:' + periods + ')) ?',
129 trigger.group(2))[1:])
130 reminder = ''
131 stop = False
132 for piece in message:
133 grp = re.match('(\d+(?:\.\d+)?) ?(.*) ?', piece)
134 if grp and not stop:
135 length = float(grp.group(1))
136 factor = scaling.get(grp.group(2), 60)
137 duration += length * factor
138 else:
139 reminder = reminder + piece
140 stop = True
141 if duration == 0:
142 return bot.reply("Sorry, didn't understand the input.")
143
144 if duration % 1:
145 duration = int(duration) + 1
146 else:
147 duration = int(duration)
148 timezone = willie.tools.get_timezone(
149 bot.db, bot.config, None, trigger.nick, trigger.sender)
150 create_reminder(bot, trigger, duration, reminder, timezone)
151
152
153 @commands('at')
154 @example('.at 13:47 Do your homework!')
155 def at(bot, trigger):
156 """
157 Gives you a reminder at the given time. Takes hh:mm:ssTimezone
158 message. Timezone is any timezone Willie takes elsewhere; the best choices
159 are those from the tzdb; a list of valid options is available at
160 http://dft.ba/-tz . The seconds and timezone are optional.
161 """
162 regex = re.compile(r'(\d+):(\d+)(?::(\d+))?([^\s\d]+)? (.*)')
163 match = regex.match(trigger.group(2))
164 if not match:
165 bot.reply("Sorry, but I didn't understand your input.")
166 return NOLIMIT
167 hour, minute, second, tz, message = match.groups()
168 if not second:
169 second = '0'
170
171 if pytz:
172 timezone = willie.tools.get_timezone(bot.db, bot.config, tz,
173 trigger.nick, trigger.sender)
174 if not timezone:
175 timezone = 'UTC'
176 now = datetime.now(pytz.timezone(timezone))
177 at_time = datetime(now.year, now.month, now.day,
178 int(hour), int(minute), int(second),
179 tzinfo=now.tzinfo)
180 timediff = at_time - now
181 else:
182 if tz and tz.upper() != 'UTC':
183 bot.reply("I don't have timzeone support installed.")
184 return NOLIMIT
185 now = datetime.now()
186 at_time = datetime(now.year, now.month, now.day,
187 int(hour), int(minute), int(second))
188 timediff = at_time - now
189
190 duration = timediff.seconds
191
192 if duration < 0:
193 duration += 86400
194 create_reminder(bot, trigger, duration, message, 'UTC')
195
196
197 def create_reminder(bot, trigger, duration, message, tz):
198 t = int(time.time()) + duration
199 reminder = (trigger.sender, trigger.nick, message)
200 try:
201 bot.rdb[t].append(reminder)
202 except KeyError:
203 bot.rdb[t] = [reminder]
204
205 dump_database(bot.rfn, bot.rdb)
206
207 if duration >= 60:
208 remind_at = datetime.utcfromtimestamp(t)
209 timef = willie.tools.format_time(bot.db, bot.config, tz, trigger.nick,
210 trigger.sender, remind_at)
211
212 bot.reply('Okay, will remind at %s' % timef)
213 else:
214 bot.reply('Okay, will remind in %s secs' % duration)
215
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/willie/modules/remind.py b/willie/modules/remind.py
--- a/willie/modules/remind.py
+++ b/willie/modules/remind.py
@@ -125,7 +125,7 @@
def remind(bot, trigger):
"""Gives you a reminder in the given amount of time."""
duration = 0
- message = filter(None, re.split('(\d+(?:\.\d+)? ?(?:' + periods + ')) ?',
+ message = filter(None, re.split('(\d+(?:\.\d+)? ?(?:(?i)' + periods + ')) ?',
trigger.group(2))[1:])
reminder = ''
stop = False
@@ -133,7 +133,7 @@
grp = re.match('(\d+(?:\.\d+)?) ?(.*) ?', piece)
if grp and not stop:
length = float(grp.group(1))
- factor = scaling.get(grp.group(2), 60)
+ factor = scaling.get(grp.group(2).lower(), 60)
duration += length * factor
else:
reminder = reminder + piece
|
{"golden_diff": "diff --git a/willie/modules/remind.py b/willie/modules/remind.py\n--- a/willie/modules/remind.py\n+++ b/willie/modules/remind.py\n@@ -125,7 +125,7 @@\n def remind(bot, trigger):\n \"\"\"Gives you a reminder in the given amount of time.\"\"\"\n duration = 0\n- message = filter(None, re.split('(\\d+(?:\\.\\d+)? ?(?:' + periods + ')) ?',\n+ message = filter(None, re.split('(\\d+(?:\\.\\d+)? ?(?:(?i)' + periods + ')) ?',\n trigger.group(2))[1:])\n reminder = ''\n stop = False\n@@ -133,7 +133,7 @@\n grp = re.match('(\\d+(?:\\.\\d+)?) ?(.*) ?', piece)\n if grp and not stop:\n length = float(grp.group(1))\n- factor = scaling.get(grp.group(2), 60)\n+ factor = scaling.get(grp.group(2).lower(), 60)\n duration += length * factor\n else:\n reminder = reminder + piece\n", "issue": "Remind is case sensitive\n[22:20] <Eyore> .in 3SEC stuff\n[22:20] @Willie Eyore: Sorry, didn't understand the input.\n[22:20] <Eyore> .in 3 SEC stuff\n[22:20] @Willie Eyore: Sorry, didn't understand the input.\n[22:20] <Eyore> .in 3 S stuff\n[22:20] @Willie Eyore: Sorry, didn't understand the input.\n[22:20] <Eyore> .in 3 MIN stuff\n[22:20] @Willie Eyore: Sorry, didn't understand the input.\n[22:20] <Eyore> .in 3 Min stuff\n[22:20] @Willie Eyore: Sorry, didn't understand the input.\n\nI'd fix it myself, but looking at the code, I'm not sure where to put the .lower()\n\n", "before_files": [{"content": "# coding=utf8\n\"\"\"\nremind.py - Willie Reminder Module\nCopyright 2011, Sean B. Palmer, inamidst.com\nLicensed under the Eiffel Forum License 2.\n\nhttp://willie.dftba.net\n\"\"\"\nfrom __future__ import unicode_literals\n\nimport os\nimport re\nimport time\nimport threading\nimport collections\nimport codecs\nfrom datetime import datetime\nfrom willie.module import commands, example, NOLIMIT\nimport willie.tools\n\ntry:\n import pytz\nexcept:\n pytz = None\n\ndef filename(self):\n name = self.nick + '-' + self.config.host + '.reminders.db'\n return os.path.join(self.config.dotdir, name)\n\n\ndef load_database(name):\n data = {}\n if os.path.isfile(name):\n f = codecs.open(name, 'r', encoding='utf-8')\n for line in f:\n unixtime, channel, nick, message = line.split('\\t')\n message = message.rstrip('\\n')\n t = int(float(unixtime)) # WTFs going on here?\n reminder = (channel, nick, message)\n try:\n data[t].append(reminder)\n except KeyError:\n data[t] = [reminder]\n f.close()\n return data\n\n\ndef dump_database(name, data):\n f = codecs.open(name, 'w', encoding='utf-8')\n for unixtime, reminders in willie.tools.iteritems(data):\n for channel, nick, message in reminders:\n f.write('%s\\t%s\\t%s\\t%s\\n' % (unixtime, channel, nick, message))\n f.close()\n\n\ndef setup(bot):\n bot.rfn = filename(bot)\n bot.rdb = load_database(bot.rfn)\n\n def monitor(bot):\n time.sleep(5)\n while True:\n now = int(time.time())\n unixtimes = [int(key) for key in bot.rdb]\n oldtimes = [t for t in unixtimes if t <= now]\n if oldtimes:\n for oldtime in oldtimes:\n for (channel, nick, message) in bot.rdb[oldtime]:\n if message:\n bot.msg(channel, nick + ': ' + message)\n else:\n bot.msg(channel, nick + '!')\n del bot.rdb[oldtime]\n dump_database(bot.rfn, bot.rdb)\n time.sleep(2.5)\n\n targs = (bot,)\n t = threading.Thread(target=monitor, args=targs)\n t.start()\n\nscaling = collections.OrderedDict([\n ('years', 365.25 * 24 * 3600),\n ('year', 365.25 * 24 * 3600),\n ('yrs', 365.25 * 24 * 3600),\n ('y', 365.25 * 24 * 3600),\n\n ('months', 29.53059 * 24 * 3600),\n ('month', 29.53059 * 24 * 3600),\n ('mo', 29.53059 * 24 * 3600),\n\n 
('weeks', 7 * 24 * 3600),\n ('week', 7 * 24 * 3600),\n ('wks', 7 * 24 * 3600),\n ('wk', 7 * 24 * 3600),\n ('w', 7 * 24 * 3600),\n\n ('days', 24 * 3600),\n ('day', 24 * 3600),\n ('d', 24 * 3600),\n\n ('hours', 3600),\n ('hour', 3600),\n ('hrs', 3600),\n ('hr', 3600),\n ('h', 3600),\n\n ('minutes', 60),\n ('minute', 60),\n ('mins', 60),\n ('min', 60),\n ('m', 60),\n\n ('seconds', 1),\n ('second', 1),\n ('secs', 1),\n ('sec', 1),\n ('s', 1),\n])\n\nperiods = '|'.join(scaling.keys())\n\n\n@commands('in')\n@example('.in 3h45m Go to class')\ndef remind(bot, trigger):\n \"\"\"Gives you a reminder in the given amount of time.\"\"\"\n duration = 0\n message = filter(None, re.split('(\\d+(?:\\.\\d+)? ?(?:' + periods + ')) ?',\n trigger.group(2))[1:])\n reminder = ''\n stop = False\n for piece in message:\n grp = re.match('(\\d+(?:\\.\\d+)?) ?(.*) ?', piece)\n if grp and not stop:\n length = float(grp.group(1))\n factor = scaling.get(grp.group(2), 60)\n duration += length * factor\n else:\n reminder = reminder + piece\n stop = True\n if duration == 0:\n return bot.reply(\"Sorry, didn't understand the input.\")\n\n if duration % 1:\n duration = int(duration) + 1\n else:\n duration = int(duration)\n timezone = willie.tools.get_timezone(\n bot.db, bot.config, None, trigger.nick, trigger.sender)\n create_reminder(bot, trigger, duration, reminder, timezone)\n\n\n@commands('at')\n@example('.at 13:47 Do your homework!')\ndef at(bot, trigger):\n \"\"\"\n Gives you a reminder at the given time. Takes hh:mm:ssTimezone\n message. Timezone is any timezone Willie takes elsewhere; the best choices\n are those from the tzdb; a list of valid options is available at\n http://dft.ba/-tz . The seconds and timezone are optional.\n \"\"\"\n regex = re.compile(r'(\\d+):(\\d+)(?::(\\d+))?([^\\s\\d]+)? (.*)')\n match = regex.match(trigger.group(2))\n if not match:\n bot.reply(\"Sorry, but I didn't understand your input.\")\n return NOLIMIT\n hour, minute, second, tz, message = match.groups()\n if not second:\n second = '0'\n\n if pytz:\n timezone = willie.tools.get_timezone(bot.db, bot.config, tz,\n trigger.nick, trigger.sender)\n if not timezone:\n timezone = 'UTC'\n now = datetime.now(pytz.timezone(timezone))\n at_time = datetime(now.year, now.month, now.day,\n int(hour), int(minute), int(second),\n tzinfo=now.tzinfo)\n timediff = at_time - now\n else:\n if tz and tz.upper() != 'UTC':\n bot.reply(\"I don't have timzeone support installed.\")\n return NOLIMIT\n now = datetime.now()\n at_time = datetime(now.year, now.month, now.day,\n int(hour), int(minute), int(second))\n timediff = at_time - now\n\n duration = timediff.seconds\n\n if duration < 0:\n duration += 86400\n create_reminder(bot, trigger, duration, message, 'UTC')\n\n\ndef create_reminder(bot, trigger, duration, message, tz):\n t = int(time.time()) + duration\n reminder = (trigger.sender, trigger.nick, message)\n try:\n bot.rdb[t].append(reminder)\n except KeyError:\n bot.rdb[t] = [reminder]\n\n dump_database(bot.rfn, bot.rdb)\n\n if duration >= 60:\n remind_at = datetime.utcfromtimestamp(t)\n timef = willie.tools.format_time(bot.db, bot.config, tz, trigger.nick,\n trigger.sender, remind_at)\n\n bot.reply('Okay, will remind at %s' % timef)\n else:\n bot.reply('Okay, will remind in %s secs' % duration)\n", "path": "willie/modules/remind.py"}], "after_files": [{"content": "# coding=utf8\n\"\"\"\nremind.py - Willie Reminder Module\nCopyright 2011, Sean B. 
Palmer, inamidst.com\nLicensed under the Eiffel Forum License 2.\n\nhttp://willie.dftba.net\n\"\"\"\nfrom __future__ import unicode_literals\n\nimport os\nimport re\nimport time\nimport threading\nimport collections\nimport codecs\nfrom datetime import datetime\nfrom willie.module import commands, example, NOLIMIT\nimport willie.tools\n\ntry:\n import pytz\nexcept:\n pytz = None\n\ndef filename(self):\n name = self.nick + '-' + self.config.host + '.reminders.db'\n return os.path.join(self.config.dotdir, name)\n\n\ndef load_database(name):\n data = {}\n if os.path.isfile(name):\n f = codecs.open(name, 'r', encoding='utf-8')\n for line in f:\n unixtime, channel, nick, message = line.split('\\t')\n message = message.rstrip('\\n')\n t = int(float(unixtime)) # WTFs going on here?\n reminder = (channel, nick, message)\n try:\n data[t].append(reminder)\n except KeyError:\n data[t] = [reminder]\n f.close()\n return data\n\n\ndef dump_database(name, data):\n f = codecs.open(name, 'w', encoding='utf-8')\n for unixtime, reminders in willie.tools.iteritems(data):\n for channel, nick, message in reminders:\n f.write('%s\\t%s\\t%s\\t%s\\n' % (unixtime, channel, nick, message))\n f.close()\n\n\ndef setup(bot):\n bot.rfn = filename(bot)\n bot.rdb = load_database(bot.rfn)\n\n def monitor(bot):\n time.sleep(5)\n while True:\n now = int(time.time())\n unixtimes = [int(key) for key in bot.rdb]\n oldtimes = [t for t in unixtimes if t <= now]\n if oldtimes:\n for oldtime in oldtimes:\n for (channel, nick, message) in bot.rdb[oldtime]:\n if message:\n bot.msg(channel, nick + ': ' + message)\n else:\n bot.msg(channel, nick + '!')\n del bot.rdb[oldtime]\n dump_database(bot.rfn, bot.rdb)\n time.sleep(2.5)\n\n targs = (bot,)\n t = threading.Thread(target=monitor, args=targs)\n t.start()\n\nscaling = collections.OrderedDict([\n ('years', 365.25 * 24 * 3600),\n ('year', 365.25 * 24 * 3600),\n ('yrs', 365.25 * 24 * 3600),\n ('y', 365.25 * 24 * 3600),\n\n ('months', 29.53059 * 24 * 3600),\n ('month', 29.53059 * 24 * 3600),\n ('mo', 29.53059 * 24 * 3600),\n\n ('weeks', 7 * 24 * 3600),\n ('week', 7 * 24 * 3600),\n ('wks', 7 * 24 * 3600),\n ('wk', 7 * 24 * 3600),\n ('w', 7 * 24 * 3600),\n\n ('days', 24 * 3600),\n ('day', 24 * 3600),\n ('d', 24 * 3600),\n\n ('hours', 3600),\n ('hour', 3600),\n ('hrs', 3600),\n ('hr', 3600),\n ('h', 3600),\n\n ('minutes', 60),\n ('minute', 60),\n ('mins', 60),\n ('min', 60),\n ('m', 60),\n\n ('seconds', 1),\n ('second', 1),\n ('secs', 1),\n ('sec', 1),\n ('s', 1),\n])\n\nperiods = '|'.join(scaling.keys())\n\n\n@commands('in')\n@example('.in 3h45m Go to class')\ndef remind(bot, trigger):\n \"\"\"Gives you a reminder in the given amount of time.\"\"\"\n duration = 0\n message = filter(None, re.split('(\\d+(?:\\.\\d+)? ?(?:(?i)' + periods + ')) ?',\n trigger.group(2))[1:])\n reminder = ''\n stop = False\n for piece in message:\n grp = re.match('(\\d+(?:\\.\\d+)?) 
?(.*) ?', piece)\n if grp and not stop:\n length = float(grp.group(1))\n factor = scaling.get(grp.group(2).lower(), 60)\n duration += length * factor\n else:\n reminder = reminder + piece\n stop = True\n if duration == 0:\n return bot.reply(\"Sorry, didn't understand the input.\")\n\n if duration % 1:\n duration = int(duration) + 1\n else:\n duration = int(duration)\n timezone = willie.tools.get_timezone(\n bot.db, bot.config, None, trigger.nick, trigger.sender)\n create_reminder(bot, trigger, duration, reminder, timezone)\n\n\n@commands('at')\n@example('.at 13:47 Do your homework!')\ndef at(bot, trigger):\n \"\"\"\n Gives you a reminder at the given time. Takes hh:mm:ssTimezone\n message. Timezone is any timezone Willie takes elsewhere; the best choices\n are those from the tzdb; a list of valid options is available at\n http://dft.ba/-tz . The seconds and timezone are optional.\n \"\"\"\n regex = re.compile(r'(\\d+):(\\d+)(?::(\\d+))?([^\\s\\d]+)? (.*)')\n match = regex.match(trigger.group(2))\n if not match:\n bot.reply(\"Sorry, but I didn't understand your input.\")\n return NOLIMIT\n hour, minute, second, tz, message = match.groups()\n if not second:\n second = '0'\n\n if pytz:\n timezone = willie.tools.get_timezone(bot.db, bot.config, tz,\n trigger.nick, trigger.sender)\n if not timezone:\n timezone = 'UTC'\n now = datetime.now(pytz.timezone(timezone))\n at_time = datetime(now.year, now.month, now.day,\n int(hour), int(minute), int(second),\n tzinfo=now.tzinfo)\n timediff = at_time - now\n else:\n if tz and tz.upper() != 'UTC':\n bot.reply(\"I don't have timzeone support installed.\")\n return NOLIMIT\n now = datetime.now()\n at_time = datetime(now.year, now.month, now.day,\n int(hour), int(minute), int(second))\n timediff = at_time - now\n\n duration = timediff.seconds\n\n if duration < 0:\n duration += 86400\n create_reminder(bot, trigger, duration, message, 'UTC')\n\n\ndef create_reminder(bot, trigger, duration, message, tz):\n t = int(time.time()) + duration\n reminder = (trigger.sender, trigger.nick, message)\n try:\n bot.rdb[t].append(reminder)\n except KeyError:\n bot.rdb[t] = [reminder]\n\n dump_database(bot.rfn, bot.rdb)\n\n if duration >= 60:\n remind_at = datetime.utcfromtimestamp(t)\n timef = willie.tools.format_time(bot.db, bot.config, tz, trigger.nick,\n trigger.sender, remind_at)\n\n bot.reply('Okay, will remind at %s' % timef)\n else:\n bot.reply('Okay, will remind in %s secs' % duration)\n", "path": "willie/modules/remind.py"}]}
| 2,814 | 261 |
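The `(?i)`/`.lower()` change above is easiest to see in isolation. The sketch below is a self-contained approximation of the reminder's duration parsing, not code from the willie module: the trimmed `scaling` table and the `parse_duration` helper are illustrative assumptions, and `re.IGNORECASE` is used in place of the inline `(?i)` group, which amounts to the same case-insensitive matching here.

```python
import re

# Trimmed-down stand-in for the module's unit table (insertion order matters:
# longer unit names must come before their prefixes, e.g. "mins" before "m").
scaling = {
    "hours": 3600, "h": 3600,
    "mins": 60, "min": 60, "m": 60,
    "secs": 1, "sec": 1, "s": 1,
}
periods = "|".join(scaling.keys())

def parse_duration(text):
    """Sum every '<number><unit>' chunk, matching units case-insensitively."""
    total = 0.0
    for number, unit in re.findall(r"(\d+(?:\.\d+)?) ?(" + periods + r")", text, flags=re.IGNORECASE):
        total += float(number) * scaling[unit.lower()]
    return total

print(parse_duration("3SEC"))      # 3.0 (the input rejected in the bug report)
print(parse_duration("2H 30Min"))  # 9000.0
```

Both steps have to tolerate upper-case input: the pattern match (here via `re.IGNORECASE`, in the patch via the inline `(?i)` flag) and the table lookup (via `.lower()`), which is exactly the pair of changes the diff makes.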
gh_patches_debug_42849
|
rasdani/github-patches
|
git_diff
|
ray-project__ray-9386
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[rllib] incorrect model output for DQN with torch and dueling=false
<!--Please include [tune], [rllib], [autoscaler] etc. in the issue title if relevant-->
### What is the problem?
The output of the DQN model is not within the action space.
Something is wrong when constructing the torch model when dueling is off. The output dimension of the model is equal to whatever is passed in "fcnet_hiddens" instead of being of the size of the action space.
*Ray version and other system information (Python version, TensorFlow version, OS):*
- ray==0.9.0.dev0
- python 3.6.10
- mac OS
### Reproduction (REQUIRED)
```python
import ray
from ray import tune
ray.init()
config = {
"env": "CartPole-v1",
"num_workers": 1,
"train_batch_size": 128,
"learning_starts": 128,
"model": {"fcnet_hiddens": [32]},
"dueling": False ,
"framework": "torch"
}
tune.run("DQN", name="MWE", config=config, stop={"training_iteration": 100})
```
- [x] I have verified my script runs in a clean environment and reproduces the issue.
- [x] I have verified the issue also occurs with the [latest wheels](https://docs.ray.io/en/latest/installation.html).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rllib/agents/dqn/dqn_torch_model.py`
Content:
```
1 import numpy as np
2
3 from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
4 from ray.rllib.utils.framework import try_import_torch
5
6 torch, nn = try_import_torch()
7
8
9 class DQNTorchModel(TorchModelV2, nn.Module):
10 """Extension of standard TorchModelV2 to provide dueling-Q functionality.
11 """
12
13 def __init__(
14 self,
15 obs_space,
16 action_space,
17 num_outputs,
18 model_config,
19 name,
20 *,
21 dueling=False,
22 q_hiddens=(256, ),
23 dueling_activation="relu",
24 use_noisy=False,
25 sigma0=0.5,
26 # TODO(sven): Move `add_layer_norm` into ModelCatalog as
27 # generic option, then error if we use ParameterNoise as
28 # Exploration type and do not have any LayerNorm layers in
29 # the net.
30 add_layer_norm=False):
31 """Initialize variables of this model.
32
33 Extra model kwargs:
34 dueling (bool): Whether to build the advantage(A)/value(V) heads
35 for DDQN. If True, Q-values are calculated as:
36 Q = (A - mean[A]) + V. If False, raw NN output is interpreted
37 as Q-values.
38 q_hiddens (List[int]): List of layer-sizes after(!) the
39 Advantages(A)/Value(V)-split. Hence, each of the A- and V-
40 branches will have this structure of Dense layers. To define
41 the NN before this A/V-split, use - as always -
42 config["model"]["fcnet_hiddens"].
43 dueling_activation (str): The activation to use for all dueling
44 layers (A- and V-branch). One of "relu", "tanh", "linear".
45 use_noisy (bool): use noisy nets
46 sigma0 (float): initial value of noisy nets
47 add_layer_norm (bool): Enable layer norm (for param noise).
48 """
49 nn.Module.__init__(self)
50 super(DQNTorchModel, self).__init__(obs_space, action_space,
51 num_outputs, model_config, name)
52
53 self.dueling = dueling
54 ins = num_outputs
55
56 # Dueling case: Build the shared (advantages and value) fc-network.
57 advantage_module = nn.Sequential()
58 value_module = None
59 if self.dueling:
60 value_module = nn.Sequential()
61 for i, n in enumerate(q_hiddens):
62 advantage_module.add_module("dueling_A_{}".format(i),
63 nn.Linear(ins, n))
64 value_module.add_module("dueling_V_{}".format(i),
65 nn.Linear(ins, n))
66 # Add activations if necessary.
67 if dueling_activation == "relu":
68 advantage_module.add_module("dueling_A_act_{}".format(i),
69 nn.ReLU())
70 value_module.add_module("dueling_V_act_{}".format(i),
71 nn.ReLU())
72 elif dueling_activation == "tanh":
73 advantage_module.add_module("dueling_A_act_{}".format(i),
74 nn.Tanh())
75 value_module.add_module("dueling_V_act_{}".format(i),
76 nn.Tanh())
77
78 # Add LayerNorm after each Dense.
79 if add_layer_norm:
80 advantage_module.add_module("LayerNorm_A_{}".format(i),
81 nn.LayerNorm(n))
82 value_module.add_module("LayerNorm_V_{}".format(i),
83 nn.LayerNorm(n))
84 ins = n
85 # Actual Advantages layer (nodes=num-actions) and
86 # value layer (nodes=1).
87 advantage_module.add_module("A", nn.Linear(ins, action_space.n))
88 value_module.add_module("V", nn.Linear(ins, 1))
89 # Non-dueling:
90 # Q-value layer (use main module's outputs as Q-values).
91 else:
92 pass
93
94 self.advantage_module = advantage_module
95 self.value_module = value_module
96
97 def get_advantages_or_q_values(self, model_out):
98 """Returns distributional values for Q(s, a) given a state embedding.
99
100 Override this in your custom model to customize the Q output head.
101
102 Arguments:
103 model_out (Tensor): embedding from the model layers
104
105 Returns:
106 (action_scores, logits, dist) if num_atoms == 1, otherwise
107 (action_scores, z, support_logits_per_action, logits, dist)
108 """
109
110 return self.advantage_module(model_out)
111
112 def get_state_value(self, model_out):
113 """Returns the state value prediction for the given state embedding."""
114
115 return self.value_module(model_out)
116
117 def _noisy_layer(self, action_in, out_size, sigma0, non_linear=True):
118 """
119 a common dense layer: y = w^{T}x + b
120 a noisy layer: y = (w + \\epsilon_w*\\sigma_w)^{T}x +
121 (b+\\epsilon_b*\\sigma_b)
122 where \epsilon are random variables sampled from factorized normal
123 distributions and \\sigma are trainable variables which are expected to
124 vanish along the training procedure
125 """
126 in_size = int(action_in.shape[1])
127
128 epsilon_in = torch.normal(
129 mean=torch.zeros([in_size]), std=torch.ones([in_size]))
130 epsilon_out = torch.normal(
131 mean=torch.zeros([out_size]), std=torch.ones([out_size]))
132 epsilon_in = self._f_epsilon(epsilon_in)
133 epsilon_out = self._f_epsilon(epsilon_out)
134 epsilon_w = torch.matmul(
135 torch.unsqueeze(epsilon_in, -1),
136 other=torch.unsqueeze(epsilon_out, 0))
137 epsilon_b = epsilon_out
138
139 sigma_w = torch.Tensor(
140 data=np.random.uniform(
141 low=-1.0 / np.sqrt(float(in_size)),
142 high=1.0 / np.sqrt(float(in_size)),
143 size=[in_size, out_size]),
144 dtype=torch.float32,
145 requires_grad=True)
146 # TF noise generation can be unreliable on GPU
147 # If generating the noise on the CPU,
148 # lowering sigma0 to 0.1 may be helpful
149 sigma_b = torch.Tensor(
150 data=np.full(
151 shape=[out_size], fill_value=sigma0 / np.sqrt(float(in_size))),
152 requires_grad=True)
153 w = torch.Tensor(
154 data=np.full(
155 shape=[in_size, out_size],
156 fill_value=6 / np.sqrt(float(in_size) + float(out_size))),
157 requires_grad=True)
158 b = torch.Tensor(data=np.zeros([out_size]), requires_grad=True)
159 action_activation = torch.matmul(action_in, w + sigma_w * epsilon_w) \
160 + b + sigma_b * epsilon_b
161
162 if not non_linear:
163 return action_activation
164 return nn.functional.relu(action_activation)
165
166 def _f_epsilon(self, x):
167 return torch.sign(x) * torch.pow(torch.abs(x), 0.5)
168
```
Path: `rllib/examples/parametric_actions_cartpole.py`
Content:
```
1 """Example of handling variable length and/or parametric action spaces.
2
3 This is a toy example of the action-embedding based approach for handling large
4 discrete action spaces (potentially infinite in size), similar to this:
5
6 https://neuro.cs.ut.ee/the-use-of-embeddings-in-openai-five/
7
8 This currently works with RLlib's policy gradient style algorithms
9 (e.g., PG, PPO, IMPALA, A2C) and also DQN.
10
11 Note that since the model outputs now include "-inf" tf.float32.min
12 values, not all algorithm options are supported at the moment. For example,
13 algorithms might crash if they don't properly ignore the -inf action scores.
14 Working configurations are given below.
15 """
16
17 import argparse
18
19 import ray
20 from ray import tune
21 from ray.rllib.examples.env.parametric_actions_cartpole import \
22 ParametricActionsCartPole
23 from ray.rllib.examples.models.parametric_actions_model import \
24 ParametricActionsModel, TorchParametricActionsModel
25 from ray.rllib.models import ModelCatalog
26 from ray.rllib.utils.test_utils import check_learning_achieved
27 from ray.tune.registry import register_env
28
29 parser = argparse.ArgumentParser()
30 parser.add_argument("--run", type=str, default="PPO")
31 parser.add_argument("--torch", action="store_true")
32 parser.add_argument("--as-test", action="store_true")
33 parser.add_argument("--stop-iters", type=int, default=200)
34 parser.add_argument("--stop-reward", type=float, default=150.0)
35 parser.add_argument("--stop-timesteps", type=int, default=100000)
36
37 if __name__ == "__main__":
38 args = parser.parse_args()
39 ray.init()
40
41 register_env("pa_cartpole", lambda _: ParametricActionsCartPole(10))
42 ModelCatalog.register_custom_model(
43 "pa_model", TorchParametricActionsModel
44 if args.torch else ParametricActionsModel)
45
46 if args.run == "DQN":
47 cfg = {
48 # TODO(ekl) we need to set these to prevent the masked values
49 # from being further processed in DistributionalQModel, which
50 # would mess up the masking. It is possible to support these if we
51 # defined a a custom DistributionalQModel that is aware of masking.
52 "hiddens": [],
53 "dueling": False,
54 }
55 else:
56 cfg = {}
57
58 config = dict({
59 "env": "pa_cartpole",
60 "model": {
61 "custom_model": "pa_model",
62 },
63 "num_workers": 0,
64 "framework": "torch" if args.torch else "tf",
65 }, **cfg)
66
67 stop = {
68 "training_iteration": args.stop_iters,
69 "timesteps_total": args.stop_timesteps,
70 "episode_reward_mean": args.stop_reward,
71 }
72
73 results = tune.run(args.run, stop=stop, config=config, verbose=1)
74
75 if args.as_test:
76 check_learning_achieved(results, args.stop_reward)
77
78 ray.shutdown()
79
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/rllib/agents/dqn/dqn_torch_model.py b/rllib/agents/dqn/dqn_torch_model.py
--- a/rllib/agents/dqn/dqn_torch_model.py
+++ b/rllib/agents/dqn/dqn_torch_model.py
@@ -53,46 +53,45 @@
self.dueling = dueling
ins = num_outputs
- # Dueling case: Build the shared (advantages and value) fc-network.
advantage_module = nn.Sequential()
- value_module = None
- if self.dueling:
- value_module = nn.Sequential()
- for i, n in enumerate(q_hiddens):
- advantage_module.add_module("dueling_A_{}".format(i),
- nn.Linear(ins, n))
- value_module.add_module("dueling_V_{}".format(i),
+ value_module = nn.Sequential()
+
+ # Dueling case: Build the shared (advantages and value) fc-network.
+ for i, n in enumerate(q_hiddens):
+ advantage_module.add_module("dueling_A_{}".format(i),
nn.Linear(ins, n))
- # Add activations if necessary.
- if dueling_activation == "relu":
- advantage_module.add_module("dueling_A_act_{}".format(i),
- nn.ReLU())
- value_module.add_module("dueling_V_act_{}".format(i),
+ value_module.add_module("dueling_V_{}".format(i),
+ nn.Linear(ins, n))
+ # Add activations if necessary.
+ if dueling_activation == "relu":
+ advantage_module.add_module("dueling_A_act_{}".format(i),
nn.ReLU())
- elif dueling_activation == "tanh":
- advantage_module.add_module("dueling_A_act_{}".format(i),
- nn.Tanh())
- value_module.add_module("dueling_V_act_{}".format(i),
+ value_module.add_module("dueling_V_act_{}".format(i),
+ nn.ReLU())
+ elif dueling_activation == "tanh":
+ advantage_module.add_module("dueling_A_act_{}".format(i),
nn.Tanh())
+ value_module.add_module("dueling_V_act_{}".format(i),
+ nn.Tanh())
- # Add LayerNorm after each Dense.
- if add_layer_norm:
- advantage_module.add_module("LayerNorm_A_{}".format(i),
- nn.LayerNorm(n))
- value_module.add_module("LayerNorm_V_{}".format(i),
+ # Add LayerNorm after each Dense.
+ if add_layer_norm:
+ advantage_module.add_module("LayerNorm_A_{}".format(i),
nn.LayerNorm(n))
- ins = n
- # Actual Advantages layer (nodes=num-actions) and
- # value layer (nodes=1).
+ value_module.add_module("LayerNorm_V_{}".format(i),
+ nn.LayerNorm(n))
+ ins = n
+
+ # Actual Advantages layer (nodes=num-actions).
+ if q_hiddens:
advantage_module.add_module("A", nn.Linear(ins, action_space.n))
- value_module.add_module("V", nn.Linear(ins, 1))
- # Non-dueling:
- # Q-value layer (use main module's outputs as Q-values).
- else:
- pass
self.advantage_module = advantage_module
- self.value_module = value_module
+
+ # Value layer (nodes=1).
+ if self.dueling:
+ value_module.add_module("V", nn.Linear(ins, 1))
+ self.value_module = value_module
def get_advantages_or_q_values(self, model_out):
"""Returns distributional values for Q(s, a) given a state embedding.
diff --git a/rllib/examples/parametric_actions_cartpole.py b/rllib/examples/parametric_actions_cartpole.py
--- a/rllib/examples/parametric_actions_cartpole.py
+++ b/rllib/examples/parametric_actions_cartpole.py
@@ -48,7 +48,7 @@
# TODO(ekl) we need to set these to prevent the masked values
# from being further processed in DistributionalQModel, which
# would mess up the masking. It is possible to support these if we
- # defined a a custom DistributionalQModel that is aware of masking.
+ # defined a custom DistributionalQModel that is aware of masking.
"hiddens": [],
"dueling": False,
}
|
{"golden_diff": "diff --git a/rllib/agents/dqn/dqn_torch_model.py b/rllib/agents/dqn/dqn_torch_model.py\n--- a/rllib/agents/dqn/dqn_torch_model.py\n+++ b/rllib/agents/dqn/dqn_torch_model.py\n@@ -53,46 +53,45 @@\n self.dueling = dueling\n ins = num_outputs\n \n- # Dueling case: Build the shared (advantages and value) fc-network.\n advantage_module = nn.Sequential()\n- value_module = None\n- if self.dueling:\n- value_module = nn.Sequential()\n- for i, n in enumerate(q_hiddens):\n- advantage_module.add_module(\"dueling_A_{}\".format(i),\n- nn.Linear(ins, n))\n- value_module.add_module(\"dueling_V_{}\".format(i),\n+ value_module = nn.Sequential()\n+\n+ # Dueling case: Build the shared (advantages and value) fc-network.\n+ for i, n in enumerate(q_hiddens):\n+ advantage_module.add_module(\"dueling_A_{}\".format(i),\n nn.Linear(ins, n))\n- # Add activations if necessary.\n- if dueling_activation == \"relu\":\n- advantage_module.add_module(\"dueling_A_act_{}\".format(i),\n- nn.ReLU())\n- value_module.add_module(\"dueling_V_act_{}\".format(i),\n+ value_module.add_module(\"dueling_V_{}\".format(i),\n+ nn.Linear(ins, n))\n+ # Add activations if necessary.\n+ if dueling_activation == \"relu\":\n+ advantage_module.add_module(\"dueling_A_act_{}\".format(i),\n nn.ReLU())\n- elif dueling_activation == \"tanh\":\n- advantage_module.add_module(\"dueling_A_act_{}\".format(i),\n- nn.Tanh())\n- value_module.add_module(\"dueling_V_act_{}\".format(i),\n+ value_module.add_module(\"dueling_V_act_{}\".format(i),\n+ nn.ReLU())\n+ elif dueling_activation == \"tanh\":\n+ advantage_module.add_module(\"dueling_A_act_{}\".format(i),\n nn.Tanh())\n+ value_module.add_module(\"dueling_V_act_{}\".format(i),\n+ nn.Tanh())\n \n- # Add LayerNorm after each Dense.\n- if add_layer_norm:\n- advantage_module.add_module(\"LayerNorm_A_{}\".format(i),\n- nn.LayerNorm(n))\n- value_module.add_module(\"LayerNorm_V_{}\".format(i),\n+ # Add LayerNorm after each Dense.\n+ if add_layer_norm:\n+ advantage_module.add_module(\"LayerNorm_A_{}\".format(i),\n nn.LayerNorm(n))\n- ins = n\n- # Actual Advantages layer (nodes=num-actions) and\n- # value layer (nodes=1).\n+ value_module.add_module(\"LayerNorm_V_{}\".format(i),\n+ nn.LayerNorm(n))\n+ ins = n\n+\n+ # Actual Advantages layer (nodes=num-actions).\n+ if q_hiddens:\n advantage_module.add_module(\"A\", nn.Linear(ins, action_space.n))\n- value_module.add_module(\"V\", nn.Linear(ins, 1))\n- # Non-dueling:\n- # Q-value layer (use main module's outputs as Q-values).\n- else:\n- pass\n \n self.advantage_module = advantage_module\n- self.value_module = value_module\n+\n+ # Value layer (nodes=1).\n+ if self.dueling:\n+ value_module.add_module(\"V\", nn.Linear(ins, 1))\n+ self.value_module = value_module\n \n def get_advantages_or_q_values(self, model_out):\n \"\"\"Returns distributional values for Q(s, a) given a state embedding.\ndiff --git a/rllib/examples/parametric_actions_cartpole.py b/rllib/examples/parametric_actions_cartpole.py\n--- a/rllib/examples/parametric_actions_cartpole.py\n+++ b/rllib/examples/parametric_actions_cartpole.py\n@@ -48,7 +48,7 @@\n # TODO(ekl) we need to set these to prevent the masked values\n # from being further processed in DistributionalQModel, which\n # would mess up the masking. 
It is possible to support these if we\n- # defined a a custom DistributionalQModel that is aware of masking.\n+ # defined a custom DistributionalQModel that is aware of masking.\n \"hiddens\": [],\n \"dueling\": False,\n }\n", "issue": "[rllib] incorrect model output for DQN with torch and dueling=false \n<!--Please include [tune], [rllib], [autoscaler] etc. in the issue title if relevant-->\r\n\r\n### What is the problem?\r\n\r\nThe output fo the DQN model is not within the action space. \r\n\r\nSomething is wrong when constructing the torch model when dueling is off. The output dimension of the model is equal to whatever is passed in \"fcnet_hiddens\" instead of being of the size of the action space. \r\n\r\n*Ray version and other system information (Python version, TensorFlow version, OS):*\r\n- ray==0.9.0.dev0 \r\n- python 3.6.10\r\n- mac OS\r\n\r\n### Reproduction (REQUIRED)\r\n\r\n```python\r\nimport ray\r\nfrom ray import tune\r\n\r\nray.init()\r\n\r\nconfig = {\r\n \"env\": \"CartPole-v1\",\r\n \"num_workers\": 1,\r\n \"train_batch_size\": 128,\r\n \"learning_starts\": 128,\r\n \"model\": {\"fcnet_hiddens\": [32]},\r\n \"dueling\": False ,\r\n \"framework\": \"torch\"\r\n}\r\n\r\ntune.run(\"DQN\", name=\"MWE\", config=config, stop={\"training_iteration\": 100})\r\n```\r\n\r\n- [x] I have verified my script runs in a clean environment and reproduces the issue.\r\n- [x] I have verified the issue also occurs with the [latest wheels](https://docs.ray.io/en/latest/installation.html).\r\n\n", "before_files": [{"content": "import numpy as np\n\nfrom ray.rllib.models.torch.torch_modelv2 import TorchModelV2\nfrom ray.rllib.utils.framework import try_import_torch\n\ntorch, nn = try_import_torch()\n\n\nclass DQNTorchModel(TorchModelV2, nn.Module):\n \"\"\"Extension of standard TorchModelV2 to provide dueling-Q functionality.\n \"\"\"\n\n def __init__(\n self,\n obs_space,\n action_space,\n num_outputs,\n model_config,\n name,\n *,\n dueling=False,\n q_hiddens=(256, ),\n dueling_activation=\"relu\",\n use_noisy=False,\n sigma0=0.5,\n # TODO(sven): Move `add_layer_norm` into ModelCatalog as\n # generic option, then error if we use ParameterNoise as\n # Exploration type and do not have any LayerNorm layers in\n # the net.\n add_layer_norm=False):\n \"\"\"Initialize variables of this model.\n\n Extra model kwargs:\n dueling (bool): Whether to build the advantage(A)/value(V) heads\n for DDQN. If True, Q-values are calculated as:\n Q = (A - mean[A]) + V. If False, raw NN output is interpreted\n as Q-values.\n q_hiddens (List[int]): List of layer-sizes after(!) the\n Advantages(A)/Value(V)-split. Hence, each of the A- and V-\n branches will have this structure of Dense layers. To define\n the NN before this A/V-split, use - as always -\n config[\"model\"][\"fcnet_hiddens\"].\n dueling_activation (str): The activation to use for all dueling\n layers (A- and V-branch). 
One of \"relu\", \"tanh\", \"linear\".\n use_noisy (bool): use noisy nets\n sigma0 (float): initial value of noisy nets\n add_layer_norm (bool): Enable layer norm (for param noise).\n \"\"\"\n nn.Module.__init__(self)\n super(DQNTorchModel, self).__init__(obs_space, action_space,\n num_outputs, model_config, name)\n\n self.dueling = dueling\n ins = num_outputs\n\n # Dueling case: Build the shared (advantages and value) fc-network.\n advantage_module = nn.Sequential()\n value_module = None\n if self.dueling:\n value_module = nn.Sequential()\n for i, n in enumerate(q_hiddens):\n advantage_module.add_module(\"dueling_A_{}\".format(i),\n nn.Linear(ins, n))\n value_module.add_module(\"dueling_V_{}\".format(i),\n nn.Linear(ins, n))\n # Add activations if necessary.\n if dueling_activation == \"relu\":\n advantage_module.add_module(\"dueling_A_act_{}\".format(i),\n nn.ReLU())\n value_module.add_module(\"dueling_V_act_{}\".format(i),\n nn.ReLU())\n elif dueling_activation == \"tanh\":\n advantage_module.add_module(\"dueling_A_act_{}\".format(i),\n nn.Tanh())\n value_module.add_module(\"dueling_V_act_{}\".format(i),\n nn.Tanh())\n\n # Add LayerNorm after each Dense.\n if add_layer_norm:\n advantage_module.add_module(\"LayerNorm_A_{}\".format(i),\n nn.LayerNorm(n))\n value_module.add_module(\"LayerNorm_V_{}\".format(i),\n nn.LayerNorm(n))\n ins = n\n # Actual Advantages layer (nodes=num-actions) and\n # value layer (nodes=1).\n advantage_module.add_module(\"A\", nn.Linear(ins, action_space.n))\n value_module.add_module(\"V\", nn.Linear(ins, 1))\n # Non-dueling:\n # Q-value layer (use main module's outputs as Q-values).\n else:\n pass\n\n self.advantage_module = advantage_module\n self.value_module = value_module\n\n def get_advantages_or_q_values(self, model_out):\n \"\"\"Returns distributional values for Q(s, a) given a state embedding.\n\n Override this in your custom model to customize the Q output head.\n\n Arguments:\n model_out (Tensor): embedding from the model layers\n\n Returns:\n (action_scores, logits, dist) if num_atoms == 1, otherwise\n (action_scores, z, support_logits_per_action, logits, dist)\n \"\"\"\n\n return self.advantage_module(model_out)\n\n def get_state_value(self, model_out):\n \"\"\"Returns the state value prediction for the given state embedding.\"\"\"\n\n return self.value_module(model_out)\n\n def _noisy_layer(self, action_in, out_size, sigma0, non_linear=True):\n \"\"\"\n a common dense layer: y = w^{T}x + b\n a noisy layer: y = (w + \\\\epsilon_w*\\\\sigma_w)^{T}x +\n (b+\\\\epsilon_b*\\\\sigma_b)\n where \\epsilon are random variables sampled from factorized normal\n distributions and \\\\sigma are trainable variables which are expected to\n vanish along the training procedure\n \"\"\"\n in_size = int(action_in.shape[1])\n\n epsilon_in = torch.normal(\n mean=torch.zeros([in_size]), std=torch.ones([in_size]))\n epsilon_out = torch.normal(\n mean=torch.zeros([out_size]), std=torch.ones([out_size]))\n epsilon_in = self._f_epsilon(epsilon_in)\n epsilon_out = self._f_epsilon(epsilon_out)\n epsilon_w = torch.matmul(\n torch.unsqueeze(epsilon_in, -1),\n other=torch.unsqueeze(epsilon_out, 0))\n epsilon_b = epsilon_out\n\n sigma_w = torch.Tensor(\n data=np.random.uniform(\n low=-1.0 / np.sqrt(float(in_size)),\n high=1.0 / np.sqrt(float(in_size)),\n size=[in_size, out_size]),\n dtype=torch.float32,\n requires_grad=True)\n # TF noise generation can be unreliable on GPU\n # If generating the noise on the CPU,\n # lowering sigma0 to 0.1 may be helpful\n sigma_b = torch.Tensor(\n 
data=np.full(\n shape=[out_size], fill_value=sigma0 / np.sqrt(float(in_size))),\n requires_grad=True)\n w = torch.Tensor(\n data=np.full(\n shape=[in_size, out_size],\n fill_value=6 / np.sqrt(float(in_size) + float(out_size))),\n requires_grad=True)\n b = torch.Tensor(data=np.zeros([out_size]), requires_grad=True)\n action_activation = torch.matmul(action_in, w + sigma_w * epsilon_w) \\\n + b + sigma_b * epsilon_b\n\n if not non_linear:\n return action_activation\n return nn.functional.relu(action_activation)\n\n def _f_epsilon(self, x):\n return torch.sign(x) * torch.pow(torch.abs(x), 0.5)\n", "path": "rllib/agents/dqn/dqn_torch_model.py"}, {"content": "\"\"\"Example of handling variable length and/or parametric action spaces.\n\nThis is a toy example of the action-embedding based approach for handling large\ndiscrete action spaces (potentially infinite in size), similar to this:\n\n https://neuro.cs.ut.ee/the-use-of-embeddings-in-openai-five/\n\nThis currently works with RLlib's policy gradient style algorithms\n(e.g., PG, PPO, IMPALA, A2C) and also DQN.\n\nNote that since the model outputs now include \"-inf\" tf.float32.min\nvalues, not all algorithm options are supported at the moment. For example,\nalgorithms might crash if they don't properly ignore the -inf action scores.\nWorking configurations are given below.\n\"\"\"\n\nimport argparse\n\nimport ray\nfrom ray import tune\nfrom ray.rllib.examples.env.parametric_actions_cartpole import \\\n ParametricActionsCartPole\nfrom ray.rllib.examples.models.parametric_actions_model import \\\n ParametricActionsModel, TorchParametricActionsModel\nfrom ray.rllib.models import ModelCatalog\nfrom ray.rllib.utils.test_utils import check_learning_achieved\nfrom ray.tune.registry import register_env\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--run\", type=str, default=\"PPO\")\nparser.add_argument(\"--torch\", action=\"store_true\")\nparser.add_argument(\"--as-test\", action=\"store_true\")\nparser.add_argument(\"--stop-iters\", type=int, default=200)\nparser.add_argument(\"--stop-reward\", type=float, default=150.0)\nparser.add_argument(\"--stop-timesteps\", type=int, default=100000)\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n ray.init()\n\n register_env(\"pa_cartpole\", lambda _: ParametricActionsCartPole(10))\n ModelCatalog.register_custom_model(\n \"pa_model\", TorchParametricActionsModel\n if args.torch else ParametricActionsModel)\n\n if args.run == \"DQN\":\n cfg = {\n # TODO(ekl) we need to set these to prevent the masked values\n # from being further processed in DistributionalQModel, which\n # would mess up the masking. 
It is possible to support these if we\n # defined a a custom DistributionalQModel that is aware of masking.\n \"hiddens\": [],\n \"dueling\": False,\n }\n else:\n cfg = {}\n\n config = dict({\n \"env\": \"pa_cartpole\",\n \"model\": {\n \"custom_model\": \"pa_model\",\n },\n \"num_workers\": 0,\n \"framework\": \"torch\" if args.torch else \"tf\",\n }, **cfg)\n\n stop = {\n \"training_iteration\": args.stop_iters,\n \"timesteps_total\": args.stop_timesteps,\n \"episode_reward_mean\": args.stop_reward,\n }\n\n results = tune.run(args.run, stop=stop, config=config, verbose=1)\n\n if args.as_test:\n check_learning_achieved(results, args.stop_reward)\n\n ray.shutdown()\n", "path": "rllib/examples/parametric_actions_cartpole.py"}], "after_files": [{"content": "import numpy as np\n\nfrom ray.rllib.models.torch.torch_modelv2 import TorchModelV2\nfrom ray.rllib.utils.framework import try_import_torch\n\ntorch, nn = try_import_torch()\n\n\nclass DQNTorchModel(TorchModelV2, nn.Module):\n \"\"\"Extension of standard TorchModelV2 to provide dueling-Q functionality.\n \"\"\"\n\n def __init__(\n self,\n obs_space,\n action_space,\n num_outputs,\n model_config,\n name,\n *,\n dueling=False,\n q_hiddens=(256, ),\n dueling_activation=\"relu\",\n use_noisy=False,\n sigma0=0.5,\n # TODO(sven): Move `add_layer_norm` into ModelCatalog as\n # generic option, then error if we use ParameterNoise as\n # Exploration type and do not have any LayerNorm layers in\n # the net.\n add_layer_norm=False):\n \"\"\"Initialize variables of this model.\n\n Extra model kwargs:\n dueling (bool): Whether to build the advantage(A)/value(V) heads\n for DDQN. If True, Q-values are calculated as:\n Q = (A - mean[A]) + V. If False, raw NN output is interpreted\n as Q-values.\n q_hiddens (List[int]): List of layer-sizes after(!) the\n Advantages(A)/Value(V)-split. Hence, each of the A- and V-\n branches will have this structure of Dense layers. To define\n the NN before this A/V-split, use - as always -\n config[\"model\"][\"fcnet_hiddens\"].\n dueling_activation (str): The activation to use for all dueling\n layers (A- and V-branch). 
One of \"relu\", \"tanh\", \"linear\".\n use_noisy (bool): use noisy nets\n sigma0 (float): initial value of noisy nets\n add_layer_norm (bool): Enable layer norm (for param noise).\n \"\"\"\n nn.Module.__init__(self)\n super(DQNTorchModel, self).__init__(obs_space, action_space,\n num_outputs, model_config, name)\n\n self.dueling = dueling\n ins = num_outputs\n\n advantage_module = nn.Sequential()\n value_module = nn.Sequential()\n\n # Dueling case: Build the shared (advantages and value) fc-network.\n for i, n in enumerate(q_hiddens):\n advantage_module.add_module(\"dueling_A_{}\".format(i),\n nn.Linear(ins, n))\n value_module.add_module(\"dueling_V_{}\".format(i),\n nn.Linear(ins, n))\n # Add activations if necessary.\n if dueling_activation == \"relu\":\n advantage_module.add_module(\"dueling_A_act_{}\".format(i),\n nn.ReLU())\n value_module.add_module(\"dueling_V_act_{}\".format(i),\n nn.ReLU())\n elif dueling_activation == \"tanh\":\n advantage_module.add_module(\"dueling_A_act_{}\".format(i),\n nn.Tanh())\n value_module.add_module(\"dueling_V_act_{}\".format(i),\n nn.Tanh())\n\n # Add LayerNorm after each Dense.\n if add_layer_norm:\n advantage_module.add_module(\"LayerNorm_A_{}\".format(i),\n nn.LayerNorm(n))\n value_module.add_module(\"LayerNorm_V_{}\".format(i),\n nn.LayerNorm(n))\n ins = n\n\n # Actual Advantages layer (nodes=num-actions).\n if q_hiddens:\n advantage_module.add_module(\"A\", nn.Linear(ins, action_space.n))\n\n self.advantage_module = advantage_module\n\n # Value layer (nodes=1).\n if self.dueling:\n value_module.add_module(\"V\", nn.Linear(ins, 1))\n self.value_module = value_module\n\n def get_advantages_or_q_values(self, model_out):\n \"\"\"Returns distributional values for Q(s, a) given a state embedding.\n\n Override this in your custom model to customize the Q output head.\n\n Arguments:\n model_out (Tensor): embedding from the model layers\n\n Returns:\n (action_scores, logits, dist) if num_atoms == 1, otherwise\n (action_scores, z, support_logits_per_action, logits, dist)\n \"\"\"\n\n return self.advantage_module(model_out)\n\n def get_state_value(self, model_out):\n \"\"\"Returns the state value prediction for the given state embedding.\"\"\"\n\n return self.value_module(model_out)\n\n def _noisy_layer(self, action_in, out_size, sigma0, non_linear=True):\n \"\"\"\n a common dense layer: y = w^{T}x + b\n a noisy layer: y = (w + \\\\epsilon_w*\\\\sigma_w)^{T}x +\n (b+\\\\epsilon_b*\\\\sigma_b)\n where \\epsilon are random variables sampled from factorized normal\n distributions and \\\\sigma are trainable variables which are expected to\n vanish along the training procedure\n \"\"\"\n in_size = int(action_in.shape[1])\n\n epsilon_in = torch.normal(\n mean=torch.zeros([in_size]), std=torch.ones([in_size]))\n epsilon_out = torch.normal(\n mean=torch.zeros([out_size]), std=torch.ones([out_size]))\n epsilon_in = self._f_epsilon(epsilon_in)\n epsilon_out = self._f_epsilon(epsilon_out)\n epsilon_w = torch.matmul(\n torch.unsqueeze(epsilon_in, -1),\n other=torch.unsqueeze(epsilon_out, 0))\n epsilon_b = epsilon_out\n\n sigma_w = torch.Tensor(\n data=np.random.uniform(\n low=-1.0 / np.sqrt(float(in_size)),\n high=1.0 / np.sqrt(float(in_size)),\n size=[in_size, out_size]),\n dtype=torch.float32,\n requires_grad=True)\n # TF noise generation can be unreliable on GPU\n # If generating the noise on the CPU,\n # lowering sigma0 to 0.1 may be helpful\n sigma_b = torch.Tensor(\n data=np.full(\n shape=[out_size], fill_value=sigma0 / np.sqrt(float(in_size))),\n 
requires_grad=True)\n w = torch.Tensor(\n data=np.full(\n shape=[in_size, out_size],\n fill_value=6 / np.sqrt(float(in_size) + float(out_size))),\n requires_grad=True)\n b = torch.Tensor(data=np.zeros([out_size]), requires_grad=True)\n action_activation = torch.matmul(action_in, w + sigma_w * epsilon_w) \\\n + b + sigma_b * epsilon_b\n\n if not non_linear:\n return action_activation\n return nn.functional.relu(action_activation)\n\n def _f_epsilon(self, x):\n return torch.sign(x) * torch.pow(torch.abs(x), 0.5)\n", "path": "rllib/agents/dqn/dqn_torch_model.py"}, {"content": "\"\"\"Example of handling variable length and/or parametric action spaces.\n\nThis is a toy example of the action-embedding based approach for handling large\ndiscrete action spaces (potentially infinite in size), similar to this:\n\n https://neuro.cs.ut.ee/the-use-of-embeddings-in-openai-five/\n\nThis currently works with RLlib's policy gradient style algorithms\n(e.g., PG, PPO, IMPALA, A2C) and also DQN.\n\nNote that since the model outputs now include \"-inf\" tf.float32.min\nvalues, not all algorithm options are supported at the moment. For example,\nalgorithms might crash if they don't properly ignore the -inf action scores.\nWorking configurations are given below.\n\"\"\"\n\nimport argparse\n\nimport ray\nfrom ray import tune\nfrom ray.rllib.examples.env.parametric_actions_cartpole import \\\n ParametricActionsCartPole\nfrom ray.rllib.examples.models.parametric_actions_model import \\\n ParametricActionsModel, TorchParametricActionsModel\nfrom ray.rllib.models import ModelCatalog\nfrom ray.rllib.utils.test_utils import check_learning_achieved\nfrom ray.tune.registry import register_env\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--run\", type=str, default=\"PPO\")\nparser.add_argument(\"--torch\", action=\"store_true\")\nparser.add_argument(\"--as-test\", action=\"store_true\")\nparser.add_argument(\"--stop-iters\", type=int, default=200)\nparser.add_argument(\"--stop-reward\", type=float, default=150.0)\nparser.add_argument(\"--stop-timesteps\", type=int, default=100000)\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n ray.init()\n\n register_env(\"pa_cartpole\", lambda _: ParametricActionsCartPole(10))\n ModelCatalog.register_custom_model(\n \"pa_model\", TorchParametricActionsModel\n if args.torch else ParametricActionsModel)\n\n if args.run == \"DQN\":\n cfg = {\n # TODO(ekl) we need to set these to prevent the masked values\n # from being further processed in DistributionalQModel, which\n # would mess up the masking. It is possible to support these if we\n # defined a custom DistributionalQModel that is aware of masking.\n \"hiddens\": [],\n \"dueling\": False,\n }\n else:\n cfg = {}\n\n config = dict({\n \"env\": \"pa_cartpole\",\n \"model\": {\n \"custom_model\": \"pa_model\",\n },\n \"num_workers\": 0,\n \"framework\": \"torch\" if args.torch else \"tf\",\n }, **cfg)\n\n stop = {\n \"training_iteration\": args.stop_iters,\n \"timesteps_total\": args.stop_timesteps,\n \"episode_reward_mean\": args.stop_reward,\n }\n\n results = tune.run(args.run, stop=stop, config=config, verbose=1)\n\n if args.as_test:\n check_learning_achieved(results, args.stop_reward)\n\n ray.shutdown()\n", "path": "rllib/examples/parametric_actions_cartpole.py"}]}
| 3,292 | 978 |
gh_patches_debug_35288
|
rasdani/github-patches
|
git_diff
|
optuna__optuna-4151
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
JournalStorage not working on Windows10
### Expected behavior
I was using the minimal JournalStorage example from the optuna documentation on my Windows10 PC.
However the script is not able to run and throws every time json decoding errors.
According to the latest documentation I didn´t see any limitations regarding the OS on which JournalStorage can be used. But maybe I was missing something.
### Environment
- Optuna version:3.1.0.dev
- Python version:3.10.6
- OS:Windows-10-10.0.19044-SP0
- (Optional) Other libraries and their versions:
### Error messages, stack traces, or logs
```shell
C:\1_Files\M\optuna_trial.py:9: ExperimentalWarning: JournalStorage is experimental (supported from v3.1.0). The interface can change in the future.
storage = optuna.storages.JournalStorage(
Traceback (most recent call last):
File "C:\1_Files\M\optuna_trial.py", line 12, in <module>
study = optuna.create_study(storage=storage)
File "C:\Users\Prometheus\miniconda3\envs\py310\lib\site-packages\optuna\_convert_positional_args.py", line 63, in converter_wrapper
return func(**kwargs)
File "C:\Users\Prometheus\miniconda3\envs\py310\lib\site-packages\optuna\study\study.py", line 1155, in create_study
study_id = storage.create_new_study(study_name)
File "C:\Users\Prometheus\miniconda3\envs\py310\lib\site-packages\optuna\storages\_journal\storage.py", line 111, in create_new_study
self._sync_with_backend()
File "C:\Users\Prometheus\miniconda3\envs\py310\lib\site-packages\optuna\storages\_journal\storage.py", line 103, in _sync_with_backend
logs = self._backend.read_logs(self._replay_result.log_number_read)
File "C:\Users\Prometheus\miniconda3\envs\py310\lib\site-packages\optuna\storages\_journal\file.py", line 174, in read_logs
raise last_decode_error
File "C:\Users\Prometheus\miniconda3\envs\py310\lib\site-packages\optuna\storages\_journal\file.py", line 184, in read_logs
logs.append(json.loads(line))
File "C:\Users\Prometheus\miniconda3\envs\py310\lib\json\__init__.py", line 346, in loads
return _default_decoder.decode(s)
File "C:\Users\Prometheus\miniconda3\envs\py310\lib\json\decoder.py", line 337, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
File "C:\Users\Prometheus\miniconda3\envs\py310\lib\json\decoder.py", line 353, in raw_decode
obj, end = self.scan_once(s, idx)
json.decoder.JSONDecodeError: Invalid control character at: line 1 column 3 (char 2)
```
### Steps to reproduce
1. Run any JournalStorage example on a Windows10 PC
2. Python finishes with error
3.
```python
import optuna
def objective(trial):
x = trial.suggest_float("x", -10, 10)
return (x - 2) ** 2
storage = optuna.storages.JournalStorage(
optuna.storages.JournalFileStorage("./log_file"))
study = optuna.create_study(storage=storage)
study.optimize(objective)
```
### Additional context (optional)
running the script as an admin yields a slightly different error
```
File "C:\Users\Prometheus\miniconda3\envs\py310\lib\json\decoder.py", line 355, in raw_decode
raise JSONDecodeError("Expecting value", s, err.value) from None
json.decoder.JSONDecodeError: Expecting value: line 2 column 1 (char 1)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `optuna/storages/_journal/file.py`
Content:
```
1 import abc
2 from contextlib import contextmanager
3 import errno
4 import json
5 import os
6 import time
7 from typing import Any
8 from typing import Dict
9 from typing import Iterator
10 from typing import List
11 from typing import Optional
12 import uuid
13
14 from optuna.storages._journal.base import BaseJournalLogStorage
15
16
17 LOCK_FILE_SUFFIX = ".lock"
18 RENAME_FILE_SUFFIX = ".rename"
19
20
21 class JournalFileBaseLock(abc.ABC):
22 @abc.abstractmethod
23 def acquire(self) -> bool:
24 raise NotImplementedError
25
26 @abc.abstractmethod
27 def release(self) -> None:
28 raise NotImplementedError
29
30
31 class JournalFileSymlinkLock(JournalFileBaseLock):
32 """Lock class for synchronizing processes for NFSv2 or later.
33
34 On acquiring the lock, link system call is called to create an exclusive file. The file is
35 deleted when the lock is released. In NFS environments prior to NFSv3, use this instead of
36 :class:`~optuna.storages.JournalFileOpenLock`
37
38 Args:
39 filepath:
40 The path of the file whose race condition must be protected.
41 """
42
43 def __init__(self, filepath: str) -> None:
44 self._lock_target_file = filepath
45 self._lock_file = filepath + LOCK_FILE_SUFFIX
46 self._lock_rename_file = self._lock_file + str(uuid.uuid4()) + RENAME_FILE_SUFFIX
47
48 def acquire(self) -> bool:
49 """Acquire a lock in a blocking way by creating a symbolic link of a file.
50
51 Returns:
52 :obj:`True` if it succeeded in creating a symbolic link of `self._lock_target_file`.
53
54 """
55 sleep_secs = 0.001
56 while True:
57 try:
58 os.symlink(self._lock_target_file, self._lock_file)
59 return True
60 except OSError as err:
61 if err.errno == errno.EEXIST:
62 time.sleep(sleep_secs)
63 sleep_secs = min(sleep_secs * 2, 1)
64 continue
65 raise err
66 except BaseException:
67 self.release()
68 raise
69
70 def release(self) -> None:
71 """Release a lock by removing the symbolic link."""
72
73 try:
74 os.rename(self._lock_file, self._lock_rename_file)
75 os.unlink(self._lock_rename_file)
76 except OSError:
77 raise RuntimeError("Error: did not possess lock")
78 except BaseException:
79 os.unlink(self._lock_rename_file)
80 raise
81
82
83 class JournalFileOpenLock(JournalFileBaseLock):
84 """Lock class for synchronizing processes for NFSv3 or later.
85
86 On acquiring the lock, open system call is called with the O_EXCL option to create an exclusive
87 file. The file is deleted when the lock is released. This class is only supported when using
88 NFSv3 or later on kernel 2.6 or later. In prior NFS environments, use
89 :class:`~optuna.storages.JournalFileSymlinkLock`.
90
91 Args:
92 filepath:
93 The path of the file whose race condition must be protected.
94 """
95
96 def __init__(self, filepath: str) -> None:
97 self._lock_file = filepath + LOCK_FILE_SUFFIX
98 self._lock_rename_file = self._lock_file + str(uuid.uuid4()) + RENAME_FILE_SUFFIX
99
100 def acquire(self) -> bool:
101 """Acquire a lock in a blocking way by creating a lock file.
102
103 Returns:
104 :obj:`True` if it succeeded in creating a `self._lock_file`
105
106 """
107 sleep_secs = 0.001
108 while True:
109 try:
110 open_flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY
111 os.close(os.open(self._lock_file, open_flags))
112 return True
113 except OSError as err:
114 if err.errno == errno.EEXIST:
115 time.sleep(sleep_secs)
116 sleep_secs = min(sleep_secs * 2, 1)
117 continue
118 raise err
119 except BaseException:
120 self.release()
121 raise
122
123 def release(self) -> None:
124 """Release a lock by removing the created file."""
125
126 try:
127 os.rename(self._lock_file, self._lock_rename_file)
128 os.unlink(self._lock_rename_file)
129 except OSError:
130 raise RuntimeError("Error: did not possess lock")
131 except BaseException:
132 os.unlink(self._lock_rename_file)
133 raise
134
135
136 @contextmanager
137 def get_lock_file(lock_obj: JournalFileBaseLock) -> Iterator[None]:
138 lock_obj.acquire()
139 try:
140 yield
141 finally:
142 lock_obj.release()
143
144
145 class JournalFileStorage(BaseJournalLogStorage):
146 """File storage class for Journal log backend.
147
148 Args:
149 file_path:
150 Path of file to persist the log to.
151
152 lock_obj:
153 Lock object for process exclusivity.
154
155 """
156
157 def __init__(self, file_path: str, lock_obj: Optional[JournalFileBaseLock] = None) -> None:
158 self._file_path: str = file_path
159 self._lock = lock_obj or JournalFileSymlinkLock(self._file_path)
160 open(self._file_path, "a").close() # Create a file if it does not exist
161 self._log_number_offset: Dict[int, int] = {0: 0}
162
163 def read_logs(self, log_number_from: int) -> List[Dict[str, Any]]:
164 logs = []
165 with open(self._file_path, "r") as f:
166 log_number_start = 0
167 if log_number_from in self._log_number_offset:
168 f.seek(self._log_number_offset[log_number_from])
169 log_number_start = log_number_from
170
171 last_decode_error = None
172 for log_number, line in enumerate(f, start=log_number_start):
173 if last_decode_error is not None:
174 raise last_decode_error
175 if log_number + 1 not in self._log_number_offset:
176 byte_len = len(line.encode("utf-8"))
177 self._log_number_offset[log_number + 1] = (
178 self._log_number_offset[log_number] + byte_len
179 )
180 if log_number < log_number_from:
181 continue
182 try:
183 logs.append(json.loads(line))
184 except json.JSONDecodeError as err:
185 last_decode_error = err
186 del self._log_number_offset[log_number + 1]
187
188 return logs
189
190 def append_logs(self, logs: List[Dict[str, Any]]) -> None:
191 with get_lock_file(self._lock):
192 what_to_write = "\n".join([json.dumps(log) for log in logs]) + "\n"
193 with open(self._file_path, "a", encoding="utf-8") as f:
194 f.write(what_to_write)
195 f.flush()
196 os.fsync(f.fileno())
197
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/optuna/storages/_journal/file.py b/optuna/storages/_journal/file.py
--- a/optuna/storages/_journal/file.py
+++ b/optuna/storages/_journal/file.py
@@ -157,12 +157,12 @@
def __init__(self, file_path: str, lock_obj: Optional[JournalFileBaseLock] = None) -> None:
self._file_path: str = file_path
self._lock = lock_obj or JournalFileSymlinkLock(self._file_path)
- open(self._file_path, "a").close() # Create a file if it does not exist
+ open(self._file_path, "ab").close() # Create a file if it does not exist
self._log_number_offset: Dict[int, int] = {0: 0}
def read_logs(self, log_number_from: int) -> List[Dict[str, Any]]:
logs = []
- with open(self._file_path, "r") as f:
+ with open(self._file_path, "rb") as f:
log_number_start = 0
if log_number_from in self._log_number_offset:
f.seek(self._log_number_offset[log_number_from])
@@ -173,7 +173,7 @@
if last_decode_error is not None:
raise last_decode_error
if log_number + 1 not in self._log_number_offset:
- byte_len = len(line.encode("utf-8"))
+ byte_len = len(line)
self._log_number_offset[log_number + 1] = (
self._log_number_offset[log_number] + byte_len
)
@@ -190,7 +190,7 @@
def append_logs(self, logs: List[Dict[str, Any]]) -> None:
with get_lock_file(self._lock):
what_to_write = "\n".join([json.dumps(log) for log in logs]) + "\n"
- with open(self._file_path, "a", encoding="utf-8") as f:
- f.write(what_to_write)
+ with open(self._file_path, "ab") as f:
+ f.write(what_to_write.encode("utf-8"))
f.flush()
os.fsync(f.fileno())
|
{"golden_diff": "diff --git a/optuna/storages/_journal/file.py b/optuna/storages/_journal/file.py\n--- a/optuna/storages/_journal/file.py\n+++ b/optuna/storages/_journal/file.py\n@@ -157,12 +157,12 @@\n def __init__(self, file_path: str, lock_obj: Optional[JournalFileBaseLock] = None) -> None:\n self._file_path: str = file_path\n self._lock = lock_obj or JournalFileSymlinkLock(self._file_path)\n- open(self._file_path, \"a\").close() # Create a file if it does not exist\n+ open(self._file_path, \"ab\").close() # Create a file if it does not exist\n self._log_number_offset: Dict[int, int] = {0: 0}\n \n def read_logs(self, log_number_from: int) -> List[Dict[str, Any]]:\n logs = []\n- with open(self._file_path, \"r\") as f:\n+ with open(self._file_path, \"rb\") as f:\n log_number_start = 0\n if log_number_from in self._log_number_offset:\n f.seek(self._log_number_offset[log_number_from])\n@@ -173,7 +173,7 @@\n if last_decode_error is not None:\n raise last_decode_error\n if log_number + 1 not in self._log_number_offset:\n- byte_len = len(line.encode(\"utf-8\"))\n+ byte_len = len(line)\n self._log_number_offset[log_number + 1] = (\n self._log_number_offset[log_number] + byte_len\n )\n@@ -190,7 +190,7 @@\n def append_logs(self, logs: List[Dict[str, Any]]) -> None:\n with get_lock_file(self._lock):\n what_to_write = \"\\n\".join([json.dumps(log) for log in logs]) + \"\\n\"\n- with open(self._file_path, \"a\", encoding=\"utf-8\") as f:\n- f.write(what_to_write)\n+ with open(self._file_path, \"ab\") as f:\n+ f.write(what_to_write.encode(\"utf-8\"))\n f.flush()\n os.fsync(f.fileno())\n", "issue": "JournalStorage not working on Windows10\n### Expected behavior\r\n\r\nI was using the minimal JournalStorage example from the optuna documentation on my Windows10 PC. \r\nHowever the script is not able to run and throws every time json decoding errors.\r\n\r\nAccording to the latest documentation I didn\u00b4t see any limitations regarding the OS on which JournalStorage can be used. But maybe I was missing something.\r\n\r\n\r\n### Environment\r\n\r\n- Optuna version:3.1.0.dev\r\n- Python version:3.10.6\r\n- OS:Windows-10-10.0.19044-SP0\r\n- (Optional) Other libraries and their versions:\r\n\r\n\r\n### Error messages, stack traces, or logs\r\n\r\n```shell\r\nC:\\1_Files\\M\\optuna_trial.py:9: ExperimentalWarning: JournalStorage is experimental (supported from v3.1.0). 
The interface can change in the future.\r\n storage = optuna.storages.JournalStorage(\r\nTraceback (most recent call last):\r\n File \"C:\\1_Files\\M\\optuna_trial.py\", line 12, in <module>\r\n study = optuna.create_study(storage=storage)\r\n File \"C:\\Users\\Prometheus\\miniconda3\\envs\\py310\\lib\\site-packages\\optuna\\_convert_positional_args.py\", line 63, in converter_wrapper\r\n return func(**kwargs)\r\n File \"C:\\Users\\Prometheus\\miniconda3\\envs\\py310\\lib\\site-packages\\optuna\\study\\study.py\", line 1155, in create_study\r\n study_id = storage.create_new_study(study_name)\r\n File \"C:\\Users\\Prometheus\\miniconda3\\envs\\py310\\lib\\site-packages\\optuna\\storages\\_journal\\storage.py\", line 111, in create_new_study\r\n self._sync_with_backend()\r\n File \"C:\\Users\\Prometheus\\miniconda3\\envs\\py310\\lib\\site-packages\\optuna\\storages\\_journal\\storage.py\", line 103, in _sync_with_backend\r\n logs = self._backend.read_logs(self._replay_result.log_number_read)\r\n File \"C:\\Users\\Prometheus\\miniconda3\\envs\\py310\\lib\\site-packages\\optuna\\storages\\_journal\\file.py\", line 174, in read_logs\r\n raise last_decode_error\r\n File \"C:\\Users\\Prometheus\\miniconda3\\envs\\py310\\lib\\site-packages\\optuna\\storages\\_journal\\file.py\", line 184, in read_logs\r\n logs.append(json.loads(line))\r\n File \"C:\\Users\\Prometheus\\miniconda3\\envs\\py310\\lib\\json\\__init__.py\", line 346, in loads\r\n return _default_decoder.decode(s)\r\n File \"C:\\Users\\Prometheus\\miniconda3\\envs\\py310\\lib\\json\\decoder.py\", line 337, in decode\r\n obj, end = self.raw_decode(s, idx=_w(s, 0).end())\r\n File \"C:\\Users\\Prometheus\\miniconda3\\envs\\py310\\lib\\json\\decoder.py\", line 353, in raw_decode\r\n obj, end = self.scan_once(s, idx)\r\njson.decoder.JSONDecodeError: Invalid control character at: line 1 column 3 (char 2)\r\n```\r\n\r\n\r\n### Steps to reproduce\r\n\r\n1. Run any JournalStorage example on a Windows10 PC\r\n2. 
Python finishes with error\r\n3.\r\n```python\r\nimport optuna\r\n\r\n\r\ndef objective(trial):\r\n x = trial.suggest_float(\"x\", -10, 10)\r\n return (x - 2) ** 2\r\n\r\n\r\nstorage = optuna.storages.JournalStorage(\r\n optuna.storages.JournalFileStorage(\"./log_file\"))\r\n\r\nstudy = optuna.create_study(storage=storage)\r\nstudy.optimize(objective)\r\n```\r\n\r\n\r\n### Additional context (optional)\r\n\r\nrunning the script as an admin yields a slightly different error\r\n\r\n```\r\n File \"C:\\Users\\Prometheus\\miniconda3\\envs\\py310\\lib\\json\\decoder.py\", line 355, in raw_decode\r\n raise JSONDecodeError(\"Expecting value\", s, err.value) from None\r\njson.decoder.JSONDecodeError: Expecting value: line 2 column 1 (char 1)\r\n```\n", "before_files": [{"content": "import abc\nfrom contextlib import contextmanager\nimport errno\nimport json\nimport os\nimport time\nfrom typing import Any\nfrom typing import Dict\nfrom typing import Iterator\nfrom typing import List\nfrom typing import Optional\nimport uuid\n\nfrom optuna.storages._journal.base import BaseJournalLogStorage\n\n\nLOCK_FILE_SUFFIX = \".lock\"\nRENAME_FILE_SUFFIX = \".rename\"\n\n\nclass JournalFileBaseLock(abc.ABC):\n @abc.abstractmethod\n def acquire(self) -> bool:\n raise NotImplementedError\n\n @abc.abstractmethod\n def release(self) -> None:\n raise NotImplementedError\n\n\nclass JournalFileSymlinkLock(JournalFileBaseLock):\n \"\"\"Lock class for synchronizing processes for NFSv2 or later.\n\n On acquiring the lock, link system call is called to create an exclusive file. The file is\n deleted when the lock is released. In NFS environments prior to NFSv3, use this instead of\n :class:`~optuna.storages.JournalFileOpenLock`\n\n Args:\n filepath:\n The path of the file whose race condition must be protected.\n \"\"\"\n\n def __init__(self, filepath: str) -> None:\n self._lock_target_file = filepath\n self._lock_file = filepath + LOCK_FILE_SUFFIX\n self._lock_rename_file = self._lock_file + str(uuid.uuid4()) + RENAME_FILE_SUFFIX\n\n def acquire(self) -> bool:\n \"\"\"Acquire a lock in a blocking way by creating a symbolic link of a file.\n\n Returns:\n :obj:`True` if it succeeded in creating a symbolic link of `self._lock_target_file`.\n\n \"\"\"\n sleep_secs = 0.001\n while True:\n try:\n os.symlink(self._lock_target_file, self._lock_file)\n return True\n except OSError as err:\n if err.errno == errno.EEXIST:\n time.sleep(sleep_secs)\n sleep_secs = min(sleep_secs * 2, 1)\n continue\n raise err\n except BaseException:\n self.release()\n raise\n\n def release(self) -> None:\n \"\"\"Release a lock by removing the symbolic link.\"\"\"\n\n try:\n os.rename(self._lock_file, self._lock_rename_file)\n os.unlink(self._lock_rename_file)\n except OSError:\n raise RuntimeError(\"Error: did not possess lock\")\n except BaseException:\n os.unlink(self._lock_rename_file)\n raise\n\n\nclass JournalFileOpenLock(JournalFileBaseLock):\n \"\"\"Lock class for synchronizing processes for NFSv3 or later.\n\n On acquiring the lock, open system call is called with the O_EXCL option to create an exclusive\n file. The file is deleted when the lock is released. This class is only supported when using\n NFSv3 or later on kernel 2.6 or later. 
In prior NFS environments, use\n :class:`~optuna.storages.JournalFileSymlinkLock`.\n\n Args:\n filepath:\n The path of the file whose race condition must be protected.\n \"\"\"\n\n def __init__(self, filepath: str) -> None:\n self._lock_file = filepath + LOCK_FILE_SUFFIX\n self._lock_rename_file = self._lock_file + str(uuid.uuid4()) + RENAME_FILE_SUFFIX\n\n def acquire(self) -> bool:\n \"\"\"Acquire a lock in a blocking way by creating a lock file.\n\n Returns:\n :obj:`True` if it succeeded in creating a `self._lock_file`\n\n \"\"\"\n sleep_secs = 0.001\n while True:\n try:\n open_flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY\n os.close(os.open(self._lock_file, open_flags))\n return True\n except OSError as err:\n if err.errno == errno.EEXIST:\n time.sleep(sleep_secs)\n sleep_secs = min(sleep_secs * 2, 1)\n continue\n raise err\n except BaseException:\n self.release()\n raise\n\n def release(self) -> None:\n \"\"\"Release a lock by removing the created file.\"\"\"\n\n try:\n os.rename(self._lock_file, self._lock_rename_file)\n os.unlink(self._lock_rename_file)\n except OSError:\n raise RuntimeError(\"Error: did not possess lock\")\n except BaseException:\n os.unlink(self._lock_rename_file)\n raise\n\n\n@contextmanager\ndef get_lock_file(lock_obj: JournalFileBaseLock) -> Iterator[None]:\n lock_obj.acquire()\n try:\n yield\n finally:\n lock_obj.release()\n\n\nclass JournalFileStorage(BaseJournalLogStorage):\n \"\"\"File storage class for Journal log backend.\n\n Args:\n file_path:\n Path of file to persist the log to.\n\n lock_obj:\n Lock object for process exclusivity.\n\n \"\"\"\n\n def __init__(self, file_path: str, lock_obj: Optional[JournalFileBaseLock] = None) -> None:\n self._file_path: str = file_path\n self._lock = lock_obj or JournalFileSymlinkLock(self._file_path)\n open(self._file_path, \"a\").close() # Create a file if it does not exist\n self._log_number_offset: Dict[int, int] = {0: 0}\n\n def read_logs(self, log_number_from: int) -> List[Dict[str, Any]]:\n logs = []\n with open(self._file_path, \"r\") as f:\n log_number_start = 0\n if log_number_from in self._log_number_offset:\n f.seek(self._log_number_offset[log_number_from])\n log_number_start = log_number_from\n\n last_decode_error = None\n for log_number, line in enumerate(f, start=log_number_start):\n if last_decode_error is not None:\n raise last_decode_error\n if log_number + 1 not in self._log_number_offset:\n byte_len = len(line.encode(\"utf-8\"))\n self._log_number_offset[log_number + 1] = (\n self._log_number_offset[log_number] + byte_len\n )\n if log_number < log_number_from:\n continue\n try:\n logs.append(json.loads(line))\n except json.JSONDecodeError as err:\n last_decode_error = err\n del self._log_number_offset[log_number + 1]\n\n return logs\n\n def append_logs(self, logs: List[Dict[str, Any]]) -> None:\n with get_lock_file(self._lock):\n what_to_write = \"\\n\".join([json.dumps(log) for log in logs]) + \"\\n\"\n with open(self._file_path, \"a\", encoding=\"utf-8\") as f:\n f.write(what_to_write)\n f.flush()\n os.fsync(f.fileno())\n", "path": "optuna/storages/_journal/file.py"}], "after_files": [{"content": "import abc\nfrom contextlib import contextmanager\nimport errno\nimport json\nimport os\nimport time\nfrom typing import Any\nfrom typing import Dict\nfrom typing import Iterator\nfrom typing import List\nfrom typing import Optional\nimport uuid\n\nfrom optuna.storages._journal.base import BaseJournalLogStorage\n\n\nLOCK_FILE_SUFFIX = \".lock\"\nRENAME_FILE_SUFFIX = \".rename\"\n\n\nclass 
JournalFileBaseLock(abc.ABC):\n @abc.abstractmethod\n def acquire(self) -> bool:\n raise NotImplementedError\n\n @abc.abstractmethod\n def release(self) -> None:\n raise NotImplementedError\n\n\nclass JournalFileSymlinkLock(JournalFileBaseLock):\n \"\"\"Lock class for synchronizing processes for NFSv2 or later.\n\n On acquiring the lock, link system call is called to create an exclusive file. The file is\n deleted when the lock is released. In NFS environments prior to NFSv3, use this instead of\n :class:`~optuna.storages.JournalFileOpenLock`\n\n Args:\n filepath:\n The path of the file whose race condition must be protected.\n \"\"\"\n\n def __init__(self, filepath: str) -> None:\n self._lock_target_file = filepath\n self._lock_file = filepath + LOCK_FILE_SUFFIX\n self._lock_rename_file = self._lock_file + str(uuid.uuid4()) + RENAME_FILE_SUFFIX\n\n def acquire(self) -> bool:\n \"\"\"Acquire a lock in a blocking way by creating a symbolic link of a file.\n\n Returns:\n :obj:`True` if it succeeded in creating a symbolic link of `self._lock_target_file`.\n\n \"\"\"\n sleep_secs = 0.001\n while True:\n try:\n os.symlink(self._lock_target_file, self._lock_file)\n return True\n except OSError as err:\n if err.errno == errno.EEXIST:\n time.sleep(sleep_secs)\n sleep_secs = min(sleep_secs * 2, 1)\n continue\n raise err\n except BaseException:\n self.release()\n raise\n\n def release(self) -> None:\n \"\"\"Release a lock by removing the symbolic link.\"\"\"\n\n try:\n os.rename(self._lock_file, self._lock_rename_file)\n os.unlink(self._lock_rename_file)\n except OSError:\n raise RuntimeError(\"Error: did not possess lock\")\n except BaseException:\n os.unlink(self._lock_rename_file)\n raise\n\n\nclass JournalFileOpenLock(JournalFileBaseLock):\n \"\"\"Lock class for synchronizing processes for NFSv3 or later.\n\n On acquiring the lock, open system call is called with the O_EXCL option to create an exclusive\n file. The file is deleted when the lock is released. This class is only supported when using\n NFSv3 or later on kernel 2.6 or later. 
In prior NFS environments, use\n :class:`~optuna.storages.JournalFileSymlinkLock`.\n\n Args:\n filepath:\n The path of the file whose race condition must be protected.\n \"\"\"\n\n def __init__(self, filepath: str) -> None:\n self._lock_file = filepath + LOCK_FILE_SUFFIX\n self._lock_rename_file = self._lock_file + str(uuid.uuid4()) + RENAME_FILE_SUFFIX\n\n def acquire(self) -> bool:\n \"\"\"Acquire a lock in a blocking way by creating a lock file.\n\n Returns:\n :obj:`True` if it succeeded in creating a `self._lock_file`\n\n \"\"\"\n sleep_secs = 0.001\n while True:\n try:\n open_flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY\n os.close(os.open(self._lock_file, open_flags))\n return True\n except OSError as err:\n if err.errno == errno.EEXIST:\n time.sleep(sleep_secs)\n sleep_secs = min(sleep_secs * 2, 1)\n continue\n raise err\n except BaseException:\n self.release()\n raise\n\n def release(self) -> None:\n \"\"\"Release a lock by removing the created file.\"\"\"\n\n try:\n os.rename(self._lock_file, self._lock_rename_file)\n os.unlink(self._lock_rename_file)\n except OSError:\n raise RuntimeError(\"Error: did not possess lock\")\n except BaseException:\n os.unlink(self._lock_rename_file)\n raise\n\n\n@contextmanager\ndef get_lock_file(lock_obj: JournalFileBaseLock) -> Iterator[None]:\n lock_obj.acquire()\n try:\n yield\n finally:\n lock_obj.release()\n\n\nclass JournalFileStorage(BaseJournalLogStorage):\n \"\"\"File storage class for Journal log backend.\n\n Args:\n file_path:\n Path of file to persist the log to.\n\n lock_obj:\n Lock object for process exclusivity.\n\n \"\"\"\n\n def __init__(self, file_path: str, lock_obj: Optional[JournalFileBaseLock] = None) -> None:\n self._file_path: str = file_path\n self._lock = lock_obj or JournalFileSymlinkLock(self._file_path)\n open(self._file_path, \"ab\").close() # Create a file if it does not exist\n self._log_number_offset: Dict[int, int] = {0: 0}\n\n def read_logs(self, log_number_from: int) -> List[Dict[str, Any]]:\n logs = []\n with open(self._file_path, \"rb\") as f:\n log_number_start = 0\n if log_number_from in self._log_number_offset:\n f.seek(self._log_number_offset[log_number_from])\n log_number_start = log_number_from\n\n last_decode_error = None\n for log_number, line in enumerate(f, start=log_number_start):\n if last_decode_error is not None:\n raise last_decode_error\n if log_number + 1 not in self._log_number_offset:\n byte_len = len(line)\n self._log_number_offset[log_number + 1] = (\n self._log_number_offset[log_number] + byte_len\n )\n if log_number < log_number_from:\n continue\n try:\n logs.append(json.loads(line))\n except json.JSONDecodeError as err:\n last_decode_error = err\n del self._log_number_offset[log_number + 1]\n\n return logs\n\n def append_logs(self, logs: List[Dict[str, Any]]) -> None:\n with get_lock_file(self._lock):\n what_to_write = \"\\n\".join([json.dumps(log) for log in logs]) + \"\\n\"\n with open(self._file_path, \"ab\") as f:\n f.write(what_to_write.encode(\"utf-8\"))\n f.flush()\n os.fsync(f.fileno())\n", "path": "optuna/storages/_journal/file.py"}]}
| 3,210 | 507 |
gh_patches_debug_28558
|
rasdani/github-patches
|
git_diff
|
encode__uvicorn-420
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update 'websockets' to 8.x
I realize that https://github.com/encode/uvicorn/pull/396 was done because the tests appear to be failing with `websockets>=8.0`, but that release also fixed a bunch of DeprecationWarnings that we've been seeing in [httpx builds on 3.8](https://travis-ci.org/encode/httpx/jobs/574949180).
Since it's only a `DeprecationWarning`, and only on 3.8 ([3.7 and below is fine](https://travis-ci.org/encode/httpx/jobs/574949179)), I definitely would agree that this is a low priority issue.
I dug into the tests to try and figure out what exactly was going wrong, but from the `AttributeError`s I looked at, it looks like the main issue is two missing functions in [websockets_impl.py](https://github.com/encode/uvicorn/blob/master/uvicorn/protocols/websockets/websockets_impl.py)
`15 AttributeError: 'Server' object has no attribute 'is_serving'` --> https://github.com/aaugustin/websockets/blob/8.0.2/src/websockets/server.py#L642-L668
` 9 AttributeError: 'WebSocketProtocol' object has no attribute 'transfer_data_task'` --> https://github.com/aaugustin/websockets/blob/8.0.2/src/websockets/protocol.py#L276-L313
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 import os
5 import re
6 import sys
7 import platform
8
9 from setuptools import setup
10
11
12 def get_version(package):
13 """
14 Return package version as listed in `__version__` in `init.py`.
15 """
16 path = os.path.join(package, '__init__.py')
17 init_py = open(path, 'r', encoding='utf8').read()
18 return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
19
20
21 def get_long_description():
22 """
23 Return the README.
24 """
25 return open('README.md', 'r', encoding='utf8').read()
26
27
28 def get_packages(package):
29 """
30 Return root package and all sub-packages.
31 """
32 return [dirpath
33 for dirpath, dirnames, filenames in os.walk(package)
34 if os.path.exists(os.path.join(dirpath, '__init__.py'))]
35
36
37 env_marker = (
38 "sys_platform != 'win32'"
39 " and sys_platform != 'cygwin'"
40 " and platform_python_implementation != 'pypy'"
41 )
42
43 requirements = [
44 "click==7.*",
45 "h11==0.8.*",
46 "websockets==7.*",
47 "httptools==0.0.13 ;" + env_marker,
48 "uvloop==0.* ;" + env_marker,
49 ]
50
51
52 setup(
53 name='uvicorn',
54 version=get_version('uvicorn'),
55 url='https://github.com/encode/uvicorn',
56 license='BSD',
57 description='The lightning-fast ASGI server.',
58 long_description=get_long_description(),
59 long_description_content_type='text/markdown',
60 author='Tom Christie',
61 author_email='[email protected]',
62 packages=get_packages('uvicorn'),
63 install_requires=requirements,
64 data_files = [("", ["LICENSE.md"])],
65 classifiers=[
66 'Development Status :: 3 - Alpha',
67 'Environment :: Web Environment',
68 'Intended Audience :: Developers',
69 'License :: OSI Approved :: BSD License',
70 'Operating System :: OS Independent',
71 'Topic :: Internet :: WWW/HTTP',
72 'Programming Language :: Python :: 3',
73 'Programming Language :: Python :: 3.5',
74 'Programming Language :: Python :: 3.6',
75 'Programming Language :: Python :: 3.7',
76 'Programming Language :: Python :: Implementation :: CPython',
77 'Programming Language :: Python :: Implementation :: PyPy',
78 ],
79 entry_points="""
80 [console_scripts]
81 uvicorn=uvicorn.main:main
82 """
83 )
84
```
Path: `uvicorn/protocols/websockets/websockets_impl.py`
Content:
```
1 import asyncio
2 import http
3 from urllib.parse import unquote
4
5 import websockets
6
7 from uvicorn.protocols.utils import get_local_addr, get_remote_addr, is_ssl
8
9
10 class Server:
11 closing = False
12
13 def register(self, ws):
14 pass
15
16 def unregister(self, ws):
17 pass
18
19
20 class WebSocketProtocol(websockets.WebSocketServerProtocol):
21 def __init__(self, config, server_state, _loop=None):
22 if not config.loaded:
23 config.load()
24
25 self.config = config
26 self.app = config.loaded_app
27 self.loop = _loop or asyncio.get_event_loop()
28 self.logger = config.logger_instance
29 self.root_path = config.root_path
30
31 # Shared server state
32 self.connections = server_state.connections
33 self.tasks = server_state.tasks
34
35 # Connection state
36 self.transport = None
37 self.server = None
38 self.client = None
39 self.scheme = None
40
41 # Connection events
42 self.scope = None
43 self.handshake_started_event = asyncio.Event()
44 self.handshake_completed_event = asyncio.Event()
45 self.closed_event = asyncio.Event()
46 self.initial_response = None
47 self.connect_sent = False
48 self.accepted_subprotocol = None
49
50 server = Server()
51
52 super().__init__(ws_handler=self.ws_handler, ws_server=server)
53
54 def connection_made(self, transport):
55 self.connections.add(self)
56 self.transport = transport
57 self.server = get_local_addr(transport)
58 self.client = get_remote_addr(transport)
59 self.scheme = "wss" if is_ssl(transport) else "ws"
60 super().connection_made(transport)
61
62 def connection_lost(self, exc):
63 self.connections.remove(self)
64 self.handshake_completed_event.set()
65 super().connection_lost(exc)
66
67 def shutdown(self):
68 self.transport.close()
69
70 def on_task_complete(self, task):
71 self.tasks.discard(task)
72
73 async def process_request(self, path, headers):
74 """
75 This hook is called to determine if the websocket should return
76 an HTTP response and close.
77
78 Our behavior here is to start the ASGI application, and then wait
79 for either `accept` or `close` in order to determine if we should
80 close the connection.
81 """
82 path_portion, _, query_string = path.partition("?")
83
84 websockets.handshake.check_request(headers)
85
86 subprotocols = []
87 for header in headers.get_all("Sec-WebSocket-Protocol"):
88 subprotocols.extend([token.strip() for token in header.split(",")])
89
90 asgi_headers = [
91 (name.encode("ascii"), value.encode("ascii"))
92 for name, value in headers.raw_items()
93 ]
94
95 self.scope = {
96 "type": "websocket",
97 "scheme": self.scheme,
98 "server": self.server,
99 "client": self.client,
100 "root_path": self.root_path,
101 "path": unquote(path_portion),
102 "raw_path": path_portion,
103 "query_string": query_string.encode("ascii"),
104 "headers": asgi_headers,
105 "subprotocols": subprotocols,
106 }
107 task = self.loop.create_task(self.run_asgi())
108 task.add_done_callback(self.on_task_complete)
109 self.tasks.add(task)
110 await self.handshake_started_event.wait()
111 return self.initial_response
112
113 def process_subprotocol(self, headers, available_subprotocols):
114 """
115 We override the standard 'process_subprotocol' behavior here so that
116 we return whatever subprotocol is sent in the 'accept' message.
117 """
118 return self.accepted_subprotocol
119
120 def send_500_response(self):
121 msg = b"Internal Server Error"
122 content = [
123 b"HTTP/1.1 500 Internal Server Error\r\n"
124 b"content-type: text/plain; charset=utf-8\r\n",
125 b"content-length: " + str(len(msg)).encode("ascii") + b"\r\n",
126 b"connection: close\r\n",
127 b"\r\n",
128 msg,
129 ]
130 self.transport.write(b"".join(content))
131
132 async def ws_handler(self, protocol, path):
133 """
134 This is the main handler function for the 'websockets' implementation
135 to call into. We just wait for close then return, and instead allow
136 'send' and 'receive' events to drive the flow.
137 """
138 self.handshake_completed_event.set()
139 await self.closed_event.wait()
140
141 async def run_asgi(self):
142 """
143 Wrapper around the ASGI callable, handling exceptions and unexpected
144 termination states.
145 """
146 try:
147 result = await self.app(self.scope, self.asgi_receive, self.asgi_send)
148 except BaseException as exc:
149 self.closed_event.set()
150 msg = "Exception in ASGI application\n"
151 self.logger.error(msg, exc_info=exc)
152 if not self.handshake_started_event.is_set():
153 self.send_500_response()
154 else:
155 await self.handshake_completed_event.wait()
156 self.transport.close()
157 else:
158 self.closed_event.set()
159 if not self.handshake_started_event.is_set():
160 msg = "ASGI callable returned without sending handshake."
161 self.logger.error(msg)
162 self.send_500_response()
163 self.transport.close()
164 elif result is not None:
165 msg = "ASGI callable should return None, but returned '%s'."
166 self.logger.error(msg, result)
167 await self.handshake_completed_event.wait()
168 self.transport.close()
169
170 async def asgi_send(self, message):
171 message_type = message["type"]
172
173 if not self.handshake_started_event.is_set():
174 if message_type == "websocket.accept":
175 self.logger.info(
176 '%s - "WebSocket %s" [accepted]',
177 self.scope["client"],
178 self.scope["root_path"] + self.scope["path"],
179 )
180 self.initial_response = None
181 self.accepted_subprotocol = message.get("subprotocol")
182 self.handshake_started_event.set()
183
184 elif message_type == "websocket.close":
185 self.logger.info(
186 '%s - "WebSocket %s" 403',
187 self.scope["client"],
188 self.scope["root_path"] + self.scope["path"],
189 )
190 self.initial_response = (http.HTTPStatus.FORBIDDEN, [], b"")
191 self.handshake_started_event.set()
192 self.closed_event.set()
193
194 else:
195 msg = "Expected ASGI message 'websocket.accept' or 'websocket.close', but got '%s'."
196 raise RuntimeError(msg % message_type)
197
198 elif not self.closed_event.is_set():
199 await self.handshake_completed_event.wait()
200
201 if message_type == "websocket.send":
202 bytes_data = message.get("bytes")
203 text_data = message.get("text")
204 data = text_data if bytes_data is None else bytes_data
205 await self.send(data)
206
207 elif message_type == "websocket.close":
208 code = message.get("code", 1000)
209 await self.close(code)
210 self.closed_event.set()
211
212 else:
213 msg = "Expected ASGI message 'websocket.send' or 'websocket.close', but got '%s'."
214 raise RuntimeError(msg % message_type)
215
216 else:
217 msg = "Unexpected ASGI message '%s', after sending 'websocket.close'."
218 raise RuntimeError(msg % message_type)
219
220 async def asgi_receive(self):
221 if not self.connect_sent:
222 self.connect_sent = True
223 return {"type": "websocket.connect"}
224
225 await self.handshake_completed_event.wait()
226 try:
227 data = await self.recv()
228 except websockets.ConnectionClosed as exc:
229 return {"type": "websocket.disconnect", "code": exc.code}
230
231 msg = {"type": "websocket.receive"}
232
233 if isinstance(data, str):
234 msg["text"] = data
235 else:
236 msg["bytes"] = data
237
238 return msg
239
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -43,7 +43,7 @@
requirements = [
"click==7.*",
"h11==0.8.*",
- "websockets==7.*",
+ "websockets==8.*",
"httptools==0.0.13 ;" + env_marker,
"uvloop==0.* ;" + env_marker,
]
diff --git a/uvicorn/protocols/websockets/websockets_impl.py b/uvicorn/protocols/websockets/websockets_impl.py
--- a/uvicorn/protocols/websockets/websockets_impl.py
+++ b/uvicorn/protocols/websockets/websockets_impl.py
@@ -16,6 +16,9 @@
def unregister(self, ws):
pass
+ def is_serving(self):
+ return not self.closing
+
class WebSocketProtocol(websockets.WebSocketServerProtocol):
def __init__(self, config, server_state, _loop=None):
@@ -46,10 +49,11 @@
self.initial_response = None
self.connect_sent = False
self.accepted_subprotocol = None
+ self.transfer_data_task = None
- server = Server()
+ self.ws_server = Server()
- super().__init__(ws_handler=self.ws_handler, ws_server=server)
+ super().__init__(ws_handler=self.ws_handler, ws_server=self.ws_server)
def connection_made(self, transport):
self.connections.add(self)
@@ -65,6 +69,7 @@
super().connection_lost(exc)
def shutdown(self):
+ self.ws_server.closing = True
self.transport.close()
def on_task_complete(self, task):
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -43,7 +43,7 @@\n requirements = [\n \"click==7.*\",\n \"h11==0.8.*\",\n- \"websockets==7.*\",\n+ \"websockets==8.*\",\n \"httptools==0.0.13 ;\" + env_marker,\n \"uvloop==0.* ;\" + env_marker,\n ]\ndiff --git a/uvicorn/protocols/websockets/websockets_impl.py b/uvicorn/protocols/websockets/websockets_impl.py\n--- a/uvicorn/protocols/websockets/websockets_impl.py\n+++ b/uvicorn/protocols/websockets/websockets_impl.py\n@@ -16,6 +16,9 @@\n def unregister(self, ws):\n pass\n \n+ def is_serving(self):\n+ return not self.closing\n+\n \n class WebSocketProtocol(websockets.WebSocketServerProtocol):\n def __init__(self, config, server_state, _loop=None):\n@@ -46,10 +49,11 @@\n self.initial_response = None\n self.connect_sent = False\n self.accepted_subprotocol = None\n+ self.transfer_data_task = None\n \n- server = Server()\n+ self.ws_server = Server()\n \n- super().__init__(ws_handler=self.ws_handler, ws_server=server)\n+ super().__init__(ws_handler=self.ws_handler, ws_server=self.ws_server)\n \n def connection_made(self, transport):\n self.connections.add(self)\n@@ -65,6 +69,7 @@\n super().connection_lost(exc)\n \n def shutdown(self):\n+ self.ws_server.closing = True\n self.transport.close()\n \n def on_task_complete(self, task):\n", "issue": "Update 'websockets' to 8.x\nI realize that https://github.com/encode/uvicorn/pull/396 was done because the tests appear to be failing with `websockets>=8.0`, but that release also fixed a bunch of DeprecationWarnings that we've been seeing in [httpx builds on 3.8](https://travis-ci.org/encode/httpx/jobs/574949180).\r\n\r\nSince it's only a `DeprecationWarning`, and only on 3.8 ([3.7 and below is fine](https://travis-ci.org/encode/httpx/jobs/574949179)), I definitely would agree that this is a low priority issue.\r\n\r\nI dug into the tests to try and figure out what exactly was going wrong, but the `AttributeError`s I looked at looks like the main issue is two missing functions in [websockets_impl.py](https://github.com/encode/uvicorn/blob/master/uvicorn/protocols/websockets/websockets_impl.py)\r\n\r\n`15 AttributeError: 'Server' object has no attribute 'is_serving'` --> https://github.com/aaugustin/websockets/blob/8.0.2/src/websockets/server.py#L642-L668\r\n` 9 AttributeError: 'WebSocketProtocol' object has no attribute 'transfer_data_task'` --> https://github.com/aaugustin/websockets/blob/8.0.2/src/websockets/protocol.py#L276-L313\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\nimport sys\nimport platform\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n path = os.path.join(package, '__init__.py')\n init_py = open(path, 'r', encoding='utf8').read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n return open('README.md', 'r', encoding='utf8').read()\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n\nenv_marker = (\n \"sys_platform != 'win32'\"\n \" and sys_platform != 'cygwin'\"\n \" and platform_python_implementation != 'pypy'\"\n)\n\nrequirements = [\n \"click==7.*\",\n \"h11==0.8.*\",\n \"websockets==7.*\",\n \"httptools==0.0.13 ;\" + 
env_marker,\n \"uvloop==0.* ;\" + env_marker,\n]\n\n\nsetup(\n name='uvicorn',\n version=get_version('uvicorn'),\n url='https://github.com/encode/uvicorn',\n license='BSD',\n description='The lightning-fast ASGI server.',\n long_description=get_long_description(),\n long_description_content_type='text/markdown',\n author='Tom Christie',\n author_email='[email protected]',\n packages=get_packages('uvicorn'),\n install_requires=requirements,\n data_files = [(\"\", [\"LICENSE.md\"])],\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Topic :: Internet :: WWW/HTTP',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n ],\n entry_points=\"\"\"\n [console_scripts]\n uvicorn=uvicorn.main:main\n \"\"\"\n)\n", "path": "setup.py"}, {"content": "import asyncio\nimport http\nfrom urllib.parse import unquote\n\nimport websockets\n\nfrom uvicorn.protocols.utils import get_local_addr, get_remote_addr, is_ssl\n\n\nclass Server:\n closing = False\n\n def register(self, ws):\n pass\n\n def unregister(self, ws):\n pass\n\n\nclass WebSocketProtocol(websockets.WebSocketServerProtocol):\n def __init__(self, config, server_state, _loop=None):\n if not config.loaded:\n config.load()\n\n self.config = config\n self.app = config.loaded_app\n self.loop = _loop or asyncio.get_event_loop()\n self.logger = config.logger_instance\n self.root_path = config.root_path\n\n # Shared server state\n self.connections = server_state.connections\n self.tasks = server_state.tasks\n\n # Connection state\n self.transport = None\n self.server = None\n self.client = None\n self.scheme = None\n\n # Connection events\n self.scope = None\n self.handshake_started_event = asyncio.Event()\n self.handshake_completed_event = asyncio.Event()\n self.closed_event = asyncio.Event()\n self.initial_response = None\n self.connect_sent = False\n self.accepted_subprotocol = None\n\n server = Server()\n\n super().__init__(ws_handler=self.ws_handler, ws_server=server)\n\n def connection_made(self, transport):\n self.connections.add(self)\n self.transport = transport\n self.server = get_local_addr(transport)\n self.client = get_remote_addr(transport)\n self.scheme = \"wss\" if is_ssl(transport) else \"ws\"\n super().connection_made(transport)\n\n def connection_lost(self, exc):\n self.connections.remove(self)\n self.handshake_completed_event.set()\n super().connection_lost(exc)\n\n def shutdown(self):\n self.transport.close()\n\n def on_task_complete(self, task):\n self.tasks.discard(task)\n\n async def process_request(self, path, headers):\n \"\"\"\n This hook is called to determine if the websocket should return\n an HTTP response and close.\n\n Our behavior here is to start the ASGI application, and then wait\n for either `accept` or `close` in order to determine if we should\n close the connection.\n \"\"\"\n path_portion, _, query_string = path.partition(\"?\")\n\n websockets.handshake.check_request(headers)\n\n subprotocols = []\n for header in headers.get_all(\"Sec-WebSocket-Protocol\"):\n subprotocols.extend([token.strip() for token in header.split(\",\")])\n\n asgi_headers = [\n (name.encode(\"ascii\"), value.encode(\"ascii\"))\n for name, 
value in headers.raw_items()\n ]\n\n self.scope = {\n \"type\": \"websocket\",\n \"scheme\": self.scheme,\n \"server\": self.server,\n \"client\": self.client,\n \"root_path\": self.root_path,\n \"path\": unquote(path_portion),\n \"raw_path\": path_portion,\n \"query_string\": query_string.encode(\"ascii\"),\n \"headers\": asgi_headers,\n \"subprotocols\": subprotocols,\n }\n task = self.loop.create_task(self.run_asgi())\n task.add_done_callback(self.on_task_complete)\n self.tasks.add(task)\n await self.handshake_started_event.wait()\n return self.initial_response\n\n def process_subprotocol(self, headers, available_subprotocols):\n \"\"\"\n We override the standard 'process_subprotocol' behavior here so that\n we return whatever subprotocol is sent in the 'accept' message.\n \"\"\"\n return self.accepted_subprotocol\n\n def send_500_response(self):\n msg = b\"Internal Server Error\"\n content = [\n b\"HTTP/1.1 500 Internal Server Error\\r\\n\"\n b\"content-type: text/plain; charset=utf-8\\r\\n\",\n b\"content-length: \" + str(len(msg)).encode(\"ascii\") + b\"\\r\\n\",\n b\"connection: close\\r\\n\",\n b\"\\r\\n\",\n msg,\n ]\n self.transport.write(b\"\".join(content))\n\n async def ws_handler(self, protocol, path):\n \"\"\"\n This is the main handler function for the 'websockets' implementation\n to call into. We just wait for close then return, and instead allow\n 'send' and 'receive' events to drive the flow.\n \"\"\"\n self.handshake_completed_event.set()\n await self.closed_event.wait()\n\n async def run_asgi(self):\n \"\"\"\n Wrapper around the ASGI callable, handling exceptions and unexpected\n termination states.\n \"\"\"\n try:\n result = await self.app(self.scope, self.asgi_receive, self.asgi_send)\n except BaseException as exc:\n self.closed_event.set()\n msg = \"Exception in ASGI application\\n\"\n self.logger.error(msg, exc_info=exc)\n if not self.handshake_started_event.is_set():\n self.send_500_response()\n else:\n await self.handshake_completed_event.wait()\n self.transport.close()\n else:\n self.closed_event.set()\n if not self.handshake_started_event.is_set():\n msg = \"ASGI callable returned without sending handshake.\"\n self.logger.error(msg)\n self.send_500_response()\n self.transport.close()\n elif result is not None:\n msg = \"ASGI callable should return None, but returned '%s'.\"\n self.logger.error(msg, result)\n await self.handshake_completed_event.wait()\n self.transport.close()\n\n async def asgi_send(self, message):\n message_type = message[\"type\"]\n\n if not self.handshake_started_event.is_set():\n if message_type == \"websocket.accept\":\n self.logger.info(\n '%s - \"WebSocket %s\" [accepted]',\n self.scope[\"client\"],\n self.scope[\"root_path\"] + self.scope[\"path\"],\n )\n self.initial_response = None\n self.accepted_subprotocol = message.get(\"subprotocol\")\n self.handshake_started_event.set()\n\n elif message_type == \"websocket.close\":\n self.logger.info(\n '%s - \"WebSocket %s\" 403',\n self.scope[\"client\"],\n self.scope[\"root_path\"] + self.scope[\"path\"],\n )\n self.initial_response = (http.HTTPStatus.FORBIDDEN, [], b\"\")\n self.handshake_started_event.set()\n self.closed_event.set()\n\n else:\n msg = \"Expected ASGI message 'websocket.accept' or 'websocket.close', but got '%s'.\"\n raise RuntimeError(msg % message_type)\n\n elif not self.closed_event.is_set():\n await self.handshake_completed_event.wait()\n\n if message_type == \"websocket.send\":\n bytes_data = message.get(\"bytes\")\n text_data = message.get(\"text\")\n data = text_data 
if bytes_data is None else bytes_data\n await self.send(data)\n\n elif message_type == \"websocket.close\":\n code = message.get(\"code\", 1000)\n await self.close(code)\n self.closed_event.set()\n\n else:\n msg = \"Expected ASGI message 'websocket.send' or 'websocket.close', but got '%s'.\"\n raise RuntimeError(msg % message_type)\n\n else:\n msg = \"Unexpected ASGI message '%s', after sending 'websocket.close'.\"\n raise RuntimeError(msg % message_type)\n\n async def asgi_receive(self):\n if not self.connect_sent:\n self.connect_sent = True\n return {\"type\": \"websocket.connect\"}\n\n await self.handshake_completed_event.wait()\n try:\n data = await self.recv()\n except websockets.ConnectionClosed as exc:\n return {\"type\": \"websocket.disconnect\", \"code\": exc.code}\n\n msg = {\"type\": \"websocket.receive\"}\n\n if isinstance(data, str):\n msg[\"text\"] = data\n else:\n msg[\"bytes\"] = data\n\n return msg\n", "path": "uvicorn/protocols/websockets/websockets_impl.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\nimport sys\nimport platform\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n path = os.path.join(package, '__init__.py')\n init_py = open(path, 'r', encoding='utf8').read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n return open('README.md', 'r', encoding='utf8').read()\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n\nenv_marker = (\n \"sys_platform != 'win32'\"\n \" and sys_platform != 'cygwin'\"\n \" and platform_python_implementation != 'pypy'\"\n)\n\nrequirements = [\n \"click==7.*\",\n \"h11==0.8.*\",\n \"websockets==8.*\",\n \"httptools==0.0.13 ;\" + env_marker,\n \"uvloop==0.* ;\" + env_marker,\n]\n\n\nsetup(\n name='uvicorn',\n version=get_version('uvicorn'),\n url='https://github.com/encode/uvicorn',\n license='BSD',\n description='The lightning-fast ASGI server.',\n long_description=get_long_description(),\n long_description_content_type='text/markdown',\n author='Tom Christie',\n author_email='[email protected]',\n packages=get_packages('uvicorn'),\n install_requires=requirements,\n data_files = [(\"\", [\"LICENSE.md\"])],\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Topic :: Internet :: WWW/HTTP',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n ],\n entry_points=\"\"\"\n [console_scripts]\n uvicorn=uvicorn.main:main\n \"\"\"\n)\n", "path": "setup.py"}, {"content": "import asyncio\nimport http\nfrom urllib.parse import unquote\n\nimport websockets\n\nfrom uvicorn.protocols.utils import get_local_addr, get_remote_addr, is_ssl\n\n\nclass Server:\n closing = False\n\n def register(self, ws):\n pass\n\n def unregister(self, ws):\n pass\n\n def is_serving(self):\n return not self.closing\n\n\nclass 
WebSocketProtocol(websockets.WebSocketServerProtocol):\n def __init__(self, config, server_state, _loop=None):\n if not config.loaded:\n config.load()\n\n self.config = config\n self.app = config.loaded_app\n self.loop = _loop or asyncio.get_event_loop()\n self.logger = config.logger_instance\n self.root_path = config.root_path\n\n # Shared server state\n self.connections = server_state.connections\n self.tasks = server_state.tasks\n\n # Connection state\n self.transport = None\n self.server = None\n self.client = None\n self.scheme = None\n\n # Connection events\n self.scope = None\n self.handshake_started_event = asyncio.Event()\n self.handshake_completed_event = asyncio.Event()\n self.closed_event = asyncio.Event()\n self.initial_response = None\n self.connect_sent = False\n self.accepted_subprotocol = None\n self.transfer_data_task = None\n\n self.ws_server = Server()\n\n super().__init__(ws_handler=self.ws_handler, ws_server=self.ws_server)\n\n def connection_made(self, transport):\n self.connections.add(self)\n self.transport = transport\n self.server = get_local_addr(transport)\n self.client = get_remote_addr(transport)\n self.scheme = \"wss\" if is_ssl(transport) else \"ws\"\n super().connection_made(transport)\n\n def connection_lost(self, exc):\n self.connections.remove(self)\n self.handshake_completed_event.set()\n super().connection_lost(exc)\n\n def shutdown(self):\n self.ws_server.closing = True\n self.transport.close()\n\n def on_task_complete(self, task):\n self.tasks.discard(task)\n\n async def process_request(self, path, headers):\n \"\"\"\n This hook is called to determine if the websocket should return\n an HTTP response and close.\n\n Our behavior here is to start the ASGI application, and then wait\n for either `accept` or `close` in order to determine if we should\n close the connection.\n \"\"\"\n path_portion, _, query_string = path.partition(\"?\")\n\n websockets.handshake.check_request(headers)\n\n subprotocols = []\n for header in headers.get_all(\"Sec-WebSocket-Protocol\"):\n subprotocols.extend([token.strip() for token in header.split(\",\")])\n\n asgi_headers = [\n (name.encode(\"ascii\"), value.encode(\"ascii\"))\n for name, value in headers.raw_items()\n ]\n\n self.scope = {\n \"type\": \"websocket\",\n \"scheme\": self.scheme,\n \"server\": self.server,\n \"client\": self.client,\n \"root_path\": self.root_path,\n \"path\": unquote(path_portion),\n \"raw_path\": path_portion,\n \"query_string\": query_string.encode(\"ascii\"),\n \"headers\": asgi_headers,\n \"subprotocols\": subprotocols,\n }\n task = self.loop.create_task(self.run_asgi())\n task.add_done_callback(self.on_task_complete)\n self.tasks.add(task)\n await self.handshake_started_event.wait()\n return self.initial_response\n\n def process_subprotocol(self, headers, available_subprotocols):\n \"\"\"\n We override the standard 'process_subprotocol' behavior here so that\n we return whatever subprotocol is sent in the 'accept' message.\n \"\"\"\n return self.accepted_subprotocol\n\n def send_500_response(self):\n msg = b\"Internal Server Error\"\n content = [\n b\"HTTP/1.1 500 Internal Server Error\\r\\n\"\n b\"content-type: text/plain; charset=utf-8\\r\\n\",\n b\"content-length: \" + str(len(msg)).encode(\"ascii\") + b\"\\r\\n\",\n b\"connection: close\\r\\n\",\n b\"\\r\\n\",\n msg,\n ]\n self.transport.write(b\"\".join(content))\n\n async def ws_handler(self, protocol, path):\n \"\"\"\n This is the main handler function for the 'websockets' implementation\n to call into. 
We just wait for close then return, and instead allow\n 'send' and 'receive' events to drive the flow.\n \"\"\"\n self.handshake_completed_event.set()\n await self.closed_event.wait()\n\n async def run_asgi(self):\n \"\"\"\n Wrapper around the ASGI callable, handling exceptions and unexpected\n termination states.\n \"\"\"\n try:\n result = await self.app(self.scope, self.asgi_receive, self.asgi_send)\n except BaseException as exc:\n self.closed_event.set()\n msg = \"Exception in ASGI application\\n\"\n self.logger.error(msg, exc_info=exc)\n if not self.handshake_started_event.is_set():\n self.send_500_response()\n else:\n await self.handshake_completed_event.wait()\n self.transport.close()\n else:\n self.closed_event.set()\n if not self.handshake_started_event.is_set():\n msg = \"ASGI callable returned without sending handshake.\"\n self.logger.error(msg)\n self.send_500_response()\n self.transport.close()\n elif result is not None:\n msg = \"ASGI callable should return None, but returned '%s'.\"\n self.logger.error(msg, result)\n await self.handshake_completed_event.wait()\n self.transport.close()\n\n async def asgi_send(self, message):\n message_type = message[\"type\"]\n\n if not self.handshake_started_event.is_set():\n if message_type == \"websocket.accept\":\n self.logger.info(\n '%s - \"WebSocket %s\" [accepted]',\n self.scope[\"client\"],\n self.scope[\"root_path\"] + self.scope[\"path\"],\n )\n self.initial_response = None\n self.accepted_subprotocol = message.get(\"subprotocol\")\n self.handshake_started_event.set()\n\n elif message_type == \"websocket.close\":\n self.logger.info(\n '%s - \"WebSocket %s\" 403',\n self.scope[\"client\"],\n self.scope[\"root_path\"] + self.scope[\"path\"],\n )\n self.initial_response = (http.HTTPStatus.FORBIDDEN, [], b\"\")\n self.handshake_started_event.set()\n self.closed_event.set()\n\n else:\n msg = \"Expected ASGI message 'websocket.accept' or 'websocket.close', but got '%s'.\"\n raise RuntimeError(msg % message_type)\n\n elif not self.closed_event.is_set():\n await self.handshake_completed_event.wait()\n\n if message_type == \"websocket.send\":\n bytes_data = message.get(\"bytes\")\n text_data = message.get(\"text\")\n data = text_data if bytes_data is None else bytes_data\n await self.send(data)\n\n elif message_type == \"websocket.close\":\n code = message.get(\"code\", 1000)\n await self.close(code)\n self.closed_event.set()\n\n else:\n msg = \"Expected ASGI message 'websocket.send' or 'websocket.close', but got '%s'.\"\n raise RuntimeError(msg % message_type)\n\n else:\n msg = \"Unexpected ASGI message '%s', after sending 'websocket.close'.\"\n raise RuntimeError(msg % message_type)\n\n async def asgi_receive(self):\n if not self.connect_sent:\n self.connect_sent = True\n return {\"type\": \"websocket.connect\"}\n\n await self.handshake_completed_event.wait()\n try:\n data = await self.recv()\n except websockets.ConnectionClosed as exc:\n return {\"type\": \"websocket.disconnect\", \"code\": exc.code}\n\n msg = {\"type\": \"websocket.receive\"}\n\n if isinstance(data, str):\n msg[\"text\"] = data\n else:\n msg[\"bytes\"] = data\n\n return msg\n", "path": "uvicorn/protocols/websockets/websockets_impl.py"}]}
| 3,627 | 387 |
gh_patches_debug_16631
|
rasdani/github-patches
|
git_diff
|
google__osv.dev-986
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The purl of the Alpine ecosystem is inconsistent with purl-spec
According to [**purl-type `apk`**](https://github.com/package-url/purl-spec/blob/master/PURL-TYPES.rst#apk) ([PR](https://github.com/package-url/purl-spec/pull/171)), it seems that the purl type of `Alpine` should be `apk` and the purl namespace can be `alpine`. In this project, however, the purl type of `Alpine` is `alpine`.
As a result, the purl of a package is **different** when applying different standards. For example, the purl of the `curl` package is `pkg:apk/alpine/curl` according to purl-spec while it is `pkg:alpine/curl` in this project.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `osv/purl_helpers.py`
Content:
```
1 # Copyright 2022 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """PURL conversion utilities."""
15
16 from urllib.parse import quote
17
18 PURL_ECOSYSTEMS = {
19 'crates.io': 'cargo',
20 'Debian': 'deb',
21 'Hex': 'hex',
22 'Go': 'golang',
23 'Maven': 'maven',
24 'NuGet': 'nuget',
25 'npm': 'npm',
26 'Packagist': 'composer',
27 'OSS-Fuzz': 'generic',
28 'PyPI': 'pypi',
29 'RubyGems': 'gem',
30 }
31
32
33 def _url_encode(package_name):
34 """URL encode a PURL `namespace/name` or `name`."""
35 parts = package_name.split('/')
36 return '/'.join(quote(p) for p in parts)
37
38
39 def package_to_purl(ecosystem, package_name):
40 """Convert a ecosystem and package name to PURL."""
41 purl_type = PURL_ECOSYSTEMS.get(ecosystem)
42 if not purl_type:
43 return None
44
45 suffix = ''
46
47 if purl_type == 'maven':
48 # PURLs use / to separate the group ID and the artifact ID.
49 package_name = package_name.replace(':', '/', 1)
50
51 if purl_type == 'deb':
52 package_name = 'debian/' + package_name
53 suffix = '?arch=source'
54
55 return f'pkg:{purl_type}/{_url_encode(package_name)}{suffix}'
56
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/osv/purl_helpers.py b/osv/purl_helpers.py
--- a/osv/purl_helpers.py
+++ b/osv/purl_helpers.py
@@ -16,6 +16,7 @@
from urllib.parse import quote
PURL_ECOSYSTEMS = {
+ 'Alpine': 'apk',
'crates.io': 'cargo',
'Debian': 'deb',
'Hex': 'hex',
@@ -48,8 +49,12 @@
# PURLs use / to separate the group ID and the artifact ID.
package_name = package_name.replace(':', '/', 1)
- if purl_type == 'deb':
+ if purl_type == 'deb' and ecosystem == 'Debian':
package_name = 'debian/' + package_name
suffix = '?arch=source'
+ if purl_type == 'apk' and ecosystem == 'Alpine':
+ package_name = 'alpine/' + package_name
+ suffix = '?arch=source'
+
return f'pkg:{purl_type}/{_url_encode(package_name)}{suffix}'
|
{"golden_diff": "diff --git a/osv/purl_helpers.py b/osv/purl_helpers.py\n--- a/osv/purl_helpers.py\n+++ b/osv/purl_helpers.py\n@@ -16,6 +16,7 @@\n from urllib.parse import quote\n \n PURL_ECOSYSTEMS = {\n+ 'Alpine': 'apk',\n 'crates.io': 'cargo',\n 'Debian': 'deb',\n 'Hex': 'hex',\n@@ -48,8 +49,12 @@\n # PURLs use / to separate the group ID and the artifact ID.\n package_name = package_name.replace(':', '/', 1)\n \n- if purl_type == 'deb':\n+ if purl_type == 'deb' and ecosystem == 'Debian':\n package_name = 'debian/' + package_name\n suffix = '?arch=source'\n \n+ if purl_type == 'apk' and ecosystem == 'Alpine':\n+ package_name = 'alpine/' + package_name\n+ suffix = '?arch=source'\n+\n return f'pkg:{purl_type}/{_url_encode(package_name)}{suffix}'\n", "issue": "The purl of Alpine ecosystem is inconsistent with purl-spec\nAccording to [**purl-type `apk`**](https://github.com/package-url/purl-spec/blob/master/PURL-TYPES.rst#apk) ([PR](https://github.com/package-url/purl-spec/pull/171)), it seems that the purl type of `Alpine` should be `apk` and the purl namespace can be `alpine`. In this project, however, the purl type of `Alpine` is `alpine`.\r\n\r\nAs a result, the purl of a package is **different** when apply different standards. For example, the purl of the `curl` package is `pkg:apk/alpine/curl` according to purl-spec while it is `pkg:alpine/curl` in this project.\r\n\r\n\n", "before_files": [{"content": "# Copyright 2022 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PURL conversion utilities.\"\"\"\n\nfrom urllib.parse import quote\n\nPURL_ECOSYSTEMS = {\n 'crates.io': 'cargo',\n 'Debian': 'deb',\n 'Hex': 'hex',\n 'Go': 'golang',\n 'Maven': 'maven',\n 'NuGet': 'nuget',\n 'npm': 'npm',\n 'Packagist': 'composer',\n 'OSS-Fuzz': 'generic',\n 'PyPI': 'pypi',\n 'RubyGems': 'gem',\n}\n\n\ndef _url_encode(package_name):\n \"\"\"URL encode a PURL `namespace/name` or `name`.\"\"\"\n parts = package_name.split('/')\n return '/'.join(quote(p) for p in parts)\n\n\ndef package_to_purl(ecosystem, package_name):\n \"\"\"Convert a ecosystem and package name to PURL.\"\"\"\n purl_type = PURL_ECOSYSTEMS.get(ecosystem)\n if not purl_type:\n return None\n\n suffix = ''\n\n if purl_type == 'maven':\n # PURLs use / to separate the group ID and the artifact ID.\n package_name = package_name.replace(':', '/', 1)\n\n if purl_type == 'deb':\n package_name = 'debian/' + package_name\n suffix = '?arch=source'\n\n return f'pkg:{purl_type}/{_url_encode(package_name)}{suffix}'\n", "path": "osv/purl_helpers.py"}], "after_files": [{"content": "# Copyright 2022 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either 
express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PURL conversion utilities.\"\"\"\n\nfrom urllib.parse import quote\n\nPURL_ECOSYSTEMS = {\n 'Alpine': 'apk',\n 'crates.io': 'cargo',\n 'Debian': 'deb',\n 'Hex': 'hex',\n 'Go': 'golang',\n 'Maven': 'maven',\n 'NuGet': 'nuget',\n 'npm': 'npm',\n 'Packagist': 'composer',\n 'OSS-Fuzz': 'generic',\n 'PyPI': 'pypi',\n 'RubyGems': 'gem',\n}\n\n\ndef _url_encode(package_name):\n \"\"\"URL encode a PURL `namespace/name` or `name`.\"\"\"\n parts = package_name.split('/')\n return '/'.join(quote(p) for p in parts)\n\n\ndef package_to_purl(ecosystem, package_name):\n \"\"\"Convert a ecosystem and package name to PURL.\"\"\"\n purl_type = PURL_ECOSYSTEMS.get(ecosystem)\n if not purl_type:\n return None\n\n suffix = ''\n\n if purl_type == 'maven':\n # PURLs use / to separate the group ID and the artifact ID.\n package_name = package_name.replace(':', '/', 1)\n\n if purl_type == 'deb' and ecosystem == 'Debian':\n package_name = 'debian/' + package_name\n suffix = '?arch=source'\n\n if purl_type == 'apk' and ecosystem == 'Alpine':\n package_name = 'alpine/' + package_name\n suffix = '?arch=source'\n\n return f'pkg:{purl_type}/{_url_encode(package_name)}{suffix}'\n", "path": "osv/purl_helpers.py"}]}
| 979 | 244 |
gh_patches_debug_30461
|
rasdani/github-patches
|
git_diff
|
pyodide__pyodide-3811
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow specifying output directory in pyodide build with -o or --outdir
I added the capability to specify output directory in pyodide build with `--output-directory`. But here `pypa/build` accepts either `--outdir` or `-o`. We should accept these as ways to specify the output directory for consistency
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyodide-build/pyodide_build/cli/build.py`
Content:
```
1 import re
2 import shutil
3 import tempfile
4 from pathlib import Path
5 from typing import Optional
6 from urllib.parse import urlparse
7
8 import requests
9 import typer
10
11 from .. import common
12 from ..out_of_tree import build
13 from ..out_of_tree.pypi import (
14 build_dependencies_for_wheel,
15 build_wheels_from_pypi_requirements,
16 fetch_pypi_package,
17 )
18 from ..out_of_tree.utils import initialize_pyodide_root
19
20
21 def pypi(
22 package: str,
23 output_directory: Path,
24 exports: str = typer.Option(
25 "requested",
26 help="Which symbols should be exported when linking .so files?",
27 ),
28 ctx: typer.Context = typer.Context,
29 ) -> Path:
30 """Fetch a wheel from pypi, or build from source if none available."""
31 initialize_pyodide_root()
32 common.check_emscripten_version()
33 backend_flags = ctx.args
34 with tempfile.TemporaryDirectory() as tmpdir:
35 srcdir = Path(tmpdir)
36
37 # get package from pypi
38 package_path = fetch_pypi_package(package, srcdir)
39 if not package_path.is_dir():
40 # a pure-python wheel has been downloaded - just copy to dist folder
41 dest_file = output_directory / package_path.name
42 shutil.copyfile(str(package_path), output_directory / package_path.name)
43 print(f"Successfully fetched: {package_path.name}")
44 return dest_file
45
46 built_wheel = build.run(srcdir, output_directory, exports, backend_flags)
47 return built_wheel
48
49
50 def download_url(url: str, output_directory: Path) -> str:
51 with requests.get(url, stream=True) as response:
52 urlpath = Path(urlparse(response.url).path)
53 if urlpath.suffix == ".gz":
54 urlpath = urlpath.with_suffix("")
55 file_name = urlpath.name
56 with open(output_directory / file_name, "wb") as f:
57 for chunk in response.iter_content(chunk_size=1 << 20):
58 f.write(chunk)
59 return file_name
60
61
62 def url(
63 package_url: str,
64 output_directory: Path,
65 exports: str = typer.Option(
66 "requested",
67 help="Which symbols should be exported when linking .so files?",
68 ),
69 ctx: typer.Context = typer.Context,
70 ) -> Path:
71 """Fetch a wheel or build sdist from url."""
72 initialize_pyodide_root()
73 common.check_emscripten_version()
74 backend_flags = ctx.args
75 with tempfile.TemporaryDirectory() as tmpdir:
76 tmppath = Path(tmpdir)
77 filename = download_url(package_url, tmppath)
78 if Path(filename).suffix == ".whl":
79 shutil.move(tmppath / filename, output_directory / filename)
80 return output_directory / filename
81
82 builddir = tmppath / "build"
83 shutil.unpack_archive(tmppath / filename, builddir)
84 files = list(builddir.iterdir())
85 if len(files) == 1 and files[0].is_dir():
86 # unzipped into subfolder
87 builddir = files[0]
88 wheel_path = build.run(builddir, output_directory, exports, backend_flags)
89 return wheel_path
90
91
92 def source(
93 source_location: Path,
94 output_directory: Path,
95 exports: str = typer.Option(
96 "requested",
97 help="Which symbols should be exported when linking .so files?",
98 ),
99 ctx: typer.Context = typer.Context,
100 ) -> Path:
101 """Use pypa/build to build a Python package from source"""
102 initialize_pyodide_root()
103 common.check_emscripten_version()
104 backend_flags = ctx.args
105 built_wheel = build.run(source_location, output_directory, exports, backend_flags)
106 return built_wheel
107
108
109 # simple 'pyodide build' command
110 def main(
111 source_location: "Optional[str]" = typer.Argument(
112 "",
113 help="Build source, can be source folder, pypi version specification, or url to a source dist archive or wheel file. If this is blank, it will build the current directory.",
114 ),
115 output_directory: str = typer.Option(
116 "./dist",
117 help="which directory should the output be placed into?",
118 ),
119 requirements_txt: str = typer.Option(
120 "",
121 "--requirements",
122 "-r",
123 help="Build a list of package requirements from a requirements.txt file",
124 ),
125 exports: str = typer.Option(
126 "requested",
127 help="Which symbols should be exported when linking .so files?",
128 ),
129 build_dependencies: bool = typer.Option(
130 False, help="Fetch non-pyodide dependencies from pypi and build them too."
131 ),
132 output_lockfile: str = typer.Option(
133 "",
134 help="Output list of resolved dependencies to a file in requirements.txt format",
135 ),
136 skip_dependency: list[str] = typer.Option(
137 [],
138 help="Skip building or resolving a single dependency. Use multiple times or provide a comma separated list to skip multiple dependencies.",
139 ),
140 compression_level: int = typer.Option(
141 6, help="Compression level to use for the created zip file"
142 ),
143 ctx: typer.Context = typer.Context,
144 ) -> None:
145 """Use pypa/build to build a Python package from source, pypi or url."""
146 outpath = Path(output_directory).resolve()
147 outpath.mkdir(exist_ok=True)
148 extras: list[str] = []
149
150 if len(requirements_txt) > 0:
151 # a requirements.txt - build it (and optionally deps)
152 if not Path(requirements_txt).exists():
153 raise RuntimeError(
154 f"Couldn't find requirements text file {requirements_txt}"
155 )
156 reqs = []
157 with open(requirements_txt) as f:
158 raw_reqs = [x.strip() for x in f.readlines()]
159 for x in raw_reqs:
160 # remove comments
161 comment_pos = x.find("#")
162 if comment_pos != -1:
163 x = x[:comment_pos].strip()
164 if len(x) > 0:
165 if x[0] == "-":
166 raise RuntimeError(
167 f"pyodide build only supports name-based PEP508 requirements. [{x}] will not work."
168 )
169 if x.find("@") != -1:
170 raise RuntimeError(
171 f"pyodide build does not support URL based requirements. [{x}] will not work"
172 )
173 reqs.append(x)
174 try:
175 build_wheels_from_pypi_requirements(
176 reqs,
177 outpath,
178 build_dependencies,
179 skip_dependency,
180 exports,
181 ctx.args,
182 output_lockfile=output_lockfile,
183 )
184 except BaseException as e:
185 import traceback
186
187 print("Failed building multiple wheels:", traceback.format_exc())
188 raise e
189 return
190
191 if source_location is not None:
192 extras = re.findall(r"\[(\w+)\]", source_location)
193 if len(extras) != 0:
194 source_location = source_location[0 : source_location.find("[")]
195 if not source_location:
196 # build the current folder
197 wheel = source(Path.cwd(), outpath, exports, ctx)
198 elif source_location.find("://") != -1:
199 wheel = url(source_location, outpath, exports, ctx)
200 elif Path(source_location).is_dir():
201 # a folder, build it
202 wheel = source(Path(source_location).resolve(), outpath, exports, ctx)
203 elif source_location.find("/") == -1:
204 # try fetch or build from pypi
205 wheel = pypi(source_location, outpath, exports, ctx)
206 else:
207 raise RuntimeError(f"Couldn't determine source type for {source_location}")
208 # now build deps for wheel
209 if build_dependencies:
210 try:
211 build_dependencies_for_wheel(
212 wheel,
213 extras,
214 skip_dependency,
215 exports,
216 ctx.args,
217 output_lockfile=output_lockfile,
218 compression_level=compression_level,
219 )
220 except BaseException as e:
221 import traceback
222
223 print("Failed building dependencies for wheel:", traceback.format_exc())
224 wheel.unlink()
225 raise e
226
227
228 main.typer_kwargs = { # type: ignore[attr-defined]
229 "context_settings": {
230 "ignore_unknown_options": True,
231 "allow_extra_args": True,
232 },
233 }
234
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pyodide-build/pyodide_build/cli/build.py b/pyodide-build/pyodide_build/cli/build.py
--- a/pyodide-build/pyodide_build/cli/build.py
+++ b/pyodide-build/pyodide_build/cli/build.py
@@ -1,5 +1,6 @@
import re
import shutil
+import sys
import tempfile
from pathlib import Path
from typing import Optional
@@ -113,9 +114,12 @@
help="Build source, can be source folder, pypi version specification, or url to a source dist archive or wheel file. If this is blank, it will build the current directory.",
),
output_directory: str = typer.Option(
- "./dist",
+ "",
+ "--outdir",
+ "-o",
help="which directory should the output be placed into?",
),
+ output_directory_compat: str = typer.Option("", "--output-directory", hidden=True),
requirements_txt: str = typer.Option(
"",
"--requirements",
@@ -143,6 +147,16 @@
ctx: typer.Context = typer.Context,
) -> None:
"""Use pypa/build to build a Python package from source, pypi or url."""
+ if output_directory_compat:
+ print(
+ "--output-directory is deprecated, use --outdir or -o instead",
+ file=sys.stderr,
+ )
+ if output_directory_compat and output_directory:
+ print("Cannot provide both --outdir and --output-directory", file=sys.stderr)
+ sys.exit(1)
+ output_directory = output_directory_compat or output_directory or "./dist"
+
outpath = Path(output_directory).resolve()
outpath.mkdir(exist_ok=True)
extras: list[str] = []
|
{"golden_diff": "diff --git a/pyodide-build/pyodide_build/cli/build.py b/pyodide-build/pyodide_build/cli/build.py\n--- a/pyodide-build/pyodide_build/cli/build.py\n+++ b/pyodide-build/pyodide_build/cli/build.py\n@@ -1,5 +1,6 @@\n import re\n import shutil\n+import sys\n import tempfile\n from pathlib import Path\n from typing import Optional\n@@ -113,9 +114,12 @@\n help=\"Build source, can be source folder, pypi version specification, or url to a source dist archive or wheel file. If this is blank, it will build the current directory.\",\n ),\n output_directory: str = typer.Option(\n- \"./dist\",\n+ \"\",\n+ \"--outdir\",\n+ \"-o\",\n help=\"which directory should the output be placed into?\",\n ),\n+ output_directory_compat: str = typer.Option(\"\", \"--output-directory\", hidden=True),\n requirements_txt: str = typer.Option(\n \"\",\n \"--requirements\",\n@@ -143,6 +147,16 @@\n ctx: typer.Context = typer.Context,\n ) -> None:\n \"\"\"Use pypa/build to build a Python package from source, pypi or url.\"\"\"\n+ if output_directory_compat:\n+ print(\n+ \"--output-directory is deprecated, use --outdir or -o instead\",\n+ file=sys.stderr,\n+ )\n+ if output_directory_compat and output_directory:\n+ print(\"Cannot provide both --outdir and --output-directory\", file=sys.stderr)\n+ sys.exit(1)\n+ output_directory = output_directory_compat or output_directory or \"./dist\"\n+\n outpath = Path(output_directory).resolve()\n outpath.mkdir(exist_ok=True)\n extras: list[str] = []\n", "issue": "Allow specifying output directory in pyodide build with -o or --outdir\nI added the capability to specify output directory in pyodide build with `--output-directory`. But here `pypa/build` accepts either `--outdir` or `-o`. We should accept these as ways to specify the output directory for consistency\n", "before_files": [{"content": "import re\nimport shutil\nimport tempfile\nfrom pathlib import Path\nfrom typing import Optional\nfrom urllib.parse import urlparse\n\nimport requests\nimport typer\n\nfrom .. 
import common\nfrom ..out_of_tree import build\nfrom ..out_of_tree.pypi import (\n build_dependencies_for_wheel,\n build_wheels_from_pypi_requirements,\n fetch_pypi_package,\n)\nfrom ..out_of_tree.utils import initialize_pyodide_root\n\n\ndef pypi(\n package: str,\n output_directory: Path,\n exports: str = typer.Option(\n \"requested\",\n help=\"Which symbols should be exported when linking .so files?\",\n ),\n ctx: typer.Context = typer.Context,\n) -> Path:\n \"\"\"Fetch a wheel from pypi, or build from source if none available.\"\"\"\n initialize_pyodide_root()\n common.check_emscripten_version()\n backend_flags = ctx.args\n with tempfile.TemporaryDirectory() as tmpdir:\n srcdir = Path(tmpdir)\n\n # get package from pypi\n package_path = fetch_pypi_package(package, srcdir)\n if not package_path.is_dir():\n # a pure-python wheel has been downloaded - just copy to dist folder\n dest_file = output_directory / package_path.name\n shutil.copyfile(str(package_path), output_directory / package_path.name)\n print(f\"Successfully fetched: {package_path.name}\")\n return dest_file\n\n built_wheel = build.run(srcdir, output_directory, exports, backend_flags)\n return built_wheel\n\n\ndef download_url(url: str, output_directory: Path) -> str:\n with requests.get(url, stream=True) as response:\n urlpath = Path(urlparse(response.url).path)\n if urlpath.suffix == \".gz\":\n urlpath = urlpath.with_suffix(\"\")\n file_name = urlpath.name\n with open(output_directory / file_name, \"wb\") as f:\n for chunk in response.iter_content(chunk_size=1 << 20):\n f.write(chunk)\n return file_name\n\n\ndef url(\n package_url: str,\n output_directory: Path,\n exports: str = typer.Option(\n \"requested\",\n help=\"Which symbols should be exported when linking .so files?\",\n ),\n ctx: typer.Context = typer.Context,\n) -> Path:\n \"\"\"Fetch a wheel or build sdist from url.\"\"\"\n initialize_pyodide_root()\n common.check_emscripten_version()\n backend_flags = ctx.args\n with tempfile.TemporaryDirectory() as tmpdir:\n tmppath = Path(tmpdir)\n filename = download_url(package_url, tmppath)\n if Path(filename).suffix == \".whl\":\n shutil.move(tmppath / filename, output_directory / filename)\n return output_directory / filename\n\n builddir = tmppath / \"build\"\n shutil.unpack_archive(tmppath / filename, builddir)\n files = list(builddir.iterdir())\n if len(files) == 1 and files[0].is_dir():\n # unzipped into subfolder\n builddir = files[0]\n wheel_path = build.run(builddir, output_directory, exports, backend_flags)\n return wheel_path\n\n\ndef source(\n source_location: Path,\n output_directory: Path,\n exports: str = typer.Option(\n \"requested\",\n help=\"Which symbols should be exported when linking .so files?\",\n ),\n ctx: typer.Context = typer.Context,\n) -> Path:\n \"\"\"Use pypa/build to build a Python package from source\"\"\"\n initialize_pyodide_root()\n common.check_emscripten_version()\n backend_flags = ctx.args\n built_wheel = build.run(source_location, output_directory, exports, backend_flags)\n return built_wheel\n\n\n# simple 'pyodide build' command\ndef main(\n source_location: \"Optional[str]\" = typer.Argument(\n \"\",\n help=\"Build source, can be source folder, pypi version specification, or url to a source dist archive or wheel file. 
If this is blank, it will build the current directory.\",\n ),\n output_directory: str = typer.Option(\n \"./dist\",\n help=\"which directory should the output be placed into?\",\n ),\n requirements_txt: str = typer.Option(\n \"\",\n \"--requirements\",\n \"-r\",\n help=\"Build a list of package requirements from a requirements.txt file\",\n ),\n exports: str = typer.Option(\n \"requested\",\n help=\"Which symbols should be exported when linking .so files?\",\n ),\n build_dependencies: bool = typer.Option(\n False, help=\"Fetch non-pyodide dependencies from pypi and build them too.\"\n ),\n output_lockfile: str = typer.Option(\n \"\",\n help=\"Output list of resolved dependencies to a file in requirements.txt format\",\n ),\n skip_dependency: list[str] = typer.Option(\n [],\n help=\"Skip building or resolving a single dependency. Use multiple times or provide a comma separated list to skip multiple dependencies.\",\n ),\n compression_level: int = typer.Option(\n 6, help=\"Compression level to use for the created zip file\"\n ),\n ctx: typer.Context = typer.Context,\n) -> None:\n \"\"\"Use pypa/build to build a Python package from source, pypi or url.\"\"\"\n outpath = Path(output_directory).resolve()\n outpath.mkdir(exist_ok=True)\n extras: list[str] = []\n\n if len(requirements_txt) > 0:\n # a requirements.txt - build it (and optionally deps)\n if not Path(requirements_txt).exists():\n raise RuntimeError(\n f\"Couldn't find requirements text file {requirements_txt}\"\n )\n reqs = []\n with open(requirements_txt) as f:\n raw_reqs = [x.strip() for x in f.readlines()]\n for x in raw_reqs:\n # remove comments\n comment_pos = x.find(\"#\")\n if comment_pos != -1:\n x = x[:comment_pos].strip()\n if len(x) > 0:\n if x[0] == \"-\":\n raise RuntimeError(\n f\"pyodide build only supports name-based PEP508 requirements. [{x}] will not work.\"\n )\n if x.find(\"@\") != -1:\n raise RuntimeError(\n f\"pyodide build does not support URL based requirements. 
[{x}] will not work\"\n )\n reqs.append(x)\n try:\n build_wheels_from_pypi_requirements(\n reqs,\n outpath,\n build_dependencies,\n skip_dependency,\n exports,\n ctx.args,\n output_lockfile=output_lockfile,\n )\n except BaseException as e:\n import traceback\n\n print(\"Failed building multiple wheels:\", traceback.format_exc())\n raise e\n return\n\n if source_location is not None:\n extras = re.findall(r\"\\[(\\w+)\\]\", source_location)\n if len(extras) != 0:\n source_location = source_location[0 : source_location.find(\"[\")]\n if not source_location:\n # build the current folder\n wheel = source(Path.cwd(), outpath, exports, ctx)\n elif source_location.find(\"://\") != -1:\n wheel = url(source_location, outpath, exports, ctx)\n elif Path(source_location).is_dir():\n # a folder, build it\n wheel = source(Path(source_location).resolve(), outpath, exports, ctx)\n elif source_location.find(\"/\") == -1:\n # try fetch or build from pypi\n wheel = pypi(source_location, outpath, exports, ctx)\n else:\n raise RuntimeError(f\"Couldn't determine source type for {source_location}\")\n # now build deps for wheel\n if build_dependencies:\n try:\n build_dependencies_for_wheel(\n wheel,\n extras,\n skip_dependency,\n exports,\n ctx.args,\n output_lockfile=output_lockfile,\n compression_level=compression_level,\n )\n except BaseException as e:\n import traceback\n\n print(\"Failed building dependencies for wheel:\", traceback.format_exc())\n wheel.unlink()\n raise e\n\n\nmain.typer_kwargs = { # type: ignore[attr-defined]\n \"context_settings\": {\n \"ignore_unknown_options\": True,\n \"allow_extra_args\": True,\n },\n}\n", "path": "pyodide-build/pyodide_build/cli/build.py"}], "after_files": [{"content": "import re\nimport shutil\nimport sys\nimport tempfile\nfrom pathlib import Path\nfrom typing import Optional\nfrom urllib.parse import urlparse\n\nimport requests\nimport typer\n\nfrom .. 
import common\nfrom ..out_of_tree import build\nfrom ..out_of_tree.pypi import (\n build_dependencies_for_wheel,\n build_wheels_from_pypi_requirements,\n fetch_pypi_package,\n)\nfrom ..out_of_tree.utils import initialize_pyodide_root\n\n\ndef pypi(\n package: str,\n output_directory: Path,\n exports: str = typer.Option(\n \"requested\",\n help=\"Which symbols should be exported when linking .so files?\",\n ),\n ctx: typer.Context = typer.Context,\n) -> Path:\n \"\"\"Fetch a wheel from pypi, or build from source if none available.\"\"\"\n initialize_pyodide_root()\n common.check_emscripten_version()\n backend_flags = ctx.args\n with tempfile.TemporaryDirectory() as tmpdir:\n srcdir = Path(tmpdir)\n\n # get package from pypi\n package_path = fetch_pypi_package(package, srcdir)\n if not package_path.is_dir():\n # a pure-python wheel has been downloaded - just copy to dist folder\n dest_file = output_directory / package_path.name\n shutil.copyfile(str(package_path), output_directory / package_path.name)\n print(f\"Successfully fetched: {package_path.name}\")\n return dest_file\n\n built_wheel = build.run(srcdir, output_directory, exports, backend_flags)\n return built_wheel\n\n\ndef download_url(url: str, output_directory: Path) -> str:\n with requests.get(url, stream=True) as response:\n urlpath = Path(urlparse(response.url).path)\n if urlpath.suffix == \".gz\":\n urlpath = urlpath.with_suffix(\"\")\n file_name = urlpath.name\n with open(output_directory / file_name, \"wb\") as f:\n for chunk in response.iter_content(chunk_size=1 << 20):\n f.write(chunk)\n return file_name\n\n\ndef url(\n package_url: str,\n output_directory: Path,\n exports: str = typer.Option(\n \"requested\",\n help=\"Which symbols should be exported when linking .so files?\",\n ),\n ctx: typer.Context = typer.Context,\n) -> Path:\n \"\"\"Fetch a wheel or build sdist from url.\"\"\"\n initialize_pyodide_root()\n common.check_emscripten_version()\n backend_flags = ctx.args\n with tempfile.TemporaryDirectory() as tmpdir:\n tmppath = Path(tmpdir)\n filename = download_url(package_url, tmppath)\n if Path(filename).suffix == \".whl\":\n shutil.move(tmppath / filename, output_directory / filename)\n return output_directory / filename\n\n builddir = tmppath / \"build\"\n shutil.unpack_archive(tmppath / filename, builddir)\n files = list(builddir.iterdir())\n if len(files) == 1 and files[0].is_dir():\n # unzipped into subfolder\n builddir = files[0]\n wheel_path = build.run(builddir, output_directory, exports, backend_flags)\n return wheel_path\n\n\ndef source(\n source_location: Path,\n output_directory: Path,\n exports: str = typer.Option(\n \"requested\",\n help=\"Which symbols should be exported when linking .so files?\",\n ),\n ctx: typer.Context = typer.Context,\n) -> Path:\n \"\"\"Use pypa/build to build a Python package from source\"\"\"\n initialize_pyodide_root()\n common.check_emscripten_version()\n backend_flags = ctx.args\n built_wheel = build.run(source_location, output_directory, exports, backend_flags)\n return built_wheel\n\n\n# simple 'pyodide build' command\ndef main(\n source_location: \"Optional[str]\" = typer.Argument(\n \"\",\n help=\"Build source, can be source folder, pypi version specification, or url to a source dist archive or wheel file. 
If this is blank, it will build the current directory.\",\n ),\n output_directory: str = typer.Option(\n \"\",\n \"--outdir\",\n \"-o\",\n help=\"which directory should the output be placed into?\",\n ),\n output_directory_compat: str = typer.Option(\"\", \"--output-directory\", hidden=True),\n requirements_txt: str = typer.Option(\n \"\",\n \"--requirements\",\n \"-r\",\n help=\"Build a list of package requirements from a requirements.txt file\",\n ),\n exports: str = typer.Option(\n \"requested\",\n help=\"Which symbols should be exported when linking .so files?\",\n ),\n build_dependencies: bool = typer.Option(\n False, help=\"Fetch non-pyodide dependencies from pypi and build them too.\"\n ),\n output_lockfile: str = typer.Option(\n \"\",\n help=\"Output list of resolved dependencies to a file in requirements.txt format\",\n ),\n skip_dependency: list[str] = typer.Option(\n [],\n help=\"Skip building or resolving a single dependency. Use multiple times or provide a comma separated list to skip multiple dependencies.\",\n ),\n compression_level: int = typer.Option(\n 6, help=\"Compression level to use for the created zip file\"\n ),\n ctx: typer.Context = typer.Context,\n) -> None:\n \"\"\"Use pypa/build to build a Python package from source, pypi or url.\"\"\"\n if output_directory_compat:\n print(\n \"--output-directory is deprecated, use --outdir or -o instead\",\n file=sys.stderr,\n )\n if output_directory_compat and output_directory:\n print(\"Cannot provide both --outdir and --output-directory\", file=sys.stderr)\n sys.exit(1)\n output_directory = output_directory_compat or output_directory or \"./dist\"\n\n outpath = Path(output_directory).resolve()\n outpath.mkdir(exist_ok=True)\n extras: list[str] = []\n\n if len(requirements_txt) > 0:\n # a requirements.txt - build it (and optionally deps)\n if not Path(requirements_txt).exists():\n raise RuntimeError(\n f\"Couldn't find requirements text file {requirements_txt}\"\n )\n reqs = []\n with open(requirements_txt) as f:\n raw_reqs = [x.strip() for x in f.readlines()]\n for x in raw_reqs:\n # remove comments\n comment_pos = x.find(\"#\")\n if comment_pos != -1:\n x = x[:comment_pos].strip()\n if len(x) > 0:\n if x[0] == \"-\":\n raise RuntimeError(\n f\"pyodide build only supports name-based PEP508 requirements. [{x}] will not work.\"\n )\n if x.find(\"@\") != -1:\n raise RuntimeError(\n f\"pyodide build does not support URL based requirements. 
[{x}] will not work\"\n )\n reqs.append(x)\n try:\n build_wheels_from_pypi_requirements(\n reqs,\n outpath,\n build_dependencies,\n skip_dependency,\n exports,\n ctx.args,\n output_lockfile=output_lockfile,\n )\n except BaseException as e:\n import traceback\n\n print(\"Failed building multiple wheels:\", traceback.format_exc())\n raise e\n return\n\n if source_location is not None:\n extras = re.findall(r\"\\[(\\w+)\\]\", source_location)\n if len(extras) != 0:\n source_location = source_location[0 : source_location.find(\"[\")]\n if not source_location:\n # build the current folder\n wheel = source(Path.cwd(), outpath, exports, ctx)\n elif source_location.find(\"://\") != -1:\n wheel = url(source_location, outpath, exports, ctx)\n elif Path(source_location).is_dir():\n # a folder, build it\n wheel = source(Path(source_location).resolve(), outpath, exports, ctx)\n elif source_location.find(\"/\") == -1:\n # try fetch or build from pypi\n wheel = pypi(source_location, outpath, exports, ctx)\n else:\n raise RuntimeError(f\"Couldn't determine source type for {source_location}\")\n # now build deps for wheel\n if build_dependencies:\n try:\n build_dependencies_for_wheel(\n wheel,\n extras,\n skip_dependency,\n exports,\n ctx.args,\n output_lockfile=output_lockfile,\n compression_level=compression_level,\n )\n except BaseException as e:\n import traceback\n\n print(\"Failed building dependencies for wheel:\", traceback.format_exc())\n wheel.unlink()\n raise e\n\n\nmain.typer_kwargs = { # type: ignore[attr-defined]\n \"context_settings\": {\n \"ignore_unknown_options\": True,\n \"allow_extra_args\": True,\n },\n}\n", "path": "pyodide-build/pyodide_build/cli/build.py"}]}
| 2,703 | 386 |
gh_patches_debug_15071
|
rasdani/github-patches
|
git_diff
|
PlasmaPy__PlasmaPy-688
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update acknowledgements to include NSF CSSI grant
Good news, everyone! The NSF CSSI proposal that we submitted has officially been awarded! At the request of NSF program manager Slava Lukin, we should update our acknowledgements to include this grant and reflect the support from NSF. I'll assign myself to do this when the grant officially starts (which appears to have been moved up to October 1, 2019). The things that we need to do include:
- [x] Revise the sentence that we ask people to include in the acknowledgements section of their papers to reflect funding from NSF
- [x] Add an acknowledgements section at the end of `README.md`
- [x] Update `CITATION.md`
- [x] Update `docs/about/credits.rst`
- [x] Update `plasmapy.__citation__`
Another thing peripherally related to this issue is to:
- [x] Update the [proposal](https://doi.org/10.5281/zenodo.2633286) that we archived on Zenodo to include the answers to questions we had to send in and the panel's comments
I'll assign this to myself.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plasmapy/__init__.py`
Content:
```
1 """
2 PlasmaPy: A plasma physics Python package
3 ================================================
4
5 Documentation is available in the docstrings,
6 online at https://docs.plasmapy.org (accessible also using
7 the ``plasmapy.online_help`` function).
8
9 Contents
10 --------
11 PlasmaPy provides the following functionality:
12
13 Subpackages
14 -----------
15 Each of these subpackages requires an explicit import, for example,
16 via ``import plasmapy.physics``.
17
18 ::
19
20 atomic --- Database for atoms, isotopes, ions...
21 classes --- (WIP) classes used in multiple places
22 data --- Data used for testing and examples
23 diagnostics --- Experimental research data analysis
24 mathematics --- General formulae used elsewhere
25 physics --- Plasma theory functionality
26 transport --- Transport theory functionality
27 utils --- Various utilities
28
29 Utility tools
30 -------------
31 ::
32
33 test --- Run PlasmaPy unit tests
34 online_help --- Search the online documentation
35 __version__ --- PlasmaPy version string
36 __citation__ --- PlasmaPy citation template
37
38 """
39 # Licensed under a 3-clause BSD style license - see LICENSE.rst
40
41 # Packages may add whatever they like to this file, but
42 # should keep this content at the top.
43 # ----------------------------------------------------------------------------
44 from ._base_init import *
45 # ----------------------------------------------------------------------------
46
47 # Enforce Python version check during package import.
48 # This is the same check as the one at the top of setup.py
49 import sys
50
51 __name__ = "plasmapy"
52
53 if sys.version_info < tuple((int(val) for val in "3.6".split('.'))):
54 raise Exception("plasmapy does not support Python < {}".format(3.6))
55
56
57 def online_help(query):
58 """
59 Search the online PlasmaPy documentation for the given query from plasmapy.org
60 Opens the results in the default web browser.
61 Requires an active Internet connection.
62 Redirects to Astropy.units in case of query 'unit' or 'units'
63
64 Parameters
65 ----------
66 query : str
67 The search query.
68 """
69 from urllib.parse import urlencode
70 import webbrowser
71
72 url = ('http://docs.plasmapy.org/en/stable/search.html?'
73 '{0}&check_keywords=yes&area=default').format(urlencode({'q': query}))
74
75 if(query.lower() in ('unit', 'units')):
76 url = 'http://docs.astropy.org/en/stable/units/'
77
78 webbrowser.open(url)
79
80
81 __citation__ = [
82 "https://doi.org/10.5281/zenodo.1238132",
83 "https://doi.org/10.5281/zenodo.3235817",
84 ]
85
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/plasmapy/__init__.py b/plasmapy/__init__.py
--- a/plasmapy/__init__.py
+++ b/plasmapy/__init__.py
@@ -50,8 +50,13 @@
__name__ = "plasmapy"
+__citation__ = (
+ "Instructions on how to cite and acknowledge PlasmaPy are provided in the "
+ "online documentation at: http://docs.plasmapy.org/en/latest/about/citation.html"
+)
+
if sys.version_info < tuple((int(val) for val in "3.6".split('.'))):
- raise Exception("plasmapy does not support Python < {}".format(3.6))
+ raise Exception("PlasmaPy does not support Python < {}".format(3.6))
def online_help(query):
@@ -76,9 +81,3 @@
url = 'http://docs.astropy.org/en/stable/units/'
webbrowser.open(url)
-
-
-__citation__ = [
- "https://doi.org/10.5281/zenodo.1238132",
- "https://doi.org/10.5281/zenodo.3235817",
-]
|
{"golden_diff": "diff --git a/plasmapy/__init__.py b/plasmapy/__init__.py\n--- a/plasmapy/__init__.py\n+++ b/plasmapy/__init__.py\n@@ -50,8 +50,13 @@\n \n __name__ = \"plasmapy\"\n \n+__citation__ = (\n+ \"Instructions on how to cite and acknowledge PlasmaPy are provided in the \"\n+ \"online documentation at: http://docs.plasmapy.org/en/latest/about/citation.html\"\n+)\n+\n if sys.version_info < tuple((int(val) for val in \"3.6\".split('.'))):\n- raise Exception(\"plasmapy does not support Python < {}\".format(3.6))\n+ raise Exception(\"PlasmaPy does not support Python < {}\".format(3.6))\n \n \n def online_help(query):\n@@ -76,9 +81,3 @@\n url = 'http://docs.astropy.org/en/stable/units/'\n \n webbrowser.open(url)\n-\n-\n-__citation__ = [\n- \"https://doi.org/10.5281/zenodo.1238132\",\n- \"https://doi.org/10.5281/zenodo.3235817\",\n-]\n", "issue": "Update acknowledgements to include NSF CSSI grant\nGood news, everyone! The NSF CSSI proposal that we submitted has officially been awarded! At the request of NSF program manager Slava Lukin, we should update our acknowledgements to include this grant and reflect the support from NSF. I'll assign myself to do this when the grant officially starts (which appears to have been moved up to October 1, 2019). The things that we need to do include:\r\n\r\n - [x] Revise the sentence that we ask people to include in the acknowledgements section of their papers to reflect funding from NSF \r\n - [x] Add an acknowledgements section at the end of `README.md`\r\n - [x] Update `CITATION.md`\r\n - [x] Update `docs/about/credits.rst`\r\n - [x] Update `plasmapy.__citation__`\r\n\r\nAnother thing peripherally related to this issue is to:\r\n\r\n - [x] Update the [proposal](https://doi.org/10.5281/zenodo.2633286) that we archived on Zenodo to include the answers to questions we had to send in and the panel's comments\r\n\r\nI'll assign this to myself. 
\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nPlasmaPy: A plasma physics Python package\n================================================\n\nDocumentation is available in the docstrings,\nonline at https://docs.plasmapy.org (accessible also using\nthe ``plasmapy.online_help`` function).\n\nContents\n--------\nPlasmaPy provides the following functionality:\n\nSubpackages\n-----------\nEach of these subpackages requires an explicit import, for example,\nvia ``import plasmapy.physics``.\n\n::\n\n atomic --- Database for atoms, isotopes, ions...\n classes --- (WIP) classes used in multiple places\n data --- Data used for testing and examples\n diagnostics --- Experimental research data analysis\n mathematics --- General formulae used elsewhere\n physics --- Plasma theory functionality\n transport --- Transport theory functionality\n utils --- Various utilities\n\nUtility tools\n-------------\n::\n\n test --- Run PlasmaPy unit tests\n online_help --- Search the online documentation\n __version__ --- PlasmaPy version string\n __citation__ --- PlasmaPy citation template\n\n\"\"\"\n# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n# Packages may add whatever they like to this file, but\n# should keep this content at the top.\n# ----------------------------------------------------------------------------\nfrom ._base_init import *\n# ----------------------------------------------------------------------------\n\n# Enforce Python version check during package import.\n# This is the same check as the one at the top of setup.py\nimport sys\n\n__name__ = \"plasmapy\"\n\nif sys.version_info < tuple((int(val) for val in \"3.6\".split('.'))):\n raise Exception(\"plasmapy does not support Python < {}\".format(3.6))\n\n\ndef online_help(query):\n \"\"\"\n Search the online PlasmaPy documentation for the given query from plasmapy.org\n Opens the results in the default web browser.\n Requires an active Internet connection.\n Redirects to Astropy.units in case of query 'unit' or 'units'\n\n Parameters\n ----------\n query : str\n The search query.\n \"\"\"\n from urllib.parse import urlencode\n import webbrowser\n\n url = ('http://docs.plasmapy.org/en/stable/search.html?'\n '{0}&check_keywords=yes&area=default').format(urlencode({'q': query}))\n\n if(query.lower() in ('unit', 'units')):\n url = 'http://docs.astropy.org/en/stable/units/'\n\n webbrowser.open(url)\n\n\n__citation__ = [\n \"https://doi.org/10.5281/zenodo.1238132\",\n \"https://doi.org/10.5281/zenodo.3235817\",\n]\n", "path": "plasmapy/__init__.py"}], "after_files": [{"content": "\"\"\"\nPlasmaPy: A plasma physics Python package\n================================================\n\nDocumentation is available in the docstrings,\nonline at https://docs.plasmapy.org (accessible also using\nthe ``plasmapy.online_help`` function).\n\nContents\n--------\nPlasmaPy provides the following functionality:\n\nSubpackages\n-----------\nEach of these subpackages requires an explicit import, for example,\nvia ``import plasmapy.physics``.\n\n::\n\n atomic --- Database for atoms, isotopes, ions...\n classes --- (WIP) classes used in multiple places\n data --- Data used for testing and examples\n diagnostics --- Experimental research data analysis\n mathematics --- General formulae used elsewhere\n physics --- Plasma theory functionality\n transport --- Transport theory functionality\n utils --- Various utilities\n\nUtility tools\n-------------\n::\n\n test --- Run PlasmaPy unit tests\n online_help --- Search the online documentation\n 
__version__ --- PlasmaPy version string\n __citation__ --- PlasmaPy citation template\n\n\"\"\"\n# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n# Packages may add whatever they like to this file, but\n# should keep this content at the top.\n# ----------------------------------------------------------------------------\nfrom ._base_init import *\n# ----------------------------------------------------------------------------\n\n# Enforce Python version check during package import.\n# This is the same check as the one at the top of setup.py\nimport sys\n\n__name__ = \"plasmapy\"\n\n__citation__ = (\n \"Instructions on how to cite and acknowledge PlasmaPy are provided in the \"\n \"online documentation at: http://docs.plasmapy.org/en/latest/about/citation.html\"\n)\n\nif sys.version_info < tuple((int(val) for val in \"3.6\".split('.'))):\n raise Exception(\"PlasmaPy does not support Python < {}\".format(3.6))\n\n\ndef online_help(query):\n \"\"\"\n Search the online PlasmaPy documentation for the given query from plasmapy.org\n Opens the results in the default web browser.\n Requires an active Internet connection.\n Redirects to Astropy.units in case of query 'unit' or 'units'\n\n Parameters\n ----------\n query : str\n The search query.\n \"\"\"\n from urllib.parse import urlencode\n import webbrowser\n\n url = ('http://docs.plasmapy.org/en/stable/search.html?'\n '{0}&check_keywords=yes&area=default').format(urlencode({'q': query}))\n\n if(query.lower() in ('unit', 'units')):\n url = 'http://docs.astropy.org/en/stable/units/'\n\n webbrowser.open(url)\n", "path": "plasmapy/__init__.py"}]}
| 1,268 | 284 |
gh_patches_debug_26065
|
rasdani/github-patches
|
git_diff
|
iterative__dvc-2743
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
dvc: .dvcignore trouble with nfs mounted directory
I have a large NFS mounted in a directory that I would like dvc to ignore.
Directory Structure:
```
directory
|___nfs
|___...
|___.dvc
|___.dvcignore
```
My *.dvcignore* has the following line:
`/nfs/` (I've tried `nfs/` and `nfs/*`)
The problem is that when I run `dvc status` or `dvc pull` the processes will just hang:
```
DEBUG: PRAGMA user_version;
DEBUG: fetched: [(3,)]
DEBUG: CREATE TABLE IF NOT EXISTS state (inode INTEGER PRIMARY KEY, mtime TEXT NOT NULL, size TEXT NOT NULL, md5 TEXT NOT NULL, timestamp TEXT NOT NULL)
DEBUG: CREATE TABLE IF NOT EXISTS state_info (count INTEGER)
DEBUG: CREATE TABLE IF NOT EXISTS link_state (path TEXT PRIMARY KEY, inode INTEGER NOT NULL, mtime TEXT NOT NULL)
DEBUG: INSERT OR IGNORE INTO state_info (count) SELECT 0 WHERE NOT EXISTS (SELECT * FROM state_info)
DEBUG: PRAGMA user_version = 3;
```
Here is the traceback from `KeyboardInterrupt`:
```
File "/home/ec2-user/app/proc/.env/lib/python3.7/site-packages/dvc/repo/__init__.py", line 499, in dvcignore
return DvcIgnoreFilter(self.root_dir)
File "/home/ec2-user/app/proc/.env/lib/python3.7/site-packages/dvc/ignore.py", line 67, in __init__
for root, dirs, _ in os.walk(root_dir):
File "/home/ec2-user/app/proc/.env/lib64/python3.7/os.py", line 410, in walk
yield from walk(new_path, topdown, onerror, followlinks)
File "/home/ec2-user/app/proc/.env/lib64/python3.7/os.py", line 368, in walk
is_dir = entry.is_dir()
```
Which makes me feel like the directory is not being ignored.
***Additonal***
I've unmounted the NFS directory and ran `dvc status` with no problem so I believe the issue stems from dvc trying to traverse it.
System Information:
```
DVC version: 0.66.6
Python version: 3.7.4
Platform: Linux 4.14.109-99.92.amzn2.x86_64
Installation: pip
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dvc/ignore.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import logging
4 import os
5
6 from pathspec import PathSpec
7 from pathspec.patterns import GitWildMatchPattern
8
9 from dvc.utils import relpath
10 from dvc.utils.compat import open
11
12 logger = logging.getLogger(__name__)
13
14
15 class DvcIgnore(object):
16 DVCIGNORE_FILE = ".dvcignore"
17
18 def __call__(self, root, dirs, files):
19 raise NotImplementedError
20
21
22 class DvcIgnorePatterns(DvcIgnore):
23 def __init__(self, ignore_file_path):
24 assert os.path.isabs(ignore_file_path)
25
26 self.ignore_file_path = ignore_file_path
27 self.dirname = os.path.normpath(os.path.dirname(ignore_file_path))
28
29 with open(ignore_file_path, encoding="utf-8") as fobj:
30 self.ignore_spec = PathSpec.from_lines(GitWildMatchPattern, fobj)
31
32 def __call__(self, root, dirs, files):
33 files = [f for f in files if not self.matches(root, f)]
34 dirs = [d for d in dirs if not self.matches(root, d)]
35
36 return dirs, files
37
38 def matches(self, dirname, basename):
39 abs_path = os.path.join(dirname, basename)
40 rel_path = relpath(abs_path, self.dirname)
41
42 if os.pardir + os.sep in rel_path:
43 return False
44 return self.ignore_spec.match_file(rel_path)
45
46 def __hash__(self):
47 return hash(self.ignore_file_path)
48
49 def __eq__(self, other):
50 return self.ignore_file_path == other.ignore_file_path
51
52
53 class DvcIgnoreDirs(DvcIgnore):
54 def __init__(self, basenames):
55 self.basenames = set(basenames)
56
57 def __call__(self, root, dirs, files):
58 dirs = [d for d in dirs if d not in self.basenames]
59
60 return dirs, files
61
62
63 class DvcIgnoreFilter(object):
64 def __init__(self, root_dir):
65 self.ignores = {DvcIgnoreDirs([".git", ".hg", ".dvc"])}
66 self._update(root_dir)
67 for root, dirs, _ in os.walk(root_dir):
68 for d in dirs:
69 self._update(os.path.join(root, d))
70
71 def _update(self, dirname):
72 ignore_file_path = os.path.join(dirname, DvcIgnore.DVCIGNORE_FILE)
73 if os.path.exists(ignore_file_path):
74 self.ignores.add(DvcIgnorePatterns(ignore_file_path))
75
76 def __call__(self, root, dirs, files):
77 for ignore in self.ignores:
78 dirs, files = ignore(root, dirs, files)
79
80 return dirs, files
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/dvc/ignore.py b/dvc/ignore.py
--- a/dvc/ignore.py
+++ b/dvc/ignore.py
@@ -6,6 +6,7 @@
from pathspec import PathSpec
from pathspec.patterns import GitWildMatchPattern
+from dvc.utils import dvc_walk
from dvc.utils import relpath
from dvc.utils.compat import open
@@ -47,6 +48,9 @@
return hash(self.ignore_file_path)
def __eq__(self, other):
+ if not isinstance(other, DvcIgnorePatterns):
+ return NotImplemented
+
return self.ignore_file_path == other.ignore_file_path
@@ -59,12 +63,21 @@
return dirs, files
+ def __hash__(self):
+ return hash(tuple(self.basenames))
+
+ def __eq__(self, other):
+ if not isinstance(other, DvcIgnoreDirs):
+ return NotImplemented
+
+ return self.basenames == other.basenames
+
class DvcIgnoreFilter(object):
def __init__(self, root_dir):
self.ignores = {DvcIgnoreDirs([".git", ".hg", ".dvc"])}
self._update(root_dir)
- for root, dirs, _ in os.walk(root_dir):
+ for root, dirs, _ in dvc_walk(root_dir, self):
for d in dirs:
self._update(os.path.join(root, d))
|
{"golden_diff": "diff --git a/dvc/ignore.py b/dvc/ignore.py\n--- a/dvc/ignore.py\n+++ b/dvc/ignore.py\n@@ -6,6 +6,7 @@\n from pathspec import PathSpec\n from pathspec.patterns import GitWildMatchPattern\n \n+from dvc.utils import dvc_walk\n from dvc.utils import relpath\n from dvc.utils.compat import open\n \n@@ -47,6 +48,9 @@\n return hash(self.ignore_file_path)\n \n def __eq__(self, other):\n+ if not isinstance(other, DvcIgnorePatterns):\n+ return NotImplemented\n+\n return self.ignore_file_path == other.ignore_file_path\n \n \n@@ -59,12 +63,21 @@\n \n return dirs, files\n \n+ def __hash__(self):\n+ return hash(tuple(self.basenames))\n+\n+ def __eq__(self, other):\n+ if not isinstance(other, DvcIgnoreDirs):\n+ return NotImplemented\n+\n+ return self.basenames == other.basenames\n+\n \n class DvcIgnoreFilter(object):\n def __init__(self, root_dir):\n self.ignores = {DvcIgnoreDirs([\".git\", \".hg\", \".dvc\"])}\n self._update(root_dir)\n- for root, dirs, _ in os.walk(root_dir):\n+ for root, dirs, _ in dvc_walk(root_dir, self):\n for d in dirs:\n self._update(os.path.join(root, d))\n", "issue": "dvc: .dvcignore trouble with nfs mounted directory\nI have a large NFS mounted in a directory that I would like dvc to ignore. \r\n\r\nDirectory Structure:\r\n```\r\ndirectory\r\n|___nfs\r\n|___...\r\n|___.dvc\r\n|___.dvcignore\r\n```\r\nMy *.dvcignore* has the following line:\r\n`/nfs/` (I've tried `nfs/` and `nfs/*`)\r\n\r\nThe problem is that when I run `dvc status` or `dvc pull` the processes will just hang:\r\n```\r\nDEBUG: PRAGMA user_version;\r\nDEBUG: fetched: [(3,)]\r\nDEBUG: CREATE TABLE IF NOT EXISTS state (inode INTEGER PRIMARY KEY, mtime TEXT NOT NULL, size TEXT NOT NULL, md5 TEXT NOT NULL, timestamp TEXT NOT NULL)\r\nDEBUG: CREATE TABLE IF NOT EXISTS state_info (count INTEGER)\r\nDEBUG: CREATE TABLE IF NOT EXISTS link_state (path TEXT PRIMARY KEY, inode INTEGER NOT NULL, mtime TEXT NOT NULL)\r\nDEBUG: INSERT OR IGNORE INTO state_info (count) SELECT 0 WHERE NOT EXISTS (SELECT * FROM state_info)\r\nDEBUG: PRAGMA user_version = 3; \r\n```\r\n\r\nHere is the traceback from `KeyboardInterrupt`:\r\n```\r\n File \"/home/ec2-user/app/proc/.env/lib/python3.7/site-packages/dvc/repo/__init__.py\", line 499, in dvcignore\r\n return DvcIgnoreFilter(self.root_dir)\r\n File \"/home/ec2-user/app/proc/.env/lib/python3.7/site-packages/dvc/ignore.py\", line 67, in __init__\r\n for root, dirs, _ in os.walk(root_dir):\r\n File \"/home/ec2-user/app/proc/.env/lib64/python3.7/os.py\", line 410, in walk\r\n yield from walk(new_path, topdown, onerror, followlinks)\r\n File \"/home/ec2-user/app/proc/.env/lib64/python3.7/os.py\", line 368, in walk\r\n is_dir = entry.is_dir() \r\n```\r\nWhich makes me feel like the directory is not being ignored.\r\n\r\n***Additonal***\r\nI've unmounted the NFS directory and ran `dvc status` with no problem so I believe the issue stems from dvc trying to traverse it.\r\n\r\nSystem Information:\r\n``` \r\nDVC version: 0.66.6\r\nPython version: 3.7.4\r\nPlatform: Linux 4.14.109-99.92.amzn2.x86_64\r\nInstallation: pip\r\n```\r\n\r\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport logging\nimport os\n\nfrom pathspec import PathSpec\nfrom pathspec.patterns import GitWildMatchPattern\n\nfrom dvc.utils import relpath\nfrom dvc.utils.compat import open\n\nlogger = logging.getLogger(__name__)\n\n\nclass DvcIgnore(object):\n DVCIGNORE_FILE = \".dvcignore\"\n\n def __call__(self, root, dirs, files):\n raise NotImplementedError\n\n\nclass 
DvcIgnorePatterns(DvcIgnore):\n def __init__(self, ignore_file_path):\n assert os.path.isabs(ignore_file_path)\n\n self.ignore_file_path = ignore_file_path\n self.dirname = os.path.normpath(os.path.dirname(ignore_file_path))\n\n with open(ignore_file_path, encoding=\"utf-8\") as fobj:\n self.ignore_spec = PathSpec.from_lines(GitWildMatchPattern, fobj)\n\n def __call__(self, root, dirs, files):\n files = [f for f in files if not self.matches(root, f)]\n dirs = [d for d in dirs if not self.matches(root, d)]\n\n return dirs, files\n\n def matches(self, dirname, basename):\n abs_path = os.path.join(dirname, basename)\n rel_path = relpath(abs_path, self.dirname)\n\n if os.pardir + os.sep in rel_path:\n return False\n return self.ignore_spec.match_file(rel_path)\n\n def __hash__(self):\n return hash(self.ignore_file_path)\n\n def __eq__(self, other):\n return self.ignore_file_path == other.ignore_file_path\n\n\nclass DvcIgnoreDirs(DvcIgnore):\n def __init__(self, basenames):\n self.basenames = set(basenames)\n\n def __call__(self, root, dirs, files):\n dirs = [d for d in dirs if d not in self.basenames]\n\n return dirs, files\n\n\nclass DvcIgnoreFilter(object):\n def __init__(self, root_dir):\n self.ignores = {DvcIgnoreDirs([\".git\", \".hg\", \".dvc\"])}\n self._update(root_dir)\n for root, dirs, _ in os.walk(root_dir):\n for d in dirs:\n self._update(os.path.join(root, d))\n\n def _update(self, dirname):\n ignore_file_path = os.path.join(dirname, DvcIgnore.DVCIGNORE_FILE)\n if os.path.exists(ignore_file_path):\n self.ignores.add(DvcIgnorePatterns(ignore_file_path))\n\n def __call__(self, root, dirs, files):\n for ignore in self.ignores:\n dirs, files = ignore(root, dirs, files)\n\n return dirs, files\n", "path": "dvc/ignore.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport logging\nimport os\n\nfrom pathspec import PathSpec\nfrom pathspec.patterns import GitWildMatchPattern\n\nfrom dvc.utils import dvc_walk\nfrom dvc.utils import relpath\nfrom dvc.utils.compat import open\n\nlogger = logging.getLogger(__name__)\n\n\nclass DvcIgnore(object):\n DVCIGNORE_FILE = \".dvcignore\"\n\n def __call__(self, root, dirs, files):\n raise NotImplementedError\n\n\nclass DvcIgnorePatterns(DvcIgnore):\n def __init__(self, ignore_file_path):\n assert os.path.isabs(ignore_file_path)\n\n self.ignore_file_path = ignore_file_path\n self.dirname = os.path.normpath(os.path.dirname(ignore_file_path))\n\n with open(ignore_file_path, encoding=\"utf-8\") as fobj:\n self.ignore_spec = PathSpec.from_lines(GitWildMatchPattern, fobj)\n\n def __call__(self, root, dirs, files):\n files = [f for f in files if not self.matches(root, f)]\n dirs = [d for d in dirs if not self.matches(root, d)]\n\n return dirs, files\n\n def matches(self, dirname, basename):\n abs_path = os.path.join(dirname, basename)\n rel_path = relpath(abs_path, self.dirname)\n\n if os.pardir + os.sep in rel_path:\n return False\n return self.ignore_spec.match_file(rel_path)\n\n def __hash__(self):\n return hash(self.ignore_file_path)\n\n def __eq__(self, other):\n if not isinstance(other, DvcIgnorePatterns):\n return NotImplemented\n\n return self.ignore_file_path == other.ignore_file_path\n\n\nclass DvcIgnoreDirs(DvcIgnore):\n def __init__(self, basenames):\n self.basenames = set(basenames)\n\n def __call__(self, root, dirs, files):\n dirs = [d for d in dirs if d not in self.basenames]\n\n return dirs, files\n\n def __hash__(self):\n return hash(tuple(self.basenames))\n\n def __eq__(self, other):\n if not isinstance(other, 
DvcIgnoreDirs):\n return NotImplemented\n\n return self.basenames == other.basenames\n\n\nclass DvcIgnoreFilter(object):\n def __init__(self, root_dir):\n self.ignores = {DvcIgnoreDirs([\".git\", \".hg\", \".dvc\"])}\n self._update(root_dir)\n for root, dirs, _ in dvc_walk(root_dir, self):\n for d in dirs:\n self._update(os.path.join(root, d))\n\n def _update(self, dirname):\n ignore_file_path = os.path.join(dirname, DvcIgnore.DVCIGNORE_FILE)\n if os.path.exists(ignore_file_path):\n self.ignores.add(DvcIgnorePatterns(ignore_file_path))\n\n def __call__(self, root, dirs, files):\n for ignore in self.ignores:\n dirs, files = ignore(root, dirs, files)\n\n return dirs, files\n", "path": "dvc/ignore.py"}]}
| 1,555 | 325 |
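The dvc patch in the record above swaps `os.walk(root_dir)` for `dvc_walk(root_dir, self)` so the ignore rules prune directories while the tree is being walked, instead of descending into (and hanging on) an ignored NFS mount first. The record does not show `dvc_walk` itself, so the snippet below is only a minimal sketch of that pruning idea; `filtered_walk` and `drop_nfs` are hypothetical names for illustration, not DVC APIs.

```python
import os

def filtered_walk(root_dir, ignore_filter):
    # Walk top-down and let the filter prune dirs/files *before* os.walk
    # descends into them -- ignored subtrees are never visited at all.
    for root, dirs, files in os.walk(root_dir, topdown=True):
        new_dirs, new_files = ignore_filter(root, dirs, files)
        dirs[:] = new_dirs          # in-place update is what actually prunes
        yield root, new_dirs, new_files

def drop_nfs(root, dirs, files):
    # Stand-in for a DvcIgnore-style callable: skip a mounted "nfs" directory.
    return [d for d in dirs if d != "nfs"], files

for root, dirs, files in filtered_walk(".", drop_nfs):
    print(root, dirs, files)
```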
gh_patches_debug_19143 | rasdani/github-patches | git_diff | conan-io__conan-2999 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
why does conan detect gcc 1 ?
This is running in a docker linux container, clang 6 is installed. gcc is not.
```bash
`[openssl] $ sh -c "conan create -s build_type=Debug -s compiler=clang -s compiler.libcxx=libstdc++11 -s compiler.version=6.0 . turboit/openssl "
OpenSSL/1.0.2o@turboit/openssl: Exporting package recipe
Linter warnings
WARN: Linter. Line 101: Anomalous backslash in string: '\$'. String constant might be missing an r prefix.
WARN: Linter. Line 101: Anomalous backslash in string: '\$'. String constant might be missing an r prefix.
WARN: Linter. Line 101: Anomalous backslash in string: '\$'. String constant might be missing an r prefix.
WARN: Linter. Line 101: Anomalous backslash in string: '\$'. String constant might be missing an r prefix.
WARN: Linter. Line 102: Anomalous backslash in string: '\$'. String constant might be missing an r prefix.
WARN: Linter. Line 102: Anomalous backslash in string: '\$'. String constant might be missing an r prefix.
OpenSSL/1.0.2o@turboit/openssl: A new conanfile.py version was exported
OpenSSL/1.0.2o@turboit/openssl: Folder: /root/workspace/conan/openssl@tmp/conan.tmp6052754136315568067/.conan/data/OpenSSL/1.0.2o/turboit/openssl/export
Auto detecting your dev setup to initialize the default profile (/root/workspace/conan/openssl@tmp/conan.tmp6052754136315568067/.conan/profiles/default)
Found gcc 1
Found clang 6.0
Default settings
os=Linux
os_build=Linux
arch=x86_64
arch_build=x86_64
compiler=gcc
compiler.version=1
compiler.libcxx=libstdc++
build_type=Release
*** You can change them in /root/workspace/conan/openssl@tmp/conan.tmp6052754136315568067/.conan/profiles/default ***
*** Or override with -s compiler='other' -s ...s***
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conans/client/conf/detect.py`
Content:
```
1 import os
2 import platform
3 import re
4 from subprocess import Popen, PIPE, STDOUT
5
6 from conans.client.output import Color
7 from conans.model.version import Version
8 from conans.tools import vs_installation_path
9
10
11 def _execute(command):
12 proc = Popen(command, shell=True, bufsize=1, stdout=PIPE, stderr=STDOUT)
13
14 output_buffer = []
15 while True:
16 line = proc.stdout.readline()
17 if not line:
18 break
19 # output.write(line)
20 output_buffer.append(str(line))
21
22 proc.communicate()
23 return proc.returncode, "".join(output_buffer)
24
25
26 def _gcc_compiler(output, compiler_exe="gcc"):
27
28 try:
29 if platform.system() == "Darwin":
30 # In Mac OS X check if gcc is a fronted using apple-clang
31 _, out = _execute("%s --version" % compiler_exe)
32 out = out.lower()
33 if "clang" in out:
34 return None
35
36 _, out = _execute('%s -dumpversion' % compiler_exe)
37 compiler = "gcc"
38 installed_version = re.search("([0-9](\.[0-9])?)", out).group()
39 # Since GCC 7.1, -dumpversion return the major version number
40 # only ("7"). We must use -dumpfullversion to get the full version
41 # number ("7.1.1").
42 if installed_version:
43 output.success("Found %s %s" % (compiler, installed_version))
44 major = installed_version.split(".")[0]
45 if int(major) >= 5:
46 output.info("gcc>=5, using the major as version")
47 installed_version = major
48 return compiler, installed_version
49 except:
50 return None
51
52
53 def _clang_compiler(output, compiler_exe="clang"):
54 try:
55 _, out = _execute('%s --version' % compiler_exe)
56 if "Apple" in out:
57 compiler = "apple-clang"
58 elif "clang version" in out:
59 compiler = "clang"
60 installed_version = re.search("([0-9]\.[0-9])", out).group()
61 if installed_version:
62 output.success("Found %s %s" % (compiler, installed_version))
63 return compiler, installed_version
64 except:
65 return None
66
67
68 def _visual_compiler_cygwin(output, version):
69 if os.path.isfile("/proc/registry/HKEY_LOCAL_MACHINE/SOFTWARE/Microsoft/Windows/CurrentVersion/ProgramFilesDir (x86)"):
70 is_64bits = True
71 else:
72 is_64bits = False
73
74 if is_64bits:
75 key_name = r'HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\Microsoft\VisualStudio\SxS\VC7'
76 else:
77 key_name = r'HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\VisualStudio\SxS\VC7'
78
79 if not os.path.isfile("/proc/registry/" + key_name.replace('\\', '/') + "/" + version):
80 return None
81
82 installed_version = Version(version).major(fill=False)
83 compiler = "Visual Studio"
84 output.success("CYGWIN: Found %s %s" % (compiler, installed_version))
85 return compiler, installed_version
86
87
88 def _visual_compiler(output, version):
89 'version have to be 8.0, or 9.0 or... anything .0'
90 if platform.system().startswith("CYGWIN"):
91 return _visual_compiler_cygwin(output, version)
92
93 if version == "15":
94 vs_path = os.getenv('vs150comntools')
95 path = vs_path or vs_installation_path("15")
96 if path:
97 compiler = "Visual Studio"
98 output.success("Found %s %s" % (compiler, "15"))
99 return compiler, "15"
100 return None
101
102 version = "%s.0" % version
103 from six.moves import winreg # @UnresolvedImport
104 try:
105 hKey = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
106 r"SOFTWARE\Microsoft\Windows\CurrentVersion")
107 winreg.QueryValueEx(hKey, "ProgramFilesDir (x86)")
108 is_64bits = True
109 except EnvironmentError:
110 is_64bits = False
111 finally:
112 winreg.CloseKey(hKey)
113
114 if is_64bits:
115 key_name = r'SOFTWARE\Wow6432Node\Microsoft\VisualStudio\SxS\VC7'
116 else:
117 key_name = r'HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\VisualStudio\SxS\VC7'
118
119 try:
120 key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, key_name)
121 winreg.QueryValueEx(key, version)
122
123 installed_version = Version(version).major(fill=False)
124 compiler = "Visual Studio"
125 output.success("Found %s %s" % (compiler, installed_version))
126 return compiler, installed_version
127 except EnvironmentError:
128 return None
129
130
131 def _visual_compiler_last(output):
132 last_version = None
133 for version in ["8", "9", "10", "11", "12", "14", "15"]:
134 vs = _visual_compiler(output, version)
135 last_version = vs or last_version
136 return last_version
137
138
139 def _sun_cc_compiler(output, compiler_exe="cc"):
140 try:
141 _, out = _execute('%s -V' % compiler_exe)
142 compiler = "sun-cc"
143 installed_version = re.search("([0-9]+\.[0-9]+)", out).group()
144 if installed_version:
145 output.success("Found %s %s" % (compiler, installed_version))
146 return compiler, installed_version
147 except:
148 return None
149
150
151 def _get_default_compiler(output):
152 cc = os.environ.get("CC", "")
153 cxx = os.environ.get("CXX", "")
154 if cc or cxx: # Env defined, use them
155 output.info("CC and CXX: %s, %s " % (cc or "None", cxx or "None"))
156 command = cc or cxx
157 if "gcc" in command:
158 gcc = _gcc_compiler(output, command)
159 if platform.system() == "Darwin" and gcc is None:
160 output.error(
161 "%s detected as a frontend using apple-clang. Compiler not supported" % command
162 )
163 return gcc
164 if "clang" in command.lower():
165 return _clang_compiler(output, command)
166 if platform.system() == "SunOS" and command.lower() == "cc":
167 return _sun_cc_compiler(output, command)
168 # I am not able to find its version
169 output.error("Not able to automatically detect '%s' version" % command)
170 return None
171
172 if detected_os() == "Windows":
173 vs = _visual_compiler_last(output)
174 gcc = _gcc_compiler(output)
175 clang = _clang_compiler(output)
176 if platform.system() == "SunOS":
177 sun_cc = _sun_cc_compiler(output)
178
179 if detected_os() == "Windows":
180 return vs or gcc or clang
181 elif platform.system() == "Darwin":
182 return clang or gcc
183 elif platform.system() == "SunOS":
184 return sun_cc or gcc or clang
185 else:
186 return gcc or clang
187
188
189 def _detect_compiler_version(result, output):
190 try:
191 compiler, version = _get_default_compiler(output)
192 except:
193 compiler, version = None, None
194 if not compiler or not version:
195 output.error("Unable to find a working compiler")
196 else:
197 result.append(("compiler", compiler))
198 result.append(("compiler.version", version))
199 if compiler == "apple-clang":
200 result.append(("compiler.libcxx", "libc++"))
201 elif compiler == "gcc":
202 result.append(("compiler.libcxx", "libstdc++"))
203 if Version(version) >= Version("5.1"):
204
205 msg = """
206 Conan detected a GCC version > 5 but has adjusted the 'compiler.libcxx' setting to
207 'libstdc++' for backwards compatibility.
208 Your compiler is likely using the new CXX11 ABI by default (libstdc++11).
209
210 If you want Conan to use the new ABI, edit the default profile at:
211
212 ~/.conan/profiles/default
213
214 adjusting 'compiler.libcxx=libstdc++11'
215 """
216 output.writeln("\n************************* WARNING: GCC OLD ABI COMPATIBILITY "
217 "***********************\n %s\n************************************"
218 "************************************************\n\n\n" % msg,
219 Color.BRIGHT_RED)
220 elif compiler == "cc":
221 if platform.system() == "SunOS":
222 result.append(("compiler.libstdcxx", "libstdcxx4"))
223 elif compiler == "clang":
224 if platform.system() == "FreeBSD":
225 result.append(("compiler.libcxx", "libc++"))
226 else:
227 result.append(("compiler.libcxx", "libstdc++"))
228 elif compiler == "sun-cc":
229 result.append(("compiler.libcxx", "libCstd"))
230
231
232 def detected_os():
233 result = platform.system()
234 if result == "Darwin":
235 return "Macos"
236 if result.startswith("CYGWIN"):
237 return "Windows"
238 if result.startswith("MINGW32_NT") or result.startswith("MINGW64_NT"):
239 return "Windows"
240 if result.startswith("MSYS_NT"):
241 return "Windows"
242 return result
243
244
245 def _detect_os_arch(result, output):
246 architectures = {'i386': 'x86',
247 'i686': 'x86',
248 'i86pc': 'x86',
249 'amd64': 'x86_64',
250 'aarch64': 'armv8',
251 'sun4v': 'sparc'}
252 the_os = detected_os()
253 result.append(("os", the_os))
254 result.append(("os_build", the_os))
255 arch = architectures.get(platform.machine().lower(), platform.machine().lower())
256 if arch.startswith('arm'):
257 for a in ("armv6", "armv7hf", "armv7", "armv8"):
258 if arch.startswith(a):
259 arch = a
260 break
261 else:
262 output.error("Your ARM '%s' architecture is probably not defined in settings.yml\n"
263 "Please check your conan.conf and settings.yml files" % arch)
264 result.append(("arch", arch))
265 result.append(("arch_build", arch))
266
267
268 def detect_defaults_settings(output):
269 """ try to deduce current machine values without any constraints at all
270 """
271 result = []
272 _detect_os_arch(result, output)
273 _detect_compiler_version(result, output)
274 result.append(("build_type", "Release"))
275
276 return result
277
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/conans/client/conf/detect.py b/conans/client/conf/detect.py
--- a/conans/client/conf/detect.py
+++ b/conans/client/conf/detect.py
@@ -33,7 +33,9 @@
if "clang" in out:
return None
- _, out = _execute('%s -dumpversion' % compiler_exe)
+ ret, out = _execute('%s -dumpversion' % compiler_exe)
+ if ret != 0:
+ return None
compiler = "gcc"
installed_version = re.search("([0-9](\.[0-9])?)", out).group()
# Since GCC 7.1, -dumpversion return the major version number
@@ -52,7 +54,9 @@
def _clang_compiler(output, compiler_exe="clang"):
try:
- _, out = _execute('%s --version' % compiler_exe)
+ ret, out = _execute('%s --version' % compiler_exe)
+ if ret != 0:
+ return None
if "Apple" in out:
compiler = "apple-clang"
elif "clang version" in out:
|
{"golden_diff": "diff --git a/conans/client/conf/detect.py b/conans/client/conf/detect.py\n--- a/conans/client/conf/detect.py\n+++ b/conans/client/conf/detect.py\n@@ -33,7 +33,9 @@\n if \"clang\" in out:\n return None\n \n- _, out = _execute('%s -dumpversion' % compiler_exe)\n+ ret, out = _execute('%s -dumpversion' % compiler_exe)\n+ if ret != 0:\n+ return None\n compiler = \"gcc\"\n installed_version = re.search(\"([0-9](\\.[0-9])?)\", out).group()\n # Since GCC 7.1, -dumpversion return the major version number\n@@ -52,7 +54,9 @@\n \n def _clang_compiler(output, compiler_exe=\"clang\"):\n try:\n- _, out = _execute('%s --version' % compiler_exe)\n+ ret, out = _execute('%s --version' % compiler_exe)\n+ if ret != 0:\n+ return None\n if \"Apple\" in out:\n compiler = \"apple-clang\"\n elif \"clang version\" in out:\n", "issue": "why does conan detect gcc 1 ?\nThis is running in a docker linux container, clang 6 is installed. gcc is not.\r\n\r\n```bash\r\n`[openssl] $ sh -c \"conan create -s build_type=Debug -s compiler=clang -s compiler.libcxx=libstdc++11 -s compiler.version=6.0 . turboit/openssl \"\r\nOpenSSL/1.0.2o@turboit/openssl: Exporting package recipe\r\nLinter warnings\r\n WARN: Linter. Line 101: Anomalous backslash in string: '\\$'. String constant might be missing an r prefix.\r\n WARN: Linter. Line 101: Anomalous backslash in string: '\\$'. String constant might be missing an r prefix.\r\n WARN: Linter. Line 101: Anomalous backslash in string: '\\$'. String constant might be missing an r prefix.\r\n WARN: Linter. Line 101: Anomalous backslash in string: '\\$'. String constant might be missing an r prefix.\r\n WARN: Linter. Line 102: Anomalous backslash in string: '\\$'. String constant might be missing an r prefix.\r\n WARN: Linter. Line 102: Anomalous backslash in string: '\\$'. 
String constant might be missing an r prefix.\r\n\r\nOpenSSL/1.0.2o@turboit/openssl: A new conanfile.py version was exported\r\n\r\nOpenSSL/1.0.2o@turboit/openssl: Folder: /root/workspace/conan/openssl@tmp/conan.tmp6052754136315568067/.conan/data/OpenSSL/1.0.2o/turboit/openssl/export\r\n\r\nAuto detecting your dev setup to initialize the default profile (/root/workspace/conan/openssl@tmp/conan.tmp6052754136315568067/.conan/profiles/default)\r\n\r\nFound gcc 1\r\nFound clang 6.0\r\nDefault settings\r\n\tos=Linux\r\n\tos_build=Linux\r\n\tarch=x86_64\r\n\tarch_build=x86_64\r\n\tcompiler=gcc\r\n\tcompiler.version=1\r\n\tcompiler.libcxx=libstdc++\r\n\tbuild_type=Release\r\n\r\n*** You can change them in /root/workspace/conan/openssl@tmp/conan.tmp6052754136315568067/.conan/profiles/default ***\r\n\r\n*** Or override with -s compiler='other' -s ...s***\r\n```\n", "before_files": [{"content": "import os\nimport platform\nimport re\nfrom subprocess import Popen, PIPE, STDOUT\n\nfrom conans.client.output import Color\nfrom conans.model.version import Version\nfrom conans.tools import vs_installation_path\n\n\ndef _execute(command):\n proc = Popen(command, shell=True, bufsize=1, stdout=PIPE, stderr=STDOUT)\n\n output_buffer = []\n while True:\n line = proc.stdout.readline()\n if not line:\n break\n # output.write(line)\n output_buffer.append(str(line))\n\n proc.communicate()\n return proc.returncode, \"\".join(output_buffer)\n\n\ndef _gcc_compiler(output, compiler_exe=\"gcc\"):\n\n try:\n if platform.system() == \"Darwin\":\n # In Mac OS X check if gcc is a fronted using apple-clang\n _, out = _execute(\"%s --version\" % compiler_exe)\n out = out.lower()\n if \"clang\" in out:\n return None\n\n _, out = _execute('%s -dumpversion' % compiler_exe)\n compiler = \"gcc\"\n installed_version = re.search(\"([0-9](\\.[0-9])?)\", out).group()\n # Since GCC 7.1, -dumpversion return the major version number\n # only (\"7\"). 
We must use -dumpfullversion to get the full version\n # number (\"7.1.1\").\n if installed_version:\n output.success(\"Found %s %s\" % (compiler, installed_version))\n major = installed_version.split(\".\")[0]\n if int(major) >= 5:\n output.info(\"gcc>=5, using the major as version\")\n installed_version = major\n return compiler, installed_version\n except:\n return None\n\n\ndef _clang_compiler(output, compiler_exe=\"clang\"):\n try:\n _, out = _execute('%s --version' % compiler_exe)\n if \"Apple\" in out:\n compiler = \"apple-clang\"\n elif \"clang version\" in out:\n compiler = \"clang\"\n installed_version = re.search(\"([0-9]\\.[0-9])\", out).group()\n if installed_version:\n output.success(\"Found %s %s\" % (compiler, installed_version))\n return compiler, installed_version\n except:\n return None\n\n\ndef _visual_compiler_cygwin(output, version):\n if os.path.isfile(\"/proc/registry/HKEY_LOCAL_MACHINE/SOFTWARE/Microsoft/Windows/CurrentVersion/ProgramFilesDir (x86)\"):\n is_64bits = True\n else:\n is_64bits = False\n\n if is_64bits:\n key_name = r'HKEY_LOCAL_MACHINE\\SOFTWARE\\Wow6432Node\\Microsoft\\VisualStudio\\SxS\\VC7'\n else:\n key_name = r'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\VisualStudio\\SxS\\VC7'\n\n if not os.path.isfile(\"/proc/registry/\" + key_name.replace('\\\\', '/') + \"/\" + version):\n return None\n\n installed_version = Version(version).major(fill=False)\n compiler = \"Visual Studio\"\n output.success(\"CYGWIN: Found %s %s\" % (compiler, installed_version))\n return compiler, installed_version\n\n\ndef _visual_compiler(output, version):\n 'version have to be 8.0, or 9.0 or... anything .0'\n if platform.system().startswith(\"CYGWIN\"):\n return _visual_compiler_cygwin(output, version)\n\n if version == \"15\":\n vs_path = os.getenv('vs150comntools')\n path = vs_path or vs_installation_path(\"15\")\n if path:\n compiler = \"Visual Studio\"\n output.success(\"Found %s %s\" % (compiler, \"15\"))\n return compiler, \"15\"\n return None\n\n version = \"%s.0\" % version\n from six.moves import winreg # @UnresolvedImport\n try:\n hKey = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,\n r\"SOFTWARE\\Microsoft\\Windows\\CurrentVersion\")\n winreg.QueryValueEx(hKey, \"ProgramFilesDir (x86)\")\n is_64bits = True\n except EnvironmentError:\n is_64bits = False\n finally:\n winreg.CloseKey(hKey)\n\n if is_64bits:\n key_name = r'SOFTWARE\\Wow6432Node\\Microsoft\\VisualStudio\\SxS\\VC7'\n else:\n key_name = r'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\VisualStudio\\SxS\\VC7'\n\n try:\n key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, key_name)\n winreg.QueryValueEx(key, version)\n\n installed_version = Version(version).major(fill=False)\n compiler = \"Visual Studio\"\n output.success(\"Found %s %s\" % (compiler, installed_version))\n return compiler, installed_version\n except EnvironmentError:\n return None\n\n\ndef _visual_compiler_last(output):\n last_version = None\n for version in [\"8\", \"9\", \"10\", \"11\", \"12\", \"14\", \"15\"]:\n vs = _visual_compiler(output, version)\n last_version = vs or last_version\n return last_version\n\n\ndef _sun_cc_compiler(output, compiler_exe=\"cc\"):\n try:\n _, out = _execute('%s -V' % compiler_exe)\n compiler = \"sun-cc\"\n installed_version = re.search(\"([0-9]+\\.[0-9]+)\", out).group()\n if installed_version:\n output.success(\"Found %s %s\" % (compiler, installed_version))\n return compiler, installed_version\n except:\n return None\n\n\ndef _get_default_compiler(output):\n cc = os.environ.get(\"CC\", \"\")\n cxx = 
os.environ.get(\"CXX\", \"\")\n if cc or cxx: # Env defined, use them\n output.info(\"CC and CXX: %s, %s \" % (cc or \"None\", cxx or \"None\"))\n command = cc or cxx\n if \"gcc\" in command:\n gcc = _gcc_compiler(output, command)\n if platform.system() == \"Darwin\" and gcc is None:\n output.error(\n \"%s detected as a frontend using apple-clang. Compiler not supported\" % command\n )\n return gcc\n if \"clang\" in command.lower():\n return _clang_compiler(output, command)\n if platform.system() == \"SunOS\" and command.lower() == \"cc\":\n return _sun_cc_compiler(output, command)\n # I am not able to find its version\n output.error(\"Not able to automatically detect '%s' version\" % command)\n return None\n\n if detected_os() == \"Windows\":\n vs = _visual_compiler_last(output)\n gcc = _gcc_compiler(output)\n clang = _clang_compiler(output)\n if platform.system() == \"SunOS\":\n sun_cc = _sun_cc_compiler(output)\n\n if detected_os() == \"Windows\":\n return vs or gcc or clang\n elif platform.system() == \"Darwin\":\n return clang or gcc\n elif platform.system() == \"SunOS\":\n return sun_cc or gcc or clang\n else:\n return gcc or clang\n\n\ndef _detect_compiler_version(result, output):\n try:\n compiler, version = _get_default_compiler(output)\n except:\n compiler, version = None, None\n if not compiler or not version:\n output.error(\"Unable to find a working compiler\")\n else:\n result.append((\"compiler\", compiler))\n result.append((\"compiler.version\", version))\n if compiler == \"apple-clang\":\n result.append((\"compiler.libcxx\", \"libc++\"))\n elif compiler == \"gcc\":\n result.append((\"compiler.libcxx\", \"libstdc++\"))\n if Version(version) >= Version(\"5.1\"):\n\n msg = \"\"\"\nConan detected a GCC version > 5 but has adjusted the 'compiler.libcxx' setting to\n'libstdc++' for backwards compatibility.\nYour compiler is likely using the new CXX11 ABI by default (libstdc++11).\n\nIf you want Conan to use the new ABI, edit the default profile at:\n\n ~/.conan/profiles/default\n\nadjusting 'compiler.libcxx=libstdc++11'\n\"\"\"\n output.writeln(\"\\n************************* WARNING: GCC OLD ABI COMPATIBILITY \"\n \"***********************\\n %s\\n************************************\"\n \"************************************************\\n\\n\\n\" % msg,\n Color.BRIGHT_RED)\n elif compiler == \"cc\":\n if platform.system() == \"SunOS\":\n result.append((\"compiler.libstdcxx\", \"libstdcxx4\"))\n elif compiler == \"clang\":\n if platform.system() == \"FreeBSD\":\n result.append((\"compiler.libcxx\", \"libc++\"))\n else:\n result.append((\"compiler.libcxx\", \"libstdc++\"))\n elif compiler == \"sun-cc\":\n result.append((\"compiler.libcxx\", \"libCstd\"))\n\n\ndef detected_os():\n result = platform.system()\n if result == \"Darwin\":\n return \"Macos\"\n if result.startswith(\"CYGWIN\"):\n return \"Windows\"\n if result.startswith(\"MINGW32_NT\") or result.startswith(\"MINGW64_NT\"):\n return \"Windows\"\n if result.startswith(\"MSYS_NT\"):\n return \"Windows\"\n return result\n\n\ndef _detect_os_arch(result, output):\n architectures = {'i386': 'x86',\n 'i686': 'x86',\n 'i86pc': 'x86',\n 'amd64': 'x86_64',\n 'aarch64': 'armv8',\n 'sun4v': 'sparc'}\n the_os = detected_os()\n result.append((\"os\", the_os))\n result.append((\"os_build\", the_os))\n arch = architectures.get(platform.machine().lower(), platform.machine().lower())\n if arch.startswith('arm'):\n for a in (\"armv6\", \"armv7hf\", \"armv7\", \"armv8\"):\n if arch.startswith(a):\n arch = a\n break\n else:\n 
output.error(\"Your ARM '%s' architecture is probably not defined in settings.yml\\n\"\n \"Please check your conan.conf and settings.yml files\" % arch)\n result.append((\"arch\", arch))\n result.append((\"arch_build\", arch))\n\n\ndef detect_defaults_settings(output):\n \"\"\" try to deduce current machine values without any constraints at all\n \"\"\"\n result = []\n _detect_os_arch(result, output)\n _detect_compiler_version(result, output)\n result.append((\"build_type\", \"Release\"))\n\n return result\n", "path": "conans/client/conf/detect.py"}], "after_files": [{"content": "import os\nimport platform\nimport re\nfrom subprocess import Popen, PIPE, STDOUT\n\nfrom conans.client.output import Color\nfrom conans.model.version import Version\nfrom conans.tools import vs_installation_path\n\n\ndef _execute(command):\n proc = Popen(command, shell=True, bufsize=1, stdout=PIPE, stderr=STDOUT)\n\n output_buffer = []\n while True:\n line = proc.stdout.readline()\n if not line:\n break\n # output.write(line)\n output_buffer.append(str(line))\n\n proc.communicate()\n return proc.returncode, \"\".join(output_buffer)\n\n\ndef _gcc_compiler(output, compiler_exe=\"gcc\"):\n\n try:\n if platform.system() == \"Darwin\":\n # In Mac OS X check if gcc is a fronted using apple-clang\n _, out = _execute(\"%s --version\" % compiler_exe)\n out = out.lower()\n if \"clang\" in out:\n return None\n\n ret, out = _execute('%s -dumpversion' % compiler_exe)\n if ret != 0:\n return None\n compiler = \"gcc\"\n installed_version = re.search(\"([0-9](\\.[0-9])?)\", out).group()\n # Since GCC 7.1, -dumpversion return the major version number\n # only (\"7\"). We must use -dumpfullversion to get the full version\n # number (\"7.1.1\").\n if installed_version:\n output.success(\"Found %s %s\" % (compiler, installed_version))\n major = installed_version.split(\".\")[0]\n if int(major) >= 5:\n output.info(\"gcc>=5, using the major as version\")\n installed_version = major\n return compiler, installed_version\n except:\n return None\n\n\ndef _clang_compiler(output, compiler_exe=\"clang\"):\n try:\n ret, out = _execute('%s --version' % compiler_exe)\n if ret != 0:\n return None\n if \"Apple\" in out:\n compiler = \"apple-clang\"\n elif \"clang version\" in out:\n compiler = \"clang\"\n installed_version = re.search(\"([0-9]\\.[0-9])\", out).group()\n if installed_version:\n output.success(\"Found %s %s\" % (compiler, installed_version))\n return compiler, installed_version\n except:\n return None\n\n\ndef _visual_compiler_cygwin(output, version):\n if os.path.isfile(\"/proc/registry/HKEY_LOCAL_MACHINE/SOFTWARE/Microsoft/Windows/CurrentVersion/ProgramFilesDir (x86)\"):\n is_64bits = True\n else:\n is_64bits = False\n\n if is_64bits:\n key_name = r'HKEY_LOCAL_MACHINE\\SOFTWARE\\Wow6432Node\\Microsoft\\VisualStudio\\SxS\\VC7'\n else:\n key_name = r'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\VisualStudio\\SxS\\VC7'\n\n if not os.path.isfile(\"/proc/registry/\" + key_name.replace('\\\\', '/') + \"/\" + version):\n return None\n\n installed_version = Version(version).major(fill=False)\n compiler = \"Visual Studio\"\n output.success(\"CYGWIN: Found %s %s\" % (compiler, installed_version))\n return compiler, installed_version\n\n\ndef _visual_compiler(output, version):\n 'version have to be 8.0, or 9.0 or... 
anything .0'\n if platform.system().startswith(\"CYGWIN\"):\n return _visual_compiler_cygwin(output, version)\n\n if version == \"15\":\n vs_path = os.getenv('vs150comntools')\n path = vs_path or vs_installation_path(\"15\")\n if path:\n compiler = \"Visual Studio\"\n output.success(\"Found %s %s\" % (compiler, \"15\"))\n return compiler, \"15\"\n return None\n\n version = \"%s.0\" % version\n from six.moves import winreg # @UnresolvedImport\n try:\n hKey = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,\n r\"SOFTWARE\\Microsoft\\Windows\\CurrentVersion\")\n winreg.QueryValueEx(hKey, \"ProgramFilesDir (x86)\")\n is_64bits = True\n except EnvironmentError:\n is_64bits = False\n finally:\n winreg.CloseKey(hKey)\n\n if is_64bits:\n key_name = r'SOFTWARE\\Wow6432Node\\Microsoft\\VisualStudio\\SxS\\VC7'\n else:\n key_name = r'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\VisualStudio\\SxS\\VC7'\n\n try:\n key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, key_name)\n winreg.QueryValueEx(key, version)\n\n installed_version = Version(version).major(fill=False)\n compiler = \"Visual Studio\"\n output.success(\"Found %s %s\" % (compiler, installed_version))\n return compiler, installed_version\n except EnvironmentError:\n return None\n\n\ndef _visual_compiler_last(output):\n last_version = None\n for version in [\"8\", \"9\", \"10\", \"11\", \"12\", \"14\", \"15\"]:\n vs = _visual_compiler(output, version)\n last_version = vs or last_version\n return last_version\n\n\ndef _sun_cc_compiler(output, compiler_exe=\"cc\"):\n try:\n _, out = _execute('%s -V' % compiler_exe)\n compiler = \"sun-cc\"\n installed_version = re.search(\"([0-9]+\\.[0-9]+)\", out).group()\n if installed_version:\n output.success(\"Found %s %s\" % (compiler, installed_version))\n return compiler, installed_version\n except:\n return None\n\n\ndef _get_default_compiler(output):\n cc = os.environ.get(\"CC\", \"\")\n cxx = os.environ.get(\"CXX\", \"\")\n if cc or cxx: # Env defined, use them\n output.info(\"CC and CXX: %s, %s \" % (cc or \"None\", cxx or \"None\"))\n command = cc or cxx\n if \"gcc\" in command:\n gcc = _gcc_compiler(output, command)\n if platform.system() == \"Darwin\" and gcc is None:\n output.error(\n \"%s detected as a frontend using apple-clang. 
Compiler not supported\" % command\n )\n return gcc\n if \"clang\" in command.lower():\n return _clang_compiler(output, command)\n if platform.system() == \"SunOS\" and command.lower() == \"cc\":\n return _sun_cc_compiler(output, command)\n # I am not able to find its version\n output.error(\"Not able to automatically detect '%s' version\" % command)\n return None\n\n if detected_os() == \"Windows\":\n vs = _visual_compiler_last(output)\n gcc = _gcc_compiler(output)\n clang = _clang_compiler(output)\n if platform.system() == \"SunOS\":\n sun_cc = _sun_cc_compiler(output)\n\n if detected_os() == \"Windows\":\n return vs or gcc or clang\n elif platform.system() == \"Darwin\":\n return clang or gcc\n elif platform.system() == \"SunOS\":\n return sun_cc or gcc or clang\n else:\n return gcc or clang\n\n\ndef _detect_compiler_version(result, output):\n try:\n compiler, version = _get_default_compiler(output)\n except:\n compiler, version = None, None\n if not compiler or not version:\n output.error(\"Unable to find a working compiler\")\n else:\n result.append((\"compiler\", compiler))\n result.append((\"compiler.version\", version))\n if compiler == \"apple-clang\":\n result.append((\"compiler.libcxx\", \"libc++\"))\n elif compiler == \"gcc\":\n result.append((\"compiler.libcxx\", \"libstdc++\"))\n if Version(version) >= Version(\"5.1\"):\n\n msg = \"\"\"\nConan detected a GCC version > 5 but has adjusted the 'compiler.libcxx' setting to\n'libstdc++' for backwards compatibility.\nYour compiler is likely using the new CXX11 ABI by default (libstdc++11).\n\nIf you want Conan to use the new ABI, edit the default profile at:\n\n ~/.conan/profiles/default\n\nadjusting 'compiler.libcxx=libstdc++11'\n\"\"\"\n output.writeln(\"\\n************************* WARNING: GCC OLD ABI COMPATIBILITY \"\n \"***********************\\n %s\\n************************************\"\n \"************************************************\\n\\n\\n\" % msg,\n Color.BRIGHT_RED)\n elif compiler == \"cc\":\n if platform.system() == \"SunOS\":\n result.append((\"compiler.libstdcxx\", \"libstdcxx4\"))\n elif compiler == \"clang\":\n if platform.system() == \"FreeBSD\":\n result.append((\"compiler.libcxx\", \"libc++\"))\n else:\n result.append((\"compiler.libcxx\", \"libstdc++\"))\n elif compiler == \"sun-cc\":\n result.append((\"compiler.libcxx\", \"libCstd\"))\n\n\ndef detected_os():\n result = platform.system()\n if result == \"Darwin\":\n return \"Macos\"\n if result.startswith(\"CYGWIN\"):\n return \"Windows\"\n if result.startswith(\"MINGW32_NT\") or result.startswith(\"MINGW64_NT\"):\n return \"Windows\"\n if result.startswith(\"MSYS_NT\"):\n return \"Windows\"\n return result\n\n\ndef _detect_os_arch(result, output):\n architectures = {'i386': 'x86',\n 'i686': 'x86',\n 'i86pc': 'x86',\n 'amd64': 'x86_64',\n 'aarch64': 'armv8',\n 'sun4v': 'sparc'}\n the_os = detected_os()\n result.append((\"os\", the_os))\n result.append((\"os_build\", the_os))\n arch = architectures.get(platform.machine().lower(), platform.machine().lower())\n if arch.startswith('arm'):\n for a in (\"armv6\", \"armv7hf\", \"armv7\", \"armv8\"):\n if arch.startswith(a):\n arch = a\n break\n else:\n output.error(\"Your ARM '%s' architecture is probably not defined in settings.yml\\n\"\n \"Please check your conan.conf and settings.yml files\" % arch)\n result.append((\"arch\", arch))\n result.append((\"arch_build\", arch))\n\n\ndef detect_defaults_settings(output):\n \"\"\" try to deduce current machine values without any constraints at all\n \"\"\"\n result 
= []\n _detect_os_arch(result, output)\n _detect_compiler_version(result, output)\n result.append((\"build_type\", \"Release\"))\n\n return result\n", "path": "conans/client/conf/detect.py"}]}
| 3,901 | 258 |
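The fix in the conan record above is simply an exit-status check before the compiler output is parsed. A likely cause of the reported `Found gcc 1` is that, with gcc absent, the shell's error text (e.g. `sh: 1: gcc: not found`) still contains a digit that the version regex happily matches. The following is a standalone sketch of the patched behaviour, not the actual `conans/client/conf/detect.py` code:

```python
import re
import subprocess

def detect_gcc_version(compiler_exe="gcc"):
    # shell=True mirrors the Popen(..., shell=True) used by conan's _execute().
    proc = subprocess.run(
        "%s -dumpversion" % compiler_exe,
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        text=True,
    )
    if proc.returncode != 0:
        # Command missing or broken: bail out instead of regex-matching the
        # shell's error message (which may contain stray digits).
        return None
    match = re.search(r"([0-9]+(\.[0-9]+)?)", proc.stdout)
    return match.group() if match else None

print(detect_gcc_version())   # e.g. "12" where gcc is installed, None otherwise
```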
gh_patches_debug_16139 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-1922 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
E1017 with nested !Select
*cfn-lint version: (`cfn-lint --version`)*
0.46.0
*Description of issue.*
When linting this CFT YAML:
```yaml
subnet0A:
DependsOn: ipv6CidrBlock
Type: 'AWS::EC2::Subnet'
Properties:
VpcId: !Ref vpc
CidrBlock: !Select
- !Select
- 1
- !Split
- ','
- !FindInMap
- subnetMap
- !Ref numAzs
- !Ref numSubnets
- 'Fn::Cidr':
- !GetAtt
- vpc
- CidrBlock
- !Select
- 0
- !Split
- ','
- !FindInMap
- subnetMap
- !Ref numAzs
- !Ref numSubnets
- !FindInMap
- subnetMap
- maskTocidrBits
- !Ref subnetMask
```
I get this error:
E1017 Select index should be an Integer or a function Ref or FindInMap for Resources/subnet0A/Properties/CidrBlock/Fn::Select
Template works fine.
thanks
Cfn-lint uses the [CloudFormation Resource Specifications](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-resource-specification.html) as the base to do validation. These files are included as part of the application version. Please update to the latest version of `cfn-lint` or update the spec files manually (`cfn-lint -u`)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/rules/functions/Select.py`
Content:
```
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 import six
6 from cfnlint.rules import CloudFormationLintRule
7 from cfnlint.rules import RuleMatch
8
9
10 class Select(CloudFormationLintRule):
11 """Check if Select values are correct"""
12 id = 'E1017'
13 shortdesc = 'Select validation of parameters'
14 description = 'Making sure the function not is of list'
15 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-select.html'
16 tags = ['functions', 'select']
17
18 def match(self, cfn):
19 matches = []
20
21 select_objs = cfn.search_deep_keys('Fn::Select')
22
23 supported_functions = [
24 'Fn::FindInMap',
25 'Fn::GetAtt',
26 'Fn::GetAZs',
27 'Fn::If',
28 'Fn::Split',
29 'Fn::Cidr',
30 'Ref'
31 ]
32
33 for select_obj in select_objs:
34 select_value_obj = select_obj[-1]
35 tree = select_obj[:-1]
36 if isinstance(select_value_obj, list):
37 if len(select_value_obj) == 2:
38 index_obj = select_value_obj[0]
39 list_of_objs = select_value_obj[1]
40 if isinstance(index_obj, dict):
41 if len(index_obj) == 1:
42 for index_key, _ in index_obj.items():
43 if index_key not in ['Ref', 'Fn::FindInMap']:
44 message = 'Select index should be an Integer or a function Ref or FindInMap for {0}'
45 matches.append(RuleMatch(
46 tree, message.format('/'.join(map(str, tree)))))
47 elif not isinstance(index_obj, six.integer_types):
48 try:
49 int(index_obj)
50 except ValueError:
51 message = 'Select index should be an Integer or a function of Ref or FindInMap for {0}'
52 matches.append(RuleMatch(
53 tree, message.format('/'.join(map(str, tree)))))
54 if isinstance(list_of_objs, dict):
55 if len(list_of_objs) == 1:
56 for key, _ in list_of_objs.items():
57 if key not in supported_functions:
58 message = 'Select should use a supported function of {0}'
59 matches.append(RuleMatch(
60 tree, message.format(', '.join(map(str, supported_functions)))))
61 else:
62 message = 'Select should use a supported function of {0}'
63 matches.append(RuleMatch(
64 tree, message.format(', '.join(map(str, supported_functions)))))
65 elif not isinstance(list_of_objs, list):
66 message = 'Select should be an array of values for {0}'
67 matches.append(RuleMatch(
68 tree, message.format('/'.join(map(str, tree)))))
69 else:
70 message = 'Select should be a list of 2 elements for {0}'
71 matches.append(RuleMatch(
72 tree, message.format('/'.join(map(str, tree)))))
73 else:
74 message = 'Select should be a list of 2 elements for {0}'
75 matches.append(RuleMatch(
76 tree, message.format('/'.join(map(str, tree)))))
77 return matches
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/cfnlint/rules/functions/Select.py b/src/cfnlint/rules/functions/Select.py
--- a/src/cfnlint/rules/functions/Select.py
+++ b/src/cfnlint/rules/functions/Select.py
@@ -40,7 +40,7 @@
if isinstance(index_obj, dict):
if len(index_obj) == 1:
for index_key, _ in index_obj.items():
- if index_key not in ['Ref', 'Fn::FindInMap']:
+ if index_key not in ['Ref', 'Fn::FindInMap', 'Fn::Select']:
message = 'Select index should be an Integer or a function Ref or FindInMap for {0}'
matches.append(RuleMatch(
tree, message.format('/'.join(map(str, tree)))))
|
{"golden_diff": "diff --git a/src/cfnlint/rules/functions/Select.py b/src/cfnlint/rules/functions/Select.py\n--- a/src/cfnlint/rules/functions/Select.py\n+++ b/src/cfnlint/rules/functions/Select.py\n@@ -40,7 +40,7 @@\n if isinstance(index_obj, dict):\n if len(index_obj) == 1:\n for index_key, _ in index_obj.items():\n- if index_key not in ['Ref', 'Fn::FindInMap']:\n+ if index_key not in ['Ref', 'Fn::FindInMap', 'Fn::Select']:\n message = 'Select index should be an Integer or a function Ref or FindInMap for {0}'\n matches.append(RuleMatch(\n tree, message.format('/'.join(map(str, tree)))))\n", "issue": "E1017 with nested !Select\n*cfn-lint version: (`cfn-lint --version`)*\r\n0.46.0\r\n\r\n*Description of issue.*\r\nWhen linting this CFT YAML:\r\n```yaml\r\nsubnet0A:\r\n DependsOn: ipv6CidrBlock\r\n Type: 'AWS::EC2::Subnet'\r\n Properties:\r\n VpcId: !Ref vpc\r\n CidrBlock: !Select\r\n - !Select\r\n - 1\r\n - !Split\r\n - ','\r\n - !FindInMap\r\n - subnetMap\r\n - !Ref numAzs\r\n - !Ref numSubnets\r\n - 'Fn::Cidr':\r\n - !GetAtt\r\n - vpc\r\n - CidrBlock\r\n - !Select\r\n - 0\r\n - !Split\r\n - ','\r\n - !FindInMap\r\n - subnetMap\r\n - !Ref numAzs\r\n - !Ref numSubnets\r\n - !FindInMap\r\n - subnetMap\r\n - maskTocidrBits\r\n - !Ref subnetMask\r\n```\r\n\r\nI get this error:\r\n\r\nE1017 Select index should be an Integer or a function Ref or FindInMap for Resources/subnet0A/Properties/CidrBlock/Fn::Select\r\n\r\nTemplate works fine.\r\nthanks\r\n\r\nCfn-lint uses the [CloudFormation Resource Specifications](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-resource-specification.html) as the base to do validation. These files are included as part of the application version. Please update to the latest version of `cfn-lint` or update the spec files manually (`cfn-lint -u`)\r\n\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport six\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\n\nclass Select(CloudFormationLintRule):\n \"\"\"Check if Select values are correct\"\"\"\n id = 'E1017'\n shortdesc = 'Select validation of parameters'\n description = 'Making sure the function not is of list'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-select.html'\n tags = ['functions', 'select']\n\n def match(self, cfn):\n matches = []\n\n select_objs = cfn.search_deep_keys('Fn::Select')\n\n supported_functions = [\n 'Fn::FindInMap',\n 'Fn::GetAtt',\n 'Fn::GetAZs',\n 'Fn::If',\n 'Fn::Split',\n 'Fn::Cidr',\n 'Ref'\n ]\n\n for select_obj in select_objs:\n select_value_obj = select_obj[-1]\n tree = select_obj[:-1]\n if isinstance(select_value_obj, list):\n if len(select_value_obj) == 2:\n index_obj = select_value_obj[0]\n list_of_objs = select_value_obj[1]\n if isinstance(index_obj, dict):\n if len(index_obj) == 1:\n for index_key, _ in index_obj.items():\n if index_key not in ['Ref', 'Fn::FindInMap']:\n message = 'Select index should be an Integer or a function Ref or FindInMap for {0}'\n matches.append(RuleMatch(\n tree, message.format('/'.join(map(str, tree)))))\n elif not isinstance(index_obj, six.integer_types):\n try:\n int(index_obj)\n except ValueError:\n message = 'Select index should be an Integer or a function of Ref or FindInMap for {0}'\n matches.append(RuleMatch(\n tree, message.format('/'.join(map(str, tree)))))\n if isinstance(list_of_objs, dict):\n if len(list_of_objs) == 1:\n for key, _ in list_of_objs.items():\n if key not in supported_functions:\n message = 'Select should use a supported function of {0}'\n matches.append(RuleMatch(\n tree, message.format(', '.join(map(str, supported_functions)))))\n else:\n message = 'Select should use a supported function of {0}'\n matches.append(RuleMatch(\n tree, message.format(', '.join(map(str, supported_functions)))))\n elif not isinstance(list_of_objs, list):\n message = 'Select should be an array of values for {0}'\n matches.append(RuleMatch(\n tree, message.format('/'.join(map(str, tree)))))\n else:\n message = 'Select should be a list of 2 elements for {0}'\n matches.append(RuleMatch(\n tree, message.format('/'.join(map(str, tree)))))\n else:\n message = 'Select should be a list of 2 elements for {0}'\n matches.append(RuleMatch(\n tree, message.format('/'.join(map(str, tree)))))\n return matches\n", "path": "src/cfnlint/rules/functions/Select.py"}], "after_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport six\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\n\nclass Select(CloudFormationLintRule):\n \"\"\"Check if Select values are correct\"\"\"\n id = 'E1017'\n shortdesc = 'Select validation of parameters'\n description = 'Making sure the function not is of list'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-select.html'\n tags = ['functions', 'select']\n\n def match(self, cfn):\n matches = []\n\n select_objs = cfn.search_deep_keys('Fn::Select')\n\n supported_functions = [\n 'Fn::FindInMap',\n 'Fn::GetAtt',\n 'Fn::GetAZs',\n 'Fn::If',\n 'Fn::Split',\n 'Fn::Cidr',\n 'Ref'\n ]\n\n for select_obj in select_objs:\n select_value_obj = select_obj[-1]\n tree = select_obj[:-1]\n if isinstance(select_value_obj, list):\n if len(select_value_obj) == 2:\n index_obj = select_value_obj[0]\n list_of_objs = select_value_obj[1]\n if isinstance(index_obj, dict):\n if len(index_obj) == 1:\n for index_key, _ in index_obj.items():\n if index_key not in ['Ref', 'Fn::FindInMap', 'Fn::Select']:\n message = 'Select index should be an Integer or a function Ref or FindInMap for {0}'\n matches.append(RuleMatch(\n tree, message.format('/'.join(map(str, tree)))))\n elif not isinstance(index_obj, six.integer_types):\n try:\n int(index_obj)\n except ValueError:\n message = 'Select index should be an Integer or a function of Ref or FindInMap for {0}'\n matches.append(RuleMatch(\n tree, message.format('/'.join(map(str, tree)))))\n if isinstance(list_of_objs, dict):\n if len(list_of_objs) == 1:\n for key, _ in list_of_objs.items():\n if key not in supported_functions:\n message = 'Select should use a supported function of {0}'\n matches.append(RuleMatch(\n tree, message.format(', '.join(map(str, supported_functions)))))\n else:\n message = 'Select should use a supported function of {0}'\n matches.append(RuleMatch(\n tree, message.format(', '.join(map(str, supported_functions)))))\n elif not isinstance(list_of_objs, list):\n message = 'Select should be an array of values for {0}'\n matches.append(RuleMatch(\n tree, message.format('/'.join(map(str, tree)))))\n else:\n message = 'Select should be a list of 2 elements for {0}'\n matches.append(RuleMatch(\n tree, message.format('/'.join(map(str, tree)))))\n else:\n message = 'Select should be a list of 2 elements for {0}'\n matches.append(RuleMatch(\n tree, message.format('/'.join(map(str, tree)))))\n return matches\n", "path": "src/cfnlint/rules/functions/Select.py"}]}
| 1,464 | 170 |
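The one-line change in the cfn-lint record above whitelists `Fn::Select` as a valid *index* function, which is exactly what the nested `!Select` template exercises. Distilled out of rule E1017, the index check reduces to something like the sketch below; this is a simplification for illustration, not the real rule class:

```python
# Functions the patched rule accepts for the index argument of Fn::Select.
ALLOWED_INDEX_FUNCTIONS = {"Ref", "Fn::FindInMap", "Fn::Select"}

def index_is_valid(index_obj):
    # The index must be an integer, an int-like string, or a single-key dict
    # whose key is one of the allowed intrinsic functions.
    if isinstance(index_obj, dict):
        return len(index_obj) == 1 and next(iter(index_obj)) in ALLOWED_INDEX_FUNCTIONS
    try:
        int(index_obj)
        return True
    except (TypeError, ValueError):
        return False

print(index_is_valid({"Fn::Select": [1, {"Fn::Split": [",", "a,b,c"]}]}))  # True (nested Select)
print(index_is_valid("2"))                                                 # True
print(index_is_valid({"Fn::GetAtt": ["vpc", "CidrBlock"]}))                # False
```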
gh_patches_debug_3665 | rasdani/github-patches | git_diff | pex-tool__pex-634 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bootstrap demotion is not robust
While working https://github.com/pantsbuild/pants/issues/6927 it was discovered that there were multiple `.bootstrap/` on the sys.path and only one was demoted, leading to pants, when running in a pex, picking .bootstrap.pex instead of the pex dist in `.deps/`. In this case, Pants was purposefully duplicating pex path entries to work around a bug in the `coverage` library. PEX should be robust and demote all instances of its `.bootstrap/` entry on the sys.path.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/bootstrap.py`
Content:
```
1 # coding=utf-8
2 # Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
3 # Licensed under the Apache License, Version 2.0 (see LICENSE).
4
5 import os
6
7
8 class Bootstrap(object):
9 """Supports introspection of the PEX bootstrap code."""
10
11 _INSTANCE = None
12
13 @classmethod
14 def locate(cls):
15 """Locates the active PEX bootstrap.
16
17 :rtype: :class:`Bootstrap`
18 """
19 if cls._INSTANCE is None:
20 bootstrap_path = __file__
21 module_import_path = __name__.split('.')
22
23 # For example, our __file__ might be requests.pex/.bootstrap/pex/bootstrap.pyc and our import
24 # path pex.bootstrap; so we walk back through all the module components of our import path to
25 # find the base sys.path entry where we were found (requests.pex/.bootstrap in this example).
26 for _ in module_import_path:
27 bootstrap_path = os.path.dirname(bootstrap_path)
28
29 cls._INSTANCE = cls(sys_path_entry=bootstrap_path)
30 return cls._INSTANCE
31
32 def __init__(self, sys_path_entry):
33 self._sys_path_entry = sys_path_entry
34 self._realpath = os.path.realpath(self._sys_path_entry)
35
36 def demote(self):
37 """Demote the bootstrap code to the end of the `sys.path` so it is found last.
38
39 :return: The list of un-imported bootstrap modules.
40 :rtype: list of :class:`types.ModuleType`
41 """
42 import sys # Grab a hold of `sys` early since we'll be un-importing our module in this process.
43
44 unimported_modules = []
45 for name, module in reversed(sorted(sys.modules.items())):
46 if self.imported_from_bootstrap(module):
47 unimported_modules.append(sys.modules.pop(name))
48
49 sys.path.remove(self._sys_path_entry)
50 sys.path.append(self._sys_path_entry)
51
52 return unimported_modules
53
54 def imported_from_bootstrap(self, module):
55 """Return ``True`` if the given ``module`` object was imported from bootstrap code.
56
57 :param module: The module to check the provenance of.
58 :type module: :class:`types.ModuleType`
59 :rtype: bool
60 """
61
62 # A vendored module.
63 path = getattr(module, '__file__', None)
64 if path and os.path.realpath(path).startswith(self._realpath):
65 return True
66
67 # A vendored package.
68 path = getattr(module, '__path__', None)
69 if path and any(os.path.realpath(path_item).startswith(self._realpath)
70 for path_item in path):
71 return True
72
73 return False
74
75 def __repr__(self):
76 return '{cls}(sys_path_entry={sys_path_entry!r})'.format(cls=type(self).__name__,
77 sys_path_entry=self._sys_path_entry)
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pex/bootstrap.py b/pex/bootstrap.py
--- a/pex/bootstrap.py
+++ b/pex/bootstrap.py
@@ -46,7 +46,7 @@
if self.imported_from_bootstrap(module):
unimported_modules.append(sys.modules.pop(name))
- sys.path.remove(self._sys_path_entry)
+ sys.path[:] = [path for path in sys.path if os.path.realpath(path) != self._realpath]
sys.path.append(self._sys_path_entry)
return unimported_modules
|
{"golden_diff": "diff --git a/pex/bootstrap.py b/pex/bootstrap.py\n--- a/pex/bootstrap.py\n+++ b/pex/bootstrap.py\n@@ -46,7 +46,7 @@\n if self.imported_from_bootstrap(module):\n unimported_modules.append(sys.modules.pop(name))\n \n- sys.path.remove(self._sys_path_entry)\n+ sys.path[:] = [path for path in sys.path if os.path.realpath(path) != self._realpath]\n sys.path.append(self._sys_path_entry)\n \n return unimported_modules\n", "issue": "Bootstrap demotion is not robust\nWhile working https://github.com/pantsbuild/pants/issues/6927 it was discovered that there were multiple `.bootstrap/` on the sys.path and only one was demoted, leading to pants, when running in a pex, picking .bootstrap.pex instead of the pex dist it in `.deps/`. In this case, Pants was purposefully duplicating pex path entries to work around a bug in the `coverage` library. PEX should be robust and demote all instances of it's `.bootstrap/` entry on the sys.path.\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nimport os\n\n\nclass Bootstrap(object):\n \"\"\"Supports introspection of the PEX bootstrap code.\"\"\"\n\n _INSTANCE = None\n\n @classmethod\n def locate(cls):\n \"\"\"Locates the active PEX bootstrap.\n\n :rtype: :class:`Bootstrap`\n \"\"\"\n if cls._INSTANCE is None:\n bootstrap_path = __file__\n module_import_path = __name__.split('.')\n\n # For example, our __file__ might be requests.pex/.bootstrap/pex/bootstrap.pyc and our import\n # path pex.bootstrap; so we walk back through all the module components of our import path to\n # find the base sys.path entry where we were found (requests.pex/.bootstrap in this example).\n for _ in module_import_path:\n bootstrap_path = os.path.dirname(bootstrap_path)\n\n cls._INSTANCE = cls(sys_path_entry=bootstrap_path)\n return cls._INSTANCE\n\n def __init__(self, sys_path_entry):\n self._sys_path_entry = sys_path_entry\n self._realpath = os.path.realpath(self._sys_path_entry)\n\n def demote(self):\n \"\"\"Demote the bootstrap code to the end of the `sys.path` so it is found last.\n\n :return: The list of un-imported bootstrap modules.\n :rtype: list of :class:`types.ModuleType`\n \"\"\"\n import sys # Grab a hold of `sys` early since we'll be un-importing our module in this process.\n\n unimported_modules = []\n for name, module in reversed(sorted(sys.modules.items())):\n if self.imported_from_bootstrap(module):\n unimported_modules.append(sys.modules.pop(name))\n\n sys.path.remove(self._sys_path_entry)\n sys.path.append(self._sys_path_entry)\n\n return unimported_modules\n\n def imported_from_bootstrap(self, module):\n \"\"\"Return ``True`` if the given ``module`` object was imported from bootstrap code.\n\n :param module: The module to check the provenance of.\n :type module: :class:`types.ModuleType`\n :rtype: bool\n \"\"\"\n\n # A vendored module.\n path = getattr(module, '__file__', None)\n if path and os.path.realpath(path).startswith(self._realpath):\n return True\n\n # A vendored package.\n path = getattr(module, '__path__', None)\n if path and any(os.path.realpath(path_item).startswith(self._realpath)\n for path_item in path):\n return True\n\n return False\n\n def __repr__(self):\n return '{cls}(sys_path_entry={sys_path_entry!r})'.format(cls=type(self).__name__,\n sys_path_entry=self._sys_path_entry)\n", "path": "pex/bootstrap.py"}], "after_files": [{"content": "# coding=utf-8\n# Copyright 2018 Pants project contributors (see 
CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nimport os\n\n\nclass Bootstrap(object):\n \"\"\"Supports introspection of the PEX bootstrap code.\"\"\"\n\n _INSTANCE = None\n\n @classmethod\n def locate(cls):\n \"\"\"Locates the active PEX bootstrap.\n\n :rtype: :class:`Bootstrap`\n \"\"\"\n if cls._INSTANCE is None:\n bootstrap_path = __file__\n module_import_path = __name__.split('.')\n\n # For example, our __file__ might be requests.pex/.bootstrap/pex/bootstrap.pyc and our import\n # path pex.bootstrap; so we walk back through all the module components of our import path to\n # find the base sys.path entry where we were found (requests.pex/.bootstrap in this example).\n for _ in module_import_path:\n bootstrap_path = os.path.dirname(bootstrap_path)\n\n cls._INSTANCE = cls(sys_path_entry=bootstrap_path)\n return cls._INSTANCE\n\n def __init__(self, sys_path_entry):\n self._sys_path_entry = sys_path_entry\n self._realpath = os.path.realpath(self._sys_path_entry)\n\n def demote(self):\n \"\"\"Demote the bootstrap code to the end of the `sys.path` so it is found last.\n\n :return: The list of un-imported bootstrap modules.\n :rtype: list of :class:`types.ModuleType`\n \"\"\"\n import sys # Grab a hold of `sys` early since we'll be un-importing our module in this process.\n\n unimported_modules = []\n for name, module in reversed(sorted(sys.modules.items())):\n if self.imported_from_bootstrap(module):\n unimported_modules.append(sys.modules.pop(name))\n\n sys.path[:] = [path for path in sys.path if os.path.realpath(path) != self._realpath]\n sys.path.append(self._sys_path_entry)\n\n return unimported_modules\n\n def imported_from_bootstrap(self, module):\n \"\"\"Return ``True`` if the given ``module`` object was imported from bootstrap code.\n\n :param module: The module to check the provenance of.\n :type module: :class:`types.ModuleType`\n :rtype: bool\n \"\"\"\n\n # A vendored module.\n path = getattr(module, '__file__', None)\n if path and os.path.realpath(path).startswith(self._realpath):\n return True\n\n # A vendored package.\n path = getattr(module, '__path__', None)\n if path and any(os.path.realpath(path_item).startswith(self._realpath)\n for path_item in path):\n return True\n\n return False\n\n def __repr__(self):\n return '{cls}(sys_path_entry={sys_path_entry!r})'.format(cls=type(self).__name__,\n sys_path_entry=self._sys_path_entry)\n", "path": "pex/bootstrap.py"}]}
| 1,149 | 115 |
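The pex fix above replaces `sys.path.remove(...)`, which drops only the first matching entry, with a realpath-based rebuild of `sys.path` so every duplicated `.bootstrap/` entry gets demoted. A small self-contained illustration of that difference follows, using plain lists rather than a live interpreter's `sys.path`:

```python
import os

def demote_all(paths, entry):
    # Rebuild the list keeping only paths whose realpath differs from the
    # demoted entry, then append it once at the end -- duplicates included.
    real = os.path.realpath(entry)
    demoted = [p for p in paths if os.path.realpath(p) != real]
    demoted.append(entry)
    return demoted

bootstrap = "/srv/app.pex/.bootstrap"
sys_path = [bootstrap, bootstrap, "/usr/lib/python3.8"]

# Old behaviour: remove() strips only the first copy, so a stale duplicate
# still sits in front of everything else even after the re-append.
naive = list(sys_path)
naive.remove(bootstrap)
naive.append(bootstrap)
print(naive)
# ['/srv/app.pex/.bootstrap', '/usr/lib/python3.8', '/srv/app.pex/.bootstrap']

print(demote_all(sys_path, bootstrap))
# ['/usr/lib/python3.8', '/srv/app.pex/.bootstrap']
```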
gh_patches_debug_32954 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-2442 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
in text commenting links to first chapter lead back to the project view
...so I am back to the timeline and the tiles
There are two links doing that: "back to chapter" in paragraph detail view and "previous chapter" in second chapter.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/apps/documents/views.py`
Content:
```
1 from django.http import Http404
2 from django.urls import reverse
3 from django.utils.translation import ugettext_lazy as _
4 from django.views import generic
5
6 from adhocracy4.dashboard import mixins as dashboard_mixins
7 from adhocracy4.projects.mixins import DisplayProjectOrModuleMixin
8 from adhocracy4.projects.mixins import ProjectMixin
9 from adhocracy4.rules import mixins as rules_mixins
10 from meinberlin.apps.exports.views import DashboardExportView
11
12 from . import models
13
14
15 class DocumentDashboardView(ProjectMixin,
16 dashboard_mixins.DashboardBaseMixin,
17 dashboard_mixins.DashboardComponentMixin,
18 generic.TemplateView):
19 template_name = 'meinberlin_documents/document_dashboard.html'
20 permission_required = 'a4projects.change_project'
21
22 def get_permission_object(self):
23 return self.project
24
25
26 class ChapterDetailView(ProjectMixin,
27 rules_mixins.PermissionRequiredMixin,
28 generic.DetailView,
29 DisplayProjectOrModuleMixin):
30 model = models.Chapter
31 permission_required = 'meinberlin_documents.view_chapter'
32 get_context_from_object = True
33
34 def get_context_data(self, **kwargs):
35 context = super(ChapterDetailView, self).get_context_data(**kwargs)
36 context['chapter_list'] = self.chapter_list
37 return context
38
39 @property
40 def chapter_list(self):
41 return models.Chapter.objects.filter(module=self.module)
42
43
44 class DocumentDetailView(ChapterDetailView):
45 get_context_from_object = False
46
47 def get_object(self):
48 first_chapter = models.Chapter.objects \
49 .filter(module=self.module) \
50 .first()
51
52 if not first_chapter:
53 raise Http404(_('Document has no chapters defined.'))
54 return first_chapter
55
56
57 class ParagraphDetailView(ProjectMixin,
58 rules_mixins.PermissionRequiredMixin,
59 generic.DetailView):
60 model = models.Paragraph
61 permission_required = 'meinberlin_documents.view_paragraph'
62
63
64 class DocumentDashboardExportView(DashboardExportView):
65 template_name = 'meinberlin_exports/export_dashboard.html'
66
67 def get_context_data(self, **kwargs):
68 context = super().get_context_data(**kwargs)
69 context['comment_export'] = reverse(
70 'a4dashboard:document-comment-export',
71 kwargs={'module_slug': self.module.slug})
72 return context
73
```
Path: `meinberlin/apps/documents/models.py`
Content:
```
1 from ckeditor_uploader.fields import RichTextUploadingField
2 from django.contrib.contenttypes.fields import GenericRelation
3 from django.db import models
4 from django.urls import reverse
5 from django.utils.functional import cached_property
6
7 from adhocracy4 import transforms
8 from adhocracy4.comments import models as comment_models
9 from adhocracy4.models import base
10 from adhocracy4.modules import models as module_models
11
12
13 class Chapter(module_models.Item):
14 name = models.CharField(max_length=120)
15 comments = GenericRelation(comment_models.Comment,
16 related_query_name='chapter',
17 object_id_field='object_pk')
18 weight = models.PositiveIntegerField(default=0)
19
20 class Meta:
21 ordering = ('weight',)
22
23 def __str__(self):
24 return "{}_chapter_{}".format(str(self.module), self.pk)
25
26 def get_absolute_url(self):
27 return reverse('meinberlin_documents:chapter-detail',
28 args=[str(self.pk)])
29
30 @cached_property
31 def prev(self):
32 return Chapter.objects\
33 .filter(module=self.module)\
34 .filter(weight__lt=self.weight)\
35 .order_by('-weight')\
36 .first()
37
38 @cached_property
39 def next(self):
40 return Chapter.objects\
41 .filter(module=self.module)\
42 .filter(weight__gt=self.weight)\
43 .order_by('weight')\
44 .first()
45
46
47 class Paragraph(base.TimeStampedModel):
48 name = models.CharField(max_length=120, blank=True)
49 text = RichTextUploadingField(config_name='image-editor')
50 weight = models.PositiveIntegerField()
51 chapter = models.ForeignKey(Chapter,
52 on_delete=models.CASCADE,
53 related_name='paragraphs')
54 comments = GenericRelation(comment_models.Comment,
55 related_query_name='paragraph',
56 object_id_field='object_pk')
57
58 class Meta:
59 ordering = ('weight',)
60
61 def __str__(self):
62 return "{}_paragraph_{}".format(str(self.chapter), self.weight)
63
64 def save(self, *args, **kwargs):
65 self.text = transforms.clean_html_field(
66 self.text, 'image-editor')
67 super().save(*args, **kwargs)
68
69 def get_absolute_url(self):
70 return reverse('meinberlin_documents:paragraph-detail',
71 args=[str(self.pk)])
72
73 @property
74 def creator(self):
75 return self.chapter.creator
76
77 @property
78 def project(self):
79 return self.module.project
80
81 @property
82 def module(self):
83 return self.chapter.module
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/meinberlin/apps/documents/models.py b/meinberlin/apps/documents/models.py
--- a/meinberlin/apps/documents/models.py
+++ b/meinberlin/apps/documents/models.py
@@ -24,6 +24,12 @@
return "{}_chapter_{}".format(str(self.module), self.pk)
def get_absolute_url(self):
+ if (self.project.display_timeline
+ and not self.module.is_in_module_cluster):
+ return ('{}?initialSlide={}'
+ .format(reverse('meinberlin_documents:chapter-detail',
+ args=[str(self.pk)]),
+ self.module.get_timeline_index))
return reverse('meinberlin_documents:chapter-detail',
args=[str(self.pk)])
diff --git a/meinberlin/apps/documents/views.py b/meinberlin/apps/documents/views.py
--- a/meinberlin/apps/documents/views.py
+++ b/meinberlin/apps/documents/views.py
@@ -1,5 +1,6 @@
from django.http import Http404
from django.urls import reverse
+from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from django.views import generic
@@ -32,7 +33,7 @@
get_context_from_object = True
def get_context_data(self, **kwargs):
- context = super(ChapterDetailView, self).get_context_data(**kwargs)
+ context = super().get_context_data(**kwargs)
context['chapter_list'] = self.chapter_list
return context
@@ -40,6 +41,15 @@
def chapter_list(self):
return models.Chapter.objects.filter(module=self.module)
+ @cached_property
+ def extends(self):
+ if self.url_name == 'module-detail':
+ return 'a4modules/module_detail.html'
+ if self.url_name == 'chapter-detail':
+ if self.module.is_in_module_cluster:
+ return 'a4modules/module_detail.html'
+ return 'a4projects/project_detail.html'
+
class DocumentDetailView(ChapterDetailView):
get_context_from_object = False
|
{"golden_diff": "diff --git a/meinberlin/apps/documents/models.py b/meinberlin/apps/documents/models.py\n--- a/meinberlin/apps/documents/models.py\n+++ b/meinberlin/apps/documents/models.py\n@@ -24,6 +24,12 @@\n return \"{}_chapter_{}\".format(str(self.module), self.pk)\n \n def get_absolute_url(self):\n+ if (self.project.display_timeline\n+ and not self.module.is_in_module_cluster):\n+ return ('{}?initialSlide={}'\n+ .format(reverse('meinberlin_documents:chapter-detail',\n+ args=[str(self.pk)]),\n+ self.module.get_timeline_index))\n return reverse('meinberlin_documents:chapter-detail',\n args=[str(self.pk)])\n \ndiff --git a/meinberlin/apps/documents/views.py b/meinberlin/apps/documents/views.py\n--- a/meinberlin/apps/documents/views.py\n+++ b/meinberlin/apps/documents/views.py\n@@ -1,5 +1,6 @@\n from django.http import Http404\n from django.urls import reverse\n+from django.utils.functional import cached_property\n from django.utils.translation import ugettext_lazy as _\n from django.views import generic\n \n@@ -32,7 +33,7 @@\n get_context_from_object = True\n \n def get_context_data(self, **kwargs):\n- context = super(ChapterDetailView, self).get_context_data(**kwargs)\n+ context = super().get_context_data(**kwargs)\n context['chapter_list'] = self.chapter_list\n return context\n \n@@ -40,6 +41,15 @@\n def chapter_list(self):\n return models.Chapter.objects.filter(module=self.module)\n \n+ @cached_property\n+ def extends(self):\n+ if self.url_name == 'module-detail':\n+ return 'a4modules/module_detail.html'\n+ if self.url_name == 'chapter-detail':\n+ if self.module.is_in_module_cluster:\n+ return 'a4modules/module_detail.html'\n+ return 'a4projects/project_detail.html'\n+\n \n class DocumentDetailView(ChapterDetailView):\n get_context_from_object = False\n", "issue": "in text commenting links to first chapter lead back to the project view\n...so I am back to the timeline and the tiles\r\n\r\nThere are two links doing that: \"back to chapter\" in paragraph detail view and \"previous chapter\" in second chapter.\n", "before_files": [{"content": "from django.http import Http404\nfrom django.urls import reverse\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views import generic\n\nfrom adhocracy4.dashboard import mixins as dashboard_mixins\nfrom adhocracy4.projects.mixins import DisplayProjectOrModuleMixin\nfrom adhocracy4.projects.mixins import ProjectMixin\nfrom adhocracy4.rules import mixins as rules_mixins\nfrom meinberlin.apps.exports.views import DashboardExportView\n\nfrom . 
import models\n\n\nclass DocumentDashboardView(ProjectMixin,\n dashboard_mixins.DashboardBaseMixin,\n dashboard_mixins.DashboardComponentMixin,\n generic.TemplateView):\n template_name = 'meinberlin_documents/document_dashboard.html'\n permission_required = 'a4projects.change_project'\n\n def get_permission_object(self):\n return self.project\n\n\nclass ChapterDetailView(ProjectMixin,\n rules_mixins.PermissionRequiredMixin,\n generic.DetailView,\n DisplayProjectOrModuleMixin):\n model = models.Chapter\n permission_required = 'meinberlin_documents.view_chapter'\n get_context_from_object = True\n\n def get_context_data(self, **kwargs):\n context = super(ChapterDetailView, self).get_context_data(**kwargs)\n context['chapter_list'] = self.chapter_list\n return context\n\n @property\n def chapter_list(self):\n return models.Chapter.objects.filter(module=self.module)\n\n\nclass DocumentDetailView(ChapterDetailView):\n get_context_from_object = False\n\n def get_object(self):\n first_chapter = models.Chapter.objects \\\n .filter(module=self.module) \\\n .first()\n\n if not first_chapter:\n raise Http404(_('Document has no chapters defined.'))\n return first_chapter\n\n\nclass ParagraphDetailView(ProjectMixin,\n rules_mixins.PermissionRequiredMixin,\n generic.DetailView):\n model = models.Paragraph\n permission_required = 'meinberlin_documents.view_paragraph'\n\n\nclass DocumentDashboardExportView(DashboardExportView):\n template_name = 'meinberlin_exports/export_dashboard.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['comment_export'] = reverse(\n 'a4dashboard:document-comment-export',\n kwargs={'module_slug': self.module.slug})\n return context\n", "path": "meinberlin/apps/documents/views.py"}, {"content": "from ckeditor_uploader.fields import RichTextUploadingField\nfrom django.contrib.contenttypes.fields import GenericRelation\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils.functional import cached_property\n\nfrom adhocracy4 import transforms\nfrom adhocracy4.comments import models as comment_models\nfrom adhocracy4.models import base\nfrom adhocracy4.modules import models as module_models\n\n\nclass Chapter(module_models.Item):\n name = models.CharField(max_length=120)\n comments = GenericRelation(comment_models.Comment,\n related_query_name='chapter',\n object_id_field='object_pk')\n weight = models.PositiveIntegerField(default=0)\n\n class Meta:\n ordering = ('weight',)\n\n def __str__(self):\n return \"{}_chapter_{}\".format(str(self.module), self.pk)\n\n def get_absolute_url(self):\n return reverse('meinberlin_documents:chapter-detail',\n args=[str(self.pk)])\n\n @cached_property\n def prev(self):\n return Chapter.objects\\\n .filter(module=self.module)\\\n .filter(weight__lt=self.weight)\\\n .order_by('-weight')\\\n .first()\n\n @cached_property\n def next(self):\n return Chapter.objects\\\n .filter(module=self.module)\\\n .filter(weight__gt=self.weight)\\\n .order_by('weight')\\\n .first()\n\n\nclass Paragraph(base.TimeStampedModel):\n name = models.CharField(max_length=120, blank=True)\n text = RichTextUploadingField(config_name='image-editor')\n weight = models.PositiveIntegerField()\n chapter = models.ForeignKey(Chapter,\n on_delete=models.CASCADE,\n related_name='paragraphs')\n comments = GenericRelation(comment_models.Comment,\n related_query_name='paragraph',\n object_id_field='object_pk')\n\n class Meta:\n ordering = ('weight',)\n\n def __str__(self):\n return 
\"{}_paragraph_{}\".format(str(self.chapter), self.weight)\n\n def save(self, *args, **kwargs):\n self.text = transforms.clean_html_field(\n self.text, 'image-editor')\n super().save(*args, **kwargs)\n\n def get_absolute_url(self):\n return reverse('meinberlin_documents:paragraph-detail',\n args=[str(self.pk)])\n\n @property\n def creator(self):\n return self.chapter.creator\n\n @property\n def project(self):\n return self.module.project\n\n @property\n def module(self):\n return self.chapter.module\n", "path": "meinberlin/apps/documents/models.py"}], "after_files": [{"content": "from django.http import Http404\nfrom django.urls import reverse\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views import generic\n\nfrom adhocracy4.dashboard import mixins as dashboard_mixins\nfrom adhocracy4.projects.mixins import DisplayProjectOrModuleMixin\nfrom adhocracy4.projects.mixins import ProjectMixin\nfrom adhocracy4.rules import mixins as rules_mixins\nfrom meinberlin.apps.exports.views import DashboardExportView\n\nfrom . import models\n\n\nclass DocumentDashboardView(ProjectMixin,\n dashboard_mixins.DashboardBaseMixin,\n dashboard_mixins.DashboardComponentMixin,\n generic.TemplateView):\n template_name = 'meinberlin_documents/document_dashboard.html'\n permission_required = 'a4projects.change_project'\n\n def get_permission_object(self):\n return self.project\n\n\nclass ChapterDetailView(ProjectMixin,\n rules_mixins.PermissionRequiredMixin,\n generic.DetailView,\n DisplayProjectOrModuleMixin):\n model = models.Chapter\n permission_required = 'meinberlin_documents.view_chapter'\n get_context_from_object = True\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['chapter_list'] = self.chapter_list\n return context\n\n @property\n def chapter_list(self):\n return models.Chapter.objects.filter(module=self.module)\n\n @cached_property\n def extends(self):\n if self.url_name == 'module-detail':\n return 'a4modules/module_detail.html'\n if self.url_name == 'chapter-detail':\n if self.module.is_in_module_cluster:\n return 'a4modules/module_detail.html'\n return 'a4projects/project_detail.html'\n\n\nclass DocumentDetailView(ChapterDetailView):\n get_context_from_object = False\n\n def get_object(self):\n first_chapter = models.Chapter.objects \\\n .filter(module=self.module) \\\n .first()\n\n if not first_chapter:\n raise Http404(_('Document has no chapters defined.'))\n return first_chapter\n\n\nclass ParagraphDetailView(ProjectMixin,\n rules_mixins.PermissionRequiredMixin,\n generic.DetailView):\n model = models.Paragraph\n permission_required = 'meinberlin_documents.view_paragraph'\n\n\nclass DocumentDashboardExportView(DashboardExportView):\n template_name = 'meinberlin_exports/export_dashboard.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['comment_export'] = reverse(\n 'a4dashboard:document-comment-export',\n kwargs={'module_slug': self.module.slug})\n return context\n", "path": "meinberlin/apps/documents/views.py"}, {"content": "from ckeditor_uploader.fields import RichTextUploadingField\nfrom django.contrib.contenttypes.fields import GenericRelation\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils.functional import cached_property\n\nfrom adhocracy4 import transforms\nfrom adhocracy4.comments import models as comment_models\nfrom adhocracy4.models import base\nfrom adhocracy4.modules 
import models as module_models\n\n\nclass Chapter(module_models.Item):\n name = models.CharField(max_length=120)\n comments = GenericRelation(comment_models.Comment,\n related_query_name='chapter',\n object_id_field='object_pk')\n weight = models.PositiveIntegerField(default=0)\n\n class Meta:\n ordering = ('weight',)\n\n def __str__(self):\n return \"{}_chapter_{}\".format(str(self.module), self.pk)\n\n def get_absolute_url(self):\n if (self.project.display_timeline\n and not self.module.is_in_module_cluster):\n return ('{}?initialSlide={}'\n .format(reverse('meinberlin_documents:chapter-detail',\n args=[str(self.pk)]),\n self.module.get_timeline_index))\n return reverse('meinberlin_documents:chapter-detail',\n args=[str(self.pk)])\n\n @cached_property\n def prev(self):\n return Chapter.objects\\\n .filter(module=self.module)\\\n .filter(weight__lt=self.weight)\\\n .order_by('-weight')\\\n .first()\n\n @cached_property\n def next(self):\n return Chapter.objects\\\n .filter(module=self.module)\\\n .filter(weight__gt=self.weight)\\\n .order_by('weight')\\\n .first()\n\n\nclass Paragraph(base.TimeStampedModel):\n name = models.CharField(max_length=120, blank=True)\n text = RichTextUploadingField(config_name='image-editor')\n weight = models.PositiveIntegerField()\n chapter = models.ForeignKey(Chapter,\n on_delete=models.CASCADE,\n related_name='paragraphs')\n comments = GenericRelation(comment_models.Comment,\n related_query_name='paragraph',\n object_id_field='object_pk')\n\n class Meta:\n ordering = ('weight',)\n\n def __str__(self):\n return \"{}_paragraph_{}\".format(str(self.chapter), self.weight)\n\n def save(self, *args, **kwargs):\n self.text = transforms.clean_html_field(\n self.text, 'image-editor')\n super().save(*args, **kwargs)\n\n def get_absolute_url(self):\n return reverse('meinberlin_documents:paragraph-detail',\n args=[str(self.pk)])\n\n @property\n def creator(self):\n return self.chapter.creator\n\n @property\n def project(self):\n return self.module.project\n\n @property\n def module(self):\n return self.chapter.module\n", "path": "meinberlin/apps/documents/models.py"}]}
| 1,644 | 456 |
gh_patches_debug_32403
|
rasdani/github-patches
|
git_diff
|
microsoft__hi-ml-504
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Histo dataset mounting script does not show files permanently
Files are only visible while the script is running, but disappear once it terminates.
This could be a consequence of the library updates in #455.
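A minimal sketch of the behaviour a fix needs (assuming `mount_dataset` is changed to return the mount context rather than only the folder path): the context must stay referenced and the process must keep running, otherwise the mount is torn down and the files vanish.

```
import time

# Hypothetical usage: keep the returned mount context referenced for as long
# as the mounted files should remain visible; exiting unmounts the dataset.
mount_ctx = mount_dataset("TCGA-CRCk")
print("Mounted; files are available only while this process runs (Ctrl-C to stop).")
while True:
    time.sleep(60)
```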
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hi-ml-histopathology/src/histopathology/scripts/mount_azure_dataset.py`
Content:
```
1 # ------------------------------------------------------------------------------------------
2 # Copyright (c) Microsoft Corporation. All rights reserved.
3 # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 # ------------------------------------------------------------------------------------------
5 from pathlib import Path
6 import sys
7
8 himl_histo_root_dir = Path(__file__).parent.parent.parent
9 himl_root = himl_histo_root_dir.parent.parent
10 himl_azure_package_root = himl_root / "hi-ml-azure" / "src"
11 sys.path.insert(0, str(himl_azure_package_root))
12
13 from health_azure import DatasetConfig # noqa: E402
14 from health_azure.utils import get_workspace # noqa: E402
15
16
17 def mount_dataset(dataset_id: str) -> str:
18 ws = get_workspace()
19 target_folder = "/tmp/datasets/" + dataset_id
20 dataset = DatasetConfig(name=dataset_id, target_folder=target_folder, use_mounting=True)
21 dataset_mount_folder, mount_ctx = dataset.to_input_dataset_local(ws)
22 assert mount_ctx is not None # for mypy
23 mount_ctx.start()
24 return str(dataset_mount_folder)
25
26
27 if __name__ == '__main__':
28 import argparse
29 parser = argparse.ArgumentParser()
30 # Run this script as "python mount_azure_dataset.py --dataset_id TCGA-CRCk"
31 parser.add_argument('--dataset_id', type=str,
32 help='Name of the Azure dataset e.g. PANDA or TCGA-CRCk')
33 args = parser.parse_args()
34 mount_dataset(args.dataset_id)
35
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/hi-ml-histopathology/src/histopathology/scripts/mount_azure_dataset.py b/hi-ml-histopathology/src/histopathology/scripts/mount_azure_dataset.py
--- a/hi-ml-histopathology/src/histopathology/scripts/mount_azure_dataset.py
+++ b/hi-ml-histopathology/src/histopathology/scripts/mount_azure_dataset.py
@@ -4,6 +4,8 @@
# ------------------------------------------------------------------------------------------
from pathlib import Path
import sys
+import time
+from typing import Any
himl_histo_root_dir = Path(__file__).parent.parent.parent
himl_root = himl_histo_root_dir.parent.parent
@@ -14,14 +16,14 @@
from health_azure.utils import get_workspace # noqa: E402
-def mount_dataset(dataset_id: str) -> str:
+def mount_dataset(dataset_id: str) -> Any:
ws = get_workspace()
target_folder = "/tmp/datasets/" + dataset_id
dataset = DatasetConfig(name=dataset_id, target_folder=target_folder, use_mounting=True)
- dataset_mount_folder, mount_ctx = dataset.to_input_dataset_local(ws)
+ _, mount_ctx = dataset.to_input_dataset_local(ws)
assert mount_ctx is not None # for mypy
mount_ctx.start()
- return str(dataset_mount_folder)
+ return mount_ctx
if __name__ == '__main__':
@@ -31,4 +33,9 @@
parser.add_argument('--dataset_id', type=str,
help='Name of the Azure dataset e.g. PANDA or TCGA-CRCk')
args = parser.parse_args()
- mount_dataset(args.dataset_id)
+ # It is essential that the mount context is returned from the mounting function and referenced here.
+ # If not, mounting will be stopped, and the files are no longer available.
+ _ = mount_dataset(args.dataset_id)
+ print("The mounted dataset will only be available while this script is running. Press Ctrl-C to terminate it.`")
+ while True:
+ time.sleep(60)
|
{"golden_diff": "diff --git a/hi-ml-histopathology/src/histopathology/scripts/mount_azure_dataset.py b/hi-ml-histopathology/src/histopathology/scripts/mount_azure_dataset.py\n--- a/hi-ml-histopathology/src/histopathology/scripts/mount_azure_dataset.py\n+++ b/hi-ml-histopathology/src/histopathology/scripts/mount_azure_dataset.py\n@@ -4,6 +4,8 @@\n # ------------------------------------------------------------------------------------------\n from pathlib import Path\n import sys\n+import time\n+from typing import Any\n \n himl_histo_root_dir = Path(__file__).parent.parent.parent\n himl_root = himl_histo_root_dir.parent.parent\n@@ -14,14 +16,14 @@\n from health_azure.utils import get_workspace # noqa: E402\n \n \n-def mount_dataset(dataset_id: str) -> str:\n+def mount_dataset(dataset_id: str) -> Any:\n ws = get_workspace()\n target_folder = \"/tmp/datasets/\" + dataset_id\n dataset = DatasetConfig(name=dataset_id, target_folder=target_folder, use_mounting=True)\n- dataset_mount_folder, mount_ctx = dataset.to_input_dataset_local(ws)\n+ _, mount_ctx = dataset.to_input_dataset_local(ws)\n assert mount_ctx is not None # for mypy\n mount_ctx.start()\n- return str(dataset_mount_folder)\n+ return mount_ctx\n \n \n if __name__ == '__main__':\n@@ -31,4 +33,9 @@\n parser.add_argument('--dataset_id', type=str,\n help='Name of the Azure dataset e.g. PANDA or TCGA-CRCk')\n args = parser.parse_args()\n- mount_dataset(args.dataset_id)\n+ # It is essential that the mount context is returned from the mounting function and referenced here.\n+ # If not, mounting will be stopped, and the files are no longer available.\n+ _ = mount_dataset(args.dataset_id)\n+ print(\"The mounted dataset will only be available while this script is running. Press Ctrl-C to terminate it.`\")\n+ while True:\n+ time.sleep(60)\n", "issue": "Histo dataset mounting script does not show files permanently\nFiles are only visible while the script is running, but disappear once terminated. \r\nThis could be a consequence of the library updates in #455.\n", "before_files": [{"content": "# ------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.\n# ------------------------------------------------------------------------------------------\nfrom pathlib import Path\nimport sys\n\nhiml_histo_root_dir = Path(__file__).parent.parent.parent\nhiml_root = himl_histo_root_dir.parent.parent\nhiml_azure_package_root = himl_root / \"hi-ml-azure\" / \"src\"\nsys.path.insert(0, str(himl_azure_package_root))\n\nfrom health_azure import DatasetConfig # noqa: E402\nfrom health_azure.utils import get_workspace # noqa: E402\n\n\ndef mount_dataset(dataset_id: str) -> str:\n ws = get_workspace()\n target_folder = \"/tmp/datasets/\" + dataset_id\n dataset = DatasetConfig(name=dataset_id, target_folder=target_folder, use_mounting=True)\n dataset_mount_folder, mount_ctx = dataset.to_input_dataset_local(ws)\n assert mount_ctx is not None # for mypy\n mount_ctx.start()\n return str(dataset_mount_folder)\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n # Run this script as \"python mount_azure_dataset.py --dataset_id TCGA-CRCk\"\n parser.add_argument('--dataset_id', type=str,\n help='Name of the Azure dataset e.g. 
PANDA or TCGA-CRCk')\n args = parser.parse_args()\n mount_dataset(args.dataset_id)\n", "path": "hi-ml-histopathology/src/histopathology/scripts/mount_azure_dataset.py"}], "after_files": [{"content": "# ------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.\n# ------------------------------------------------------------------------------------------\nfrom pathlib import Path\nimport sys\nimport time\nfrom typing import Any\n\nhiml_histo_root_dir = Path(__file__).parent.parent.parent\nhiml_root = himl_histo_root_dir.parent.parent\nhiml_azure_package_root = himl_root / \"hi-ml-azure\" / \"src\"\nsys.path.insert(0, str(himl_azure_package_root))\n\nfrom health_azure import DatasetConfig # noqa: E402\nfrom health_azure.utils import get_workspace # noqa: E402\n\n\ndef mount_dataset(dataset_id: str) -> Any:\n ws = get_workspace()\n target_folder = \"/tmp/datasets/\" + dataset_id\n dataset = DatasetConfig(name=dataset_id, target_folder=target_folder, use_mounting=True)\n _, mount_ctx = dataset.to_input_dataset_local(ws)\n assert mount_ctx is not None # for mypy\n mount_ctx.start()\n return mount_ctx\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n # Run this script as \"python mount_azure_dataset.py --dataset_id TCGA-CRCk\"\n parser.add_argument('--dataset_id', type=str,\n help='Name of the Azure dataset e.g. PANDA or TCGA-CRCk')\n args = parser.parse_args()\n # It is essential that the mount context is returned from the mounting function and referenced here.\n # If not, mounting will be stopped, and the files are no longer available.\n _ = mount_dataset(args.dataset_id)\n print(\"The mounted dataset will only be available while this script is running. Press Ctrl-C to terminate it.`\")\n while True:\n time.sleep(60)\n", "path": "hi-ml-histopathology/src/histopathology/scripts/mount_azure_dataset.py"}]}
| 707 | 463 |
gh_patches_debug_22820
|
rasdani/github-patches
|
git_diff
|
bokeh__bokeh-8672
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use utf-8 encoding to open notebook file.
Currently, the `ipynb` file is opened by the following code:
```
with open(filename) as f:
pass
```
https://github.com/bokeh/bokeh/blob/master/bokeh/application/handlers/notebook.py#L117
I suggest opening the file with `encoding="utf-8"`; otherwise the serve command will raise an error on Windows systems.
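A minimal sketch of the suggested change (assuming `io.open` is used so the same call also works under Python 2; `filename` here is a placeholder for the handler's notebook path):

```
import io
import nbformat

filename = "example.ipynb"  # placeholder path

# Pass the encoding explicitly so the platform default (e.g. cp1252 on
# Windows) is never used to decode the notebook JSON.
with io.open(filename, encoding="utf-8") as f:
    nb = nbformat.read(f, nbformat.NO_CONVERT)
```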
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bokeh/application/handlers/notebook.py`
Content:
```
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
3 # All rights reserved.
4 #
5 # The full license is in the file LICENSE.txt, distributed with this software.
6 #-----------------------------------------------------------------------------
7 ''' Provide a Bokeh Application Handler to build up documents by running
8 the code from Jupyter notebook (``.ipynb``) files.
9
10 This handler is configured with the filename of a Jupyter notebook. When a
11 Bokeh application calls ``modify_doc``, the code from all the notebook cells
12 is collected and executed to process a new Document for a session. When the
13 notebook code is executed, the Document being modified will be available as
14 ``curdoc``, and any optionally provided ``args`` will be available as
15 ``sys.argv``.
16
17 '''
18
19 #-----------------------------------------------------------------------------
20 # Boilerplate
21 #-----------------------------------------------------------------------------
22 from __future__ import absolute_import, division, print_function, unicode_literals
23
24 import re
25 import sys
26 import logging
27 log = logging.getLogger(__name__)
28
29 #-----------------------------------------------------------------------------
30 # Imports
31 #-----------------------------------------------------------------------------
32
33 # Standard library imports
34
35 # External imports
36
37 # Bokeh imports
38 from ...util.dependencies import import_required
39 from .code import CodeHandler
40
41 #-----------------------------------------------------------------------------
42 # Globals and constants
43 #-----------------------------------------------------------------------------
44
45 __all__ = (
46 'NotebookHandler',
47 )
48
49 #-----------------------------------------------------------------------------
50 # General API
51 #-----------------------------------------------------------------------------
52
53 #-----------------------------------------------------------------------------
54 # Dev API
55 #-----------------------------------------------------------------------------
56
57 class NotebookHandler(CodeHandler):
58 ''' A Handler that uses code in a Jupyter notebook for modifying Bokeh
59 Documents.
60
61 '''
62
63 _logger_text = "%s: call to %s() ignored when running notebooks with the 'bokeh' command."
64
65 _origin = "Notebook"
66
67 def __init__(self, *args, **kwargs):
68 '''
69
70 Keywords:
71 filename (str) : a path to a Jupyter notebook (".ipynb") file
72
73 '''
74 nbformat = import_required('nbformat', 'The Bokeh notebook application handler requires Jupyter Notebook to be installed.')
75 nbconvert = import_required('nbconvert', 'The Bokeh notebook application handler requires Jupyter Notebook to be installed.')
76
77 if 'filename' not in kwargs:
78 raise ValueError('Must pass a filename to NotebookHandler')
79
80
81 class StripMagicsProcessor(nbconvert.preprocessors.Preprocessor):
82 """
83 Preprocessor to convert notebooks to Python source while stripping
84 out all magics (i.e IPython specific syntax).
85 """
86
87 _magic_pattern = re.compile(r'^\s*(?P<magic>%%\w\w+)($|(\s+))')
88
89 def strip_magics(self, source):
90 """
91 Given the source of a cell, filter out all cell and line magics.
92 """
93 filtered=[]
94 for line in source.splitlines():
95 match = self._magic_pattern.match(line)
96 if match is None:
97 filtered.append(line)
98 else:
99 msg = 'Stripping out IPython magic {magic} in code cell {cell}'
100 message = msg.format(cell=self._cell_counter, magic=match.group('magic'))
101 log.warning(message)
102 return '\n'.join(filtered)
103
104 def preprocess_cell(self, cell, resources, index):
105 if cell['cell_type'] == 'code':
106 self._cell_counter += 1
107 cell['source'] = self.strip_magics(cell['source'])
108 return cell, resources
109
110 def __call__(self, nb, resources):
111 self._cell_counter = 0
112 return self.preprocess(nb,resources)
113
114 preprocessors=[StripMagicsProcessor()]
115 filename = kwargs['filename']
116
117 with open(filename) as f:
118 nb = nbformat.read(f, nbformat.NO_CONVERT)
119 exporter = nbconvert.PythonExporter()
120
121 for preprocessor in preprocessors:
122 exporter.register_preprocessor(preprocessor)
123
124 source, _ = exporter.from_notebook_node(nb)
125 source = source.replace('get_ipython().run_line_magic', '')
126 source = source.replace('get_ipython().magic', '')
127
128 if sys.version_info.major == 2 and isinstance(source, unicode): # NOQA
129 source = source.encode('utf-8')
130 kwargs['source'] = source
131
132 super(NotebookHandler, self).__init__(*args, **kwargs)
133
134 #-----------------------------------------------------------------------------
135 # Private API
136 #-----------------------------------------------------------------------------
137
138 #-----------------------------------------------------------------------------
139 # Code
140 #-----------------------------------------------------------------------------
141
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bokeh/application/handlers/notebook.py b/bokeh/application/handlers/notebook.py
--- a/bokeh/application/handlers/notebook.py
+++ b/bokeh/application/handlers/notebook.py
@@ -21,8 +21,6 @@
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
-import re
-import sys
import logging
log = logging.getLogger(__name__)
@@ -31,6 +29,9 @@
#-----------------------------------------------------------------------------
# Standard library imports
+import io
+import re
+import sys
# External imports
@@ -114,7 +115,7 @@
preprocessors=[StripMagicsProcessor()]
filename = kwargs['filename']
- with open(filename) as f:
+ with io.open(filename, encoding="utf-8") as f:
nb = nbformat.read(f, nbformat.NO_CONVERT)
exporter = nbconvert.PythonExporter()
|
{"golden_diff": "diff --git a/bokeh/application/handlers/notebook.py b/bokeh/application/handlers/notebook.py\n--- a/bokeh/application/handlers/notebook.py\n+++ b/bokeh/application/handlers/notebook.py\n@@ -21,8 +21,6 @@\n #-----------------------------------------------------------------------------\n from __future__ import absolute_import, division, print_function, unicode_literals\n \n-import re\n-import sys\n import logging\n log = logging.getLogger(__name__)\n \n@@ -31,6 +29,9 @@\n #-----------------------------------------------------------------------------\n \n # Standard library imports\n+import io\n+import re\n+import sys\n \n # External imports\n \n@@ -114,7 +115,7 @@\n preprocessors=[StripMagicsProcessor()]\n filename = kwargs['filename']\n \n- with open(filename) as f:\n+ with io.open(filename, encoding=\"utf-8\") as f:\n nb = nbformat.read(f, nbformat.NO_CONVERT)\n exporter = nbconvert.PythonExporter()\n", "issue": "Use utf-8 encoding to open notebook file.\nCurrently, `ipynb` file is opened by following code:\r\n\r\n```\r\n with open(filename) as f:\r\n pass\r\n```\r\n\r\nhttps://github.com/bokeh/bokeh/blob/master/bokeh/application/handlers/notebook.py#L117\r\n\r\nI suggest to open the file with `encoding=\"utf-8\"`, otherwise the serve command will raise error on Windows system.\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.\n# All rights reserved.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n''' Provide a Bokeh Application Handler to build up documents by running\nthe code from Jupyter notebook (``.ipynb``) files.\n\nThis handler is configured with the filename of a Jupyter notebook. When a\nBokeh application calls ``modify_doc``, the code from all the notebook cells\nis collected and executed to process a new Document for a session. 
When the\nnotebook code is executed, the Document being modified will be available as\n``curdoc``, and any optionally provided ``args`` will be available as\n``sys.argv``.\n\n'''\n\n#-----------------------------------------------------------------------------\n# Boilerplate\n#-----------------------------------------------------------------------------\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport re\nimport sys\nimport logging\nlog = logging.getLogger(__name__)\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# Standard library imports\n\n# External imports\n\n# Bokeh imports\nfrom ...util.dependencies import import_required\nfrom .code import CodeHandler\n\n#-----------------------------------------------------------------------------\n# Globals and constants\n#-----------------------------------------------------------------------------\n\n__all__ = (\n 'NotebookHandler',\n)\n\n#-----------------------------------------------------------------------------\n# General API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Dev API\n#-----------------------------------------------------------------------------\n\nclass NotebookHandler(CodeHandler):\n ''' A Handler that uses code in a Jupyter notebook for modifying Bokeh\n Documents.\n\n '''\n\n _logger_text = \"%s: call to %s() ignored when running notebooks with the 'bokeh' command.\"\n\n _origin = \"Notebook\"\n\n def __init__(self, *args, **kwargs):\n '''\n\n Keywords:\n filename (str) : a path to a Jupyter notebook (\".ipynb\") file\n\n '''\n nbformat = import_required('nbformat', 'The Bokeh notebook application handler requires Jupyter Notebook to be installed.')\n nbconvert = import_required('nbconvert', 'The Bokeh notebook application handler requires Jupyter Notebook to be installed.')\n\n if 'filename' not in kwargs:\n raise ValueError('Must pass a filename to NotebookHandler')\n\n\n class StripMagicsProcessor(nbconvert.preprocessors.Preprocessor):\n \"\"\"\n Preprocessor to convert notebooks to Python source while stripping\n out all magics (i.e IPython specific syntax).\n \"\"\"\n\n _magic_pattern = re.compile(r'^\\s*(?P<magic>%%\\w\\w+)($|(\\s+))')\n\n def strip_magics(self, source):\n \"\"\"\n Given the source of a cell, filter out all cell and line magics.\n \"\"\"\n filtered=[]\n for line in source.splitlines():\n match = self._magic_pattern.match(line)\n if match is None:\n filtered.append(line)\n else:\n msg = 'Stripping out IPython magic {magic} in code cell {cell}'\n message = msg.format(cell=self._cell_counter, magic=match.group('magic'))\n log.warning(message)\n return '\\n'.join(filtered)\n\n def preprocess_cell(self, cell, resources, index):\n if cell['cell_type'] == 'code':\n self._cell_counter += 1\n cell['source'] = self.strip_magics(cell['source'])\n return cell, resources\n\n def __call__(self, nb, resources):\n self._cell_counter = 0\n return self.preprocess(nb,resources)\n\n preprocessors=[StripMagicsProcessor()]\n filename = kwargs['filename']\n\n with open(filename) as f:\n nb = nbformat.read(f, nbformat.NO_CONVERT)\n exporter = nbconvert.PythonExporter()\n\n for preprocessor in preprocessors:\n exporter.register_preprocessor(preprocessor)\n\n source, _ = exporter.from_notebook_node(nb)\n source = 
source.replace('get_ipython().run_line_magic', '')\n source = source.replace('get_ipython().magic', '')\n\n if sys.version_info.major == 2 and isinstance(source, unicode): # NOQA\n source = source.encode('utf-8')\n kwargs['source'] = source\n\n super(NotebookHandler, self).__init__(*args, **kwargs)\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n", "path": "bokeh/application/handlers/notebook.py"}], "after_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.\n# All rights reserved.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n''' Provide a Bokeh Application Handler to build up documents by running\nthe code from Jupyter notebook (``.ipynb``) files.\n\nThis handler is configured with the filename of a Jupyter notebook. When a\nBokeh application calls ``modify_doc``, the code from all the notebook cells\nis collected and executed to process a new Document for a session. When the\nnotebook code is executed, the Document being modified will be available as\n``curdoc``, and any optionally provided ``args`` will be available as\n``sys.argv``.\n\n'''\n\n#-----------------------------------------------------------------------------\n# Boilerplate\n#-----------------------------------------------------------------------------\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nlog = logging.getLogger(__name__)\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# Standard library imports\nimport io\nimport re\nimport sys\n\n# External imports\n\n# Bokeh imports\nfrom ...util.dependencies import import_required\nfrom .code import CodeHandler\n\n#-----------------------------------------------------------------------------\n# Globals and constants\n#-----------------------------------------------------------------------------\n\n__all__ = (\n 'NotebookHandler',\n)\n\n#-----------------------------------------------------------------------------\n# General API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Dev API\n#-----------------------------------------------------------------------------\n\nclass NotebookHandler(CodeHandler):\n ''' A Handler that uses code in a Jupyter notebook for modifying Bokeh\n Documents.\n\n '''\n\n _logger_text = \"%s: call to %s() ignored when running notebooks with the 'bokeh' command.\"\n\n _origin = \"Notebook\"\n\n def __init__(self, *args, **kwargs):\n '''\n\n Keywords:\n filename (str) : a path to a Jupyter notebook (\".ipynb\") file\n\n '''\n nbformat = import_required('nbformat', 'The Bokeh notebook application handler requires Jupyter Notebook to be installed.')\n nbconvert = import_required('nbconvert', 'The Bokeh notebook application handler requires Jupyter Notebook to be installed.')\n\n if 'filename' not in kwargs:\n raise ValueError('Must pass 
a filename to NotebookHandler')\n\n\n class StripMagicsProcessor(nbconvert.preprocessors.Preprocessor):\n \"\"\"\n Preprocessor to convert notebooks to Python source while stripping\n out all magics (i.e IPython specific syntax).\n \"\"\"\n\n _magic_pattern = re.compile(r'^\\s*(?P<magic>%%\\w\\w+)($|(\\s+))')\n\n def strip_magics(self, source):\n \"\"\"\n Given the source of a cell, filter out all cell and line magics.\n \"\"\"\n filtered=[]\n for line in source.splitlines():\n match = self._magic_pattern.match(line)\n if match is None:\n filtered.append(line)\n else:\n msg = 'Stripping out IPython magic {magic} in code cell {cell}'\n message = msg.format(cell=self._cell_counter, magic=match.group('magic'))\n log.warning(message)\n return '\\n'.join(filtered)\n\n def preprocess_cell(self, cell, resources, index):\n if cell['cell_type'] == 'code':\n self._cell_counter += 1\n cell['source'] = self.strip_magics(cell['source'])\n return cell, resources\n\n def __call__(self, nb, resources):\n self._cell_counter = 0\n return self.preprocess(nb,resources)\n\n preprocessors=[StripMagicsProcessor()]\n filename = kwargs['filename']\n\n with io.open(filename, encoding=\"utf-8\") as f:\n nb = nbformat.read(f, nbformat.NO_CONVERT)\n exporter = nbconvert.PythonExporter()\n\n for preprocessor in preprocessors:\n exporter.register_preprocessor(preprocessor)\n\n source, _ = exporter.from_notebook_node(nb)\n source = source.replace('get_ipython().run_line_magic', '')\n source = source.replace('get_ipython().magic', '')\n\n if sys.version_info.major == 2 and isinstance(source, unicode): # NOQA\n source = source.encode('utf-8')\n kwargs['source'] = source\n\n super(NotebookHandler, self).__init__(*args, **kwargs)\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n", "path": "bokeh/application/handlers/notebook.py"}]}
| 1,614 | 219 |
gh_patches_debug_31100
|
rasdani/github-patches
|
git_diff
|
gratipay__gratipay.com-2498
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
fix regression in receiving amounts
Stripe is giving to Bundler now, but Bundler's "receiving" doesn't reflect this, due to a regression introduced in #2352. [IRC](https://botbot.me/freenode/gittip/2014-06-12/?msg=16141806&page=1)
cc: @indirect @gdb
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gittip/billing/__init__.py`
Content:
```
1 """This module encapsulates billing logic and db access.
2
3 There are three pieces of information for each participant related to billing:
4
5 balanced_customer_href
6 * NULL - This participant has never been billed.
7 * 'deadbeef' - This participant has had a Balanced account created for
8 them, either by adding a credit card or a bank account.
9 last_bill_result
10 * NULL - This participant has not had their credit card charged yet.
11 * '' - This participant has a working card.
12 * <message> - An error message.
13 last_ach_result
14 * NULL - This participant has not wired up a bank account yet.
15 * '' - This participant has a working bank account.
16 * <message> - An error message.
17
18 """
19 from __future__ import unicode_literals
20
21 import balanced
22 from aspen.utils import typecheck
23
24
25 def get_balanced_account(db, username, balanced_customer_href):
26 """Find or create a balanced.Account.
27 """
28 typecheck( username, unicode
29 , balanced_customer_href, (unicode, None)
30 )
31
32 if balanced_customer_href is None:
33 customer = balanced.Customer(meta={
34 'username': username,
35 }).save()
36 BALANCED_ACCOUNT = """\
37
38 UPDATE participants
39 SET balanced_customer_href=%s
40 WHERE username=%s
41
42 """
43 db.run(BALANCED_ACCOUNT, (customer.href, username))
44 else:
45 customer = balanced.Customer.fetch(balanced_customer_href)
46 return customer
47
48
49 def associate(db, thing, username, balanced_customer_href, balanced_thing_uri):
50 """Given four unicodes, return a unicode.
51
52 This function attempts to associate the credit card or bank account details
53 referenced by balanced_thing_uri with a Balanced Account. If it fails we
54 log and return a unicode describing the failure. Even for failure we keep
55 balanced_customer_href; we don't reset it to None/NULL. It's useful for
56 loading the previous (bad) info from Balanced in order to prepopulate the
57 form.
58
59 """
60 typecheck( username, unicode
61 , balanced_customer_href, (unicode, None, balanced.Customer)
62 , balanced_thing_uri, unicode
63 , thing, unicode
64 )
65
66 if isinstance(balanced_customer_href, balanced.Customer):
67 balanced_account = balanced_customer_href
68 else:
69 balanced_account = get_balanced_account( db
70 , username
71 , balanced_customer_href
72 )
73 invalidate_on_balanced(thing, balanced_account.href)
74 SQL = "UPDATE participants SET last_%s_result=%%s WHERE username=%%s"
75 try:
76 if thing == "credit card":
77 SQL %= "bill"
78 obj = balanced.Card.fetch(balanced_thing_uri)
79 #add = balanced_account.add_card
80
81 else:
82 assert thing == "bank account", thing # sanity check
83 SQL %= "ach"
84 obj = balanced.BankAccount.fetch(balanced_thing_uri)
85 #add = balanced_account.add_bank_account
86
87 obj.associate_to_customer(balanced_account)
88 except balanced.exc.HTTPError as err:
89 error = err.message.message.decode('UTF-8') # XXX UTF-8?
90 else:
91 error = ''
92 typecheck(error, unicode)
93
94 db.run(SQL, (error, username))
95 return error
96
97
98 def invalidate_on_balanced(thing, balanced_customer_href):
99 """XXX Things in balanced cannot be deleted at the moment.
100
101 Instead we mark all valid cards as invalid which will restrict against
102 anyone being able to issue charges against them in the future.
103
104 See: https://github.com/balanced/balanced-api/issues/22
105
106 """
107 assert thing in ("credit card", "bank account")
108 typecheck(balanced_customer_href, (str, unicode))
109
110 customer = balanced.Customer.fetch(balanced_customer_href)
111 things = customer.cards if thing == "credit card" else customer.bank_accounts
112
113 for _thing in things:
114 _thing.unstore()
115
116
117 def clear(db, thing, username, balanced_customer_href):
118 typecheck( thing, unicode
119 , username, unicode
120 , balanced_customer_href, (unicode, str)
121 )
122 assert thing in ("credit card", "bank account"), thing
123 invalidate_on_balanced(thing, balanced_customer_href)
124 CLEAR = """\
125
126 UPDATE participants
127 SET last_%s_result=NULL
128 WHERE username=%%s
129
130 """ % ("bill" if thing == "credit card" else "ach")
131 db.run(CLEAR, (username,))
132
133
134 def store_error(db, thing, username, msg):
135 typecheck(thing, unicode, username, unicode, msg, unicode)
136 assert thing in ("credit card", "bank account"), thing
137 ERROR = """\
138
139 UPDATE participants
140 SET last_%s_result=%%s
141 WHERE username=%%s
142
143 """ % ("bill" if thing == "credit card" else "ach")
144 db.run(ERROR, (msg, username))
145
146
147 class BalancedThing(object):
148 """Represent either a credit card or a bank account.
149 """
150
151 thing_type = None # either 'card' or 'bank_account'
152 keys_to_attr_paths = None # set to a mapping in subclasses
153
154 _customer = None # underlying balanced.Customer object
155 _thing = None # underlying balanced.{BankAccount,Card} object
156
157 def __getitem__(self, key):
158 """Given a name, return a unicode.
159
160 Allow subclasses to provide a flat set of keys, which, under the hood,
161 might be nested attributes and/or keys. The traversal path is relative
162 to _thing (not self!).
163
164 """
165 attr_path = self.keys_to_attr_paths.get(key, key)
166
167 out = None
168 if self._customer is not None and self._thing is not None:
169 out = self._thing
170 for val in attr_path.split('.'):
171 if type(out) is dict:
172 # this lets us reach into the meta dict
173 out = out.get(val)
174 else:
175 try:
176 out = getattr(out, val)
177 except AttributeError:
178 raise KeyError("{} not found".format(val))
179 if out is None:
180 break
181
182 if out is None:
183 # Default to ''; see https://github.com/gittip/www.gittip.com/issues/2161.
184 out = ''
185
186 return out
187
188 def __init__(self, balanced_customer_href):
189 """Given a Balanced account_uri, load data from Balanced.
190 """
191 if balanced_customer_href is None:
192 return
193
194 # XXX Indexing is borken. See:
195 # https://github.com/balanced/balanced-python/issues/10
196
197 self._customer = balanced.Customer.fetch(balanced_customer_href)
198
199 things = getattr(self._customer, self.thing_type+'s')\
200 .filter(is_valid=True).all()
201 nvalid = len(things)
202
203 if nvalid == 0:
204 self._thing = None
205 elif nvalid == 1:
206 self._thing = things[0]
207 else:
208 msg = "%s has %d valid %ss"
209 msg %= (balanced_customer_href, len(things), self.thing_type)
210 raise RuntimeError(msg)
211
212 @property
213 def is_setup(self):
214 return self._thing is not None
215
216
217 class BalancedCard(BalancedThing):
218 """This is a dict-like wrapper around a Balanced credit card.
219 """
220
221 thing_type = 'card'
222
223 keys_to_attr_paths = {
224 'id': 'customer.href',
225 'address_1': 'address.line1',
226 'address_2': 'meta.address_2',
227 'country': 'meta.country',
228 'city_town': 'meta.city_town',
229 'zip': 'address.postal_code',
230 # gittip is saving the state in the meta field
231 # for compatibility with legacy customers
232 'state': 'meta.region',
233 'last4': 'number',
234 'last_four': 'number',
235 'expiration_month': 'expiration_month',
236 'expiration_year': 'expiration_year',
237 }
238
239
240 class BalancedBankAccount(BalancedThing):
241 """This is a dict-like wrapper around a Balanced bank account.
242 """
243
244 thing_type = 'bank_account'
245
246 keys_to_attr_paths = {
247 'customer_href': 'customer.href',
248 }
249
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gittip/billing/__init__.py b/gittip/billing/__init__.py
--- a/gittip/billing/__init__.py
+++ b/gittip/billing/__init__.py
@@ -22,6 +22,29 @@
from aspen.utils import typecheck
+def update_receiving_amounts(db, tipper, error):
+ """Update receiving amounts of participants tipped by given tipper.
+ """
+ from gittip.models.participant import Participant
+ tipper = Participant.from_username(tipper)
+ if tipper.is_suspicious or tipper.last_bill_result == error:
+ return
+ if error == '':
+ op = '+'
+ else:
+ op = '-'
+ db.run("""
+ UPDATE participants
+ SET receiving = (receiving {0} amount)
+ FROM ( SELECT DISTINCT ON (tippee) tippee, amount
+ FROM tips
+ WHERE tipper=%(tipper)s
+ ORDER BY tippee, mtime DESC
+ ) foo
+ WHERE tippee = username;
+ """.format(op), dict(tipper=tipper))
+
+
def get_balanced_account(db, username, balanced_customer_href):
"""Find or create a balanced.Account.
"""
@@ -92,6 +115,8 @@
typecheck(error, unicode)
db.run(SQL, (error, username))
+ if thing == "credit card":
+ update_receiving_amounts(db, username, error)
return error
@@ -129,6 +154,8 @@
""" % ("bill" if thing == "credit card" else "ach")
db.run(CLEAR, (username,))
+ if thing == "credit card":
+ update_receiving_amounts(db, username, None)
def store_error(db, thing, username, msg):
@@ -142,6 +169,8 @@
""" % ("bill" if thing == "credit card" else "ach")
db.run(ERROR, (msg, username))
+ if thing == "credit card":
+ update_receiving_amounts(db, username, msg)
class BalancedThing(object):
|
{"golden_diff": "diff --git a/gittip/billing/__init__.py b/gittip/billing/__init__.py\n--- a/gittip/billing/__init__.py\n+++ b/gittip/billing/__init__.py\n@@ -22,6 +22,29 @@\n from aspen.utils import typecheck\n \n \n+def update_receiving_amounts(db, tipper, error):\n+ \"\"\"Update receiving amounts of participants tipped by given tipper.\n+ \"\"\"\n+ from gittip.models.participant import Participant\n+ tipper = Participant.from_username(tipper)\n+ if tipper.is_suspicious or tipper.last_bill_result == error:\n+ return\n+ if error == '':\n+ op = '+'\n+ else:\n+ op = '-'\n+ db.run(\"\"\"\n+ UPDATE participants\n+ SET receiving = (receiving {0} amount)\n+ FROM ( SELECT DISTINCT ON (tippee) tippee, amount\n+ FROM tips\n+ WHERE tipper=%(tipper)s\n+ ORDER BY tippee, mtime DESC\n+ ) foo\n+ WHERE tippee = username;\n+ \"\"\".format(op), dict(tipper=tipper))\n+\n+\n def get_balanced_account(db, username, balanced_customer_href):\n \"\"\"Find or create a balanced.Account.\n \"\"\"\n@@ -92,6 +115,8 @@\n typecheck(error, unicode)\n \n db.run(SQL, (error, username))\n+ if thing == \"credit card\":\n+ update_receiving_amounts(db, username, error)\n return error\n \n \n@@ -129,6 +154,8 @@\n \n \"\"\" % (\"bill\" if thing == \"credit card\" else \"ach\")\n db.run(CLEAR, (username,))\n+ if thing == \"credit card\":\n+ update_receiving_amounts(db, username, None)\n \n \n def store_error(db, thing, username, msg):\n@@ -142,6 +169,8 @@\n \n \"\"\" % (\"bill\" if thing == \"credit card\" else \"ach\")\n db.run(ERROR, (msg, username))\n+ if thing == \"credit card\":\n+ update_receiving_amounts(db, username, msg)\n \n \n class BalancedThing(object):\n", "issue": "fix regression in receiving amounts\nStripe is giving to Bundler now, but Bundler's \"receiving\" doesn't reflect this, due to a regression with #2352. 
[IRC](https://botbot.me/freenode/gittip/2014-06-12/?msg=16141806&page=1)\n\ncc: @indirect @gdb\n\n", "before_files": [{"content": "\"\"\"This module encapsulates billing logic and db access.\n\nThere are three pieces of information for each participant related to billing:\n\n balanced_customer_href\n * NULL - This participant has never been billed.\n * 'deadbeef' - This participant has had a Balanced account created for\n them, either by adding a credit card or a bank account.\n last_bill_result\n * NULL - This participant has not had their credit card charged yet.\n * '' - This participant has a working card.\n * <message> - An error message.\n last_ach_result\n * NULL - This participant has not wired up a bank account yet.\n * '' - This participant has a working bank account.\n * <message> - An error message.\n\n\"\"\"\nfrom __future__ import unicode_literals\n\nimport balanced\nfrom aspen.utils import typecheck\n\n\ndef get_balanced_account(db, username, balanced_customer_href):\n \"\"\"Find or create a balanced.Account.\n \"\"\"\n typecheck( username, unicode\n , balanced_customer_href, (unicode, None)\n )\n\n if balanced_customer_href is None:\n customer = balanced.Customer(meta={\n 'username': username,\n }).save()\n BALANCED_ACCOUNT = \"\"\"\\\n\n UPDATE participants\n SET balanced_customer_href=%s\n WHERE username=%s\n\n \"\"\"\n db.run(BALANCED_ACCOUNT, (customer.href, username))\n else:\n customer = balanced.Customer.fetch(balanced_customer_href)\n return customer\n\n\ndef associate(db, thing, username, balanced_customer_href, balanced_thing_uri):\n \"\"\"Given four unicodes, return a unicode.\n\n This function attempts to associate the credit card or bank account details\n referenced by balanced_thing_uri with a Balanced Account. If it fails we\n log and return a unicode describing the failure. Even for failure we keep\n balanced_customer_href; we don't reset it to None/NULL. 
It's useful for\n loading the previous (bad) info from Balanced in order to prepopulate the\n form.\n\n \"\"\"\n typecheck( username, unicode\n , balanced_customer_href, (unicode, None, balanced.Customer)\n , balanced_thing_uri, unicode\n , thing, unicode\n )\n\n if isinstance(balanced_customer_href, balanced.Customer):\n balanced_account = balanced_customer_href\n else:\n balanced_account = get_balanced_account( db\n , username\n , balanced_customer_href\n )\n invalidate_on_balanced(thing, balanced_account.href)\n SQL = \"UPDATE participants SET last_%s_result=%%s WHERE username=%%s\"\n try:\n if thing == \"credit card\":\n SQL %= \"bill\"\n obj = balanced.Card.fetch(balanced_thing_uri)\n #add = balanced_account.add_card\n\n else:\n assert thing == \"bank account\", thing # sanity check\n SQL %= \"ach\"\n obj = balanced.BankAccount.fetch(balanced_thing_uri)\n #add = balanced_account.add_bank_account\n\n obj.associate_to_customer(balanced_account)\n except balanced.exc.HTTPError as err:\n error = err.message.message.decode('UTF-8') # XXX UTF-8?\n else:\n error = ''\n typecheck(error, unicode)\n\n db.run(SQL, (error, username))\n return error\n\n\ndef invalidate_on_balanced(thing, balanced_customer_href):\n \"\"\"XXX Things in balanced cannot be deleted at the moment.\n\n Instead we mark all valid cards as invalid which will restrict against\n anyone being able to issue charges against them in the future.\n\n See: https://github.com/balanced/balanced-api/issues/22\n\n \"\"\"\n assert thing in (\"credit card\", \"bank account\")\n typecheck(balanced_customer_href, (str, unicode))\n\n customer = balanced.Customer.fetch(balanced_customer_href)\n things = customer.cards if thing == \"credit card\" else customer.bank_accounts\n\n for _thing in things:\n _thing.unstore()\n\n\ndef clear(db, thing, username, balanced_customer_href):\n typecheck( thing, unicode\n , username, unicode\n , balanced_customer_href, (unicode, str)\n )\n assert thing in (\"credit card\", \"bank account\"), thing\n invalidate_on_balanced(thing, balanced_customer_href)\n CLEAR = \"\"\"\\\n\n UPDATE participants\n SET last_%s_result=NULL\n WHERE username=%%s\n\n \"\"\" % (\"bill\" if thing == \"credit card\" else \"ach\")\n db.run(CLEAR, (username,))\n\n\ndef store_error(db, thing, username, msg):\n typecheck(thing, unicode, username, unicode, msg, unicode)\n assert thing in (\"credit card\", \"bank account\"), thing\n ERROR = \"\"\"\\\n\n UPDATE participants\n SET last_%s_result=%%s\n WHERE username=%%s\n\n \"\"\" % (\"bill\" if thing == \"credit card\" else \"ach\")\n db.run(ERROR, (msg, username))\n\n\nclass BalancedThing(object):\n \"\"\"Represent either a credit card or a bank account.\n \"\"\"\n\n thing_type = None # either 'card' or 'bank_account'\n keys_to_attr_paths = None # set to a mapping in subclasses\n\n _customer = None # underlying balanced.Customer object\n _thing = None # underlying balanced.{BankAccount,Card} object\n\n def __getitem__(self, key):\n \"\"\"Given a name, return a unicode.\n\n Allow subclasses to provide a flat set of keys, which, under the hood,\n might be nested attributes and/or keys. 
The traversal path is relative\n to _thing (not self!).\n\n \"\"\"\n attr_path = self.keys_to_attr_paths.get(key, key)\n\n out = None\n if self._customer is not None and self._thing is not None:\n out = self._thing\n for val in attr_path.split('.'):\n if type(out) is dict:\n # this lets us reach into the meta dict\n out = out.get(val)\n else:\n try:\n out = getattr(out, val)\n except AttributeError:\n raise KeyError(\"{} not found\".format(val))\n if out is None:\n break\n\n if out is None:\n # Default to ''; see https://github.com/gittip/www.gittip.com/issues/2161.\n out = ''\n\n return out\n\n def __init__(self, balanced_customer_href):\n \"\"\"Given a Balanced account_uri, load data from Balanced.\n \"\"\"\n if balanced_customer_href is None:\n return\n\n # XXX Indexing is borken. See:\n # https://github.com/balanced/balanced-python/issues/10\n\n self._customer = balanced.Customer.fetch(balanced_customer_href)\n\n things = getattr(self._customer, self.thing_type+'s')\\\n .filter(is_valid=True).all()\n nvalid = len(things)\n\n if nvalid == 0:\n self._thing = None\n elif nvalid == 1:\n self._thing = things[0]\n else:\n msg = \"%s has %d valid %ss\"\n msg %= (balanced_customer_href, len(things), self.thing_type)\n raise RuntimeError(msg)\n\n @property\n def is_setup(self):\n return self._thing is not None\n\n\nclass BalancedCard(BalancedThing):\n \"\"\"This is a dict-like wrapper around a Balanced credit card.\n \"\"\"\n\n thing_type = 'card'\n\n keys_to_attr_paths = {\n 'id': 'customer.href',\n 'address_1': 'address.line1',\n 'address_2': 'meta.address_2',\n 'country': 'meta.country',\n 'city_town': 'meta.city_town',\n 'zip': 'address.postal_code',\n # gittip is saving the state in the meta field\n # for compatibility with legacy customers\n 'state': 'meta.region',\n 'last4': 'number',\n 'last_four': 'number',\n 'expiration_month': 'expiration_month',\n 'expiration_year': 'expiration_year',\n }\n\n\nclass BalancedBankAccount(BalancedThing):\n \"\"\"This is a dict-like wrapper around a Balanced bank account.\n \"\"\"\n\n thing_type = 'bank_account'\n\n keys_to_attr_paths = {\n 'customer_href': 'customer.href',\n }\n", "path": "gittip/billing/__init__.py"}], "after_files": [{"content": "\"\"\"This module encapsulates billing logic and db access.\n\nThere are three pieces of information for each participant related to billing:\n\n balanced_customer_href\n * NULL - This participant has never been billed.\n * 'deadbeef' - This participant has had a Balanced account created for\n them, either by adding a credit card or a bank account.\n last_bill_result\n * NULL - This participant has not had their credit card charged yet.\n * '' - This participant has a working card.\n * <message> - An error message.\n last_ach_result\n * NULL - This participant has not wired up a bank account yet.\n * '' - This participant has a working bank account.\n * <message> - An error message.\n\n\"\"\"\nfrom __future__ import unicode_literals\n\nimport balanced\nfrom aspen.utils import typecheck\n\n\ndef update_receiving_amounts(db, tipper, error):\n \"\"\"Update receiving amounts of participants tipped by given tipper.\n \"\"\"\n from gittip.models.participant import Participant\n tipper = Participant.from_username(tipper)\n if tipper.is_suspicious or tipper.last_bill_result == error:\n return\n if error == '':\n op = '+'\n else:\n op = '-'\n db.run(\"\"\"\n UPDATE participants\n SET receiving = (receiving {0} amount)\n FROM ( SELECT DISTINCT ON (tippee) tippee, amount\n FROM tips\n WHERE tipper=%(tipper)s\n ORDER BY 
tippee, mtime DESC\n ) foo\n WHERE tippee = username;\n \"\"\".format(op), dict(tipper=tipper))\n\n\ndef get_balanced_account(db, username, balanced_customer_href):\n \"\"\"Find or create a balanced.Account.\n \"\"\"\n typecheck( username, unicode\n , balanced_customer_href, (unicode, None)\n )\n\n if balanced_customer_href is None:\n customer = balanced.Customer(meta={\n 'username': username,\n }).save()\n BALANCED_ACCOUNT = \"\"\"\\\n\n UPDATE participants\n SET balanced_customer_href=%s\n WHERE username=%s\n\n \"\"\"\n db.run(BALANCED_ACCOUNT, (customer.href, username))\n else:\n customer = balanced.Customer.fetch(balanced_customer_href)\n return customer\n\n\ndef associate(db, thing, username, balanced_customer_href, balanced_thing_uri):\n \"\"\"Given four unicodes, return a unicode.\n\n This function attempts to associate the credit card or bank account details\n referenced by balanced_thing_uri with a Balanced Account. If it fails we\n log and return a unicode describing the failure. Even for failure we keep\n balanced_customer_href; we don't reset it to None/NULL. It's useful for\n loading the previous (bad) info from Balanced in order to prepopulate the\n form.\n\n \"\"\"\n typecheck( username, unicode\n , balanced_customer_href, (unicode, None, balanced.Customer)\n , balanced_thing_uri, unicode\n , thing, unicode\n )\n\n if isinstance(balanced_customer_href, balanced.Customer):\n balanced_account = balanced_customer_href\n else:\n balanced_account = get_balanced_account( db\n , username\n , balanced_customer_href\n )\n invalidate_on_balanced(thing, balanced_account.href)\n SQL = \"UPDATE participants SET last_%s_result=%%s WHERE username=%%s\"\n try:\n if thing == \"credit card\":\n SQL %= \"bill\"\n obj = balanced.Card.fetch(balanced_thing_uri)\n #add = balanced_account.add_card\n\n else:\n assert thing == \"bank account\", thing # sanity check\n SQL %= \"ach\"\n obj = balanced.BankAccount.fetch(balanced_thing_uri)\n #add = balanced_account.add_bank_account\n\n obj.associate_to_customer(balanced_account)\n except balanced.exc.HTTPError as err:\n error = err.message.message.decode('UTF-8') # XXX UTF-8?\n else:\n error = ''\n typecheck(error, unicode)\n\n db.run(SQL, (error, username))\n if thing == \"credit card\":\n update_receiving_amounts(db, username, error)\n return error\n\n\ndef invalidate_on_balanced(thing, balanced_customer_href):\n \"\"\"XXX Things in balanced cannot be deleted at the moment.\n\n Instead we mark all valid cards as invalid which will restrict against\n anyone being able to issue charges against them in the future.\n\n See: https://github.com/balanced/balanced-api/issues/22\n\n \"\"\"\n assert thing in (\"credit card\", \"bank account\")\n typecheck(balanced_customer_href, (str, unicode))\n\n customer = balanced.Customer.fetch(balanced_customer_href)\n things = customer.cards if thing == \"credit card\" else customer.bank_accounts\n\n for _thing in things:\n _thing.unstore()\n\n\ndef clear(db, thing, username, balanced_customer_href):\n typecheck( thing, unicode\n , username, unicode\n , balanced_customer_href, (unicode, str)\n )\n assert thing in (\"credit card\", \"bank account\"), thing\n invalidate_on_balanced(thing, balanced_customer_href)\n CLEAR = \"\"\"\\\n\n UPDATE participants\n SET last_%s_result=NULL\n WHERE username=%%s\n\n \"\"\" % (\"bill\" if thing == \"credit card\" else \"ach\")\n db.run(CLEAR, (username,))\n if thing == \"credit card\":\n update_receiving_amounts(db, username, None)\n\n\ndef store_error(db, thing, username, msg):\n 
typecheck(thing, unicode, username, unicode, msg, unicode)\n assert thing in (\"credit card\", \"bank account\"), thing\n ERROR = \"\"\"\\\n\n UPDATE participants\n SET last_%s_result=%%s\n WHERE username=%%s\n\n \"\"\" % (\"bill\" if thing == \"credit card\" else \"ach\")\n db.run(ERROR, (msg, username))\n if thing == \"credit card\":\n update_receiving_amounts(db, username, msg)\n\n\nclass BalancedThing(object):\n \"\"\"Represent either a credit card or a bank account.\n \"\"\"\n\n thing_type = None # either 'card' or 'bank_account'\n keys_to_attr_paths = None # set to a mapping in subclasses\n\n _customer = None # underlying balanced.Customer object\n _thing = None # underlying balanced.{BankAccount,Card} object\n\n def __getitem__(self, key):\n \"\"\"Given a name, return a unicode.\n\n Allow subclasses to provide a flat set of keys, which, under the hood,\n might be nested attributes and/or keys. The traversal path is relative\n to _thing (not self!).\n\n \"\"\"\n attr_path = self.keys_to_attr_paths.get(key, key)\n\n out = None\n if self._customer is not None and self._thing is not None:\n out = self._thing\n for val in attr_path.split('.'):\n if type(out) is dict:\n # this lets us reach into the meta dict\n out = out.get(val)\n else:\n try:\n out = getattr(out, val)\n except AttributeError:\n raise KeyError(\"{} not found\".format(val))\n if out is None:\n break\n\n if out is None:\n # Default to ''; see https://github.com/gittip/www.gittip.com/issues/2161.\n out = ''\n\n return out\n\n def __init__(self, balanced_customer_href):\n \"\"\"Given a Balanced account_uri, load data from Balanced.\n \"\"\"\n if balanced_customer_href is None:\n return\n\n # XXX Indexing is borken. See:\n # https://github.com/balanced/balanced-python/issues/10\n\n self._customer = balanced.Customer.fetch(balanced_customer_href)\n\n things = getattr(self._customer, self.thing_type+'s')\\\n .filter(is_valid=True).all()\n nvalid = len(things)\n\n if nvalid == 0:\n self._thing = None\n elif nvalid == 1:\n self._thing = things[0]\n else:\n msg = \"%s has %d valid %ss\"\n msg %= (balanced_customer_href, len(things), self.thing_type)\n raise RuntimeError(msg)\n\n @property\n def is_setup(self):\n return self._thing is not None\n\n\nclass BalancedCard(BalancedThing):\n \"\"\"This is a dict-like wrapper around a Balanced credit card.\n \"\"\"\n\n thing_type = 'card'\n\n keys_to_attr_paths = {\n 'id': 'customer.href',\n 'address_1': 'address.line1',\n 'address_2': 'meta.address_2',\n 'country': 'meta.country',\n 'city_town': 'meta.city_town',\n 'zip': 'address.postal_code',\n # gittip is saving the state in the meta field\n # for compatibility with legacy customers\n 'state': 'meta.region',\n 'last4': 'number',\n 'last_four': 'number',\n 'expiration_month': 'expiration_month',\n 'expiration_year': 'expiration_year',\n }\n\n\nclass BalancedBankAccount(BalancedThing):\n \"\"\"This is a dict-like wrapper around a Balanced bank account.\n \"\"\"\n\n thing_type = 'bank_account'\n\n keys_to_attr_paths = {\n 'customer_href': 'customer.href',\n }\n", "path": "gittip/billing/__init__.py"}]}
| 2,797 | 491 |
gh_patches_debug_6081
|
rasdani/github-patches
|
git_diff
|
CTFd__CTFd-1963
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add CSV examples for CSV Import
Add CSV examples for CSV Import
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `CTFd/utils/csv/__init__.py`
Content:
```
1 import csv
2 import json
3 from io import BytesIO, StringIO
4
5 from CTFd.models import (
6 Flags,
7 Hints,
8 Tags,
9 TeamFields,
10 Teams,
11 UserFields,
12 Users,
13 db,
14 get_class_by_tablename,
15 )
16 from CTFd.plugins.challenges import get_chal_class
17 from CTFd.utils.config import is_teams_mode, is_users_mode
18 from CTFd.utils.scores import get_standings
19
20
21 def get_dumpable_tables():
22 csv_keys = list(CSV_KEYS.keys())
23 db_keys = list(db.metadata.tables.keys())
24 tables = csv_keys + db_keys
25 table_keys = list(zip(tables, tables))
26 return table_keys
27
28
29 def dump_csv(name):
30 dump_func = CSV_KEYS.get(name)
31 if dump_func:
32 return dump_func()
33 elif get_class_by_tablename(name):
34 return dump_database_table(tablename=name)
35 else:
36 raise KeyError
37
38
39 def dump_scoreboard_csv():
40 # TODO: Add fields to scoreboard data
41 temp = StringIO()
42 writer = csv.writer(temp)
43
44 standings = get_standings()
45
46 # Get all user fields in a specific order
47 user_fields = UserFields.query.all()
48 user_field_ids = [f.id for f in user_fields]
49 user_field_names = [f.name for f in user_fields]
50
51 if is_teams_mode():
52 team_fields = TeamFields.query.all()
53 team_field_ids = [f.id for f in team_fields]
54 team_field_names = [f.name for f in team_fields]
55
56 header = (
57 [
58 "place",
59 "team",
60 "team id",
61 "score",
62 "member name",
63 "member id",
64 "member email",
65 "member score",
66 ]
67 + user_field_names
68 + team_field_names
69 )
70 writer.writerow(header)
71
72 for i, standing in enumerate(standings):
73 team = Teams.query.filter_by(id=standing.account_id).first()
74
75 # Build field entries using the order of the field values
76 team_field_entries = {f.field_id: f.value for f in team.field_entries}
77 team_field_values = [
78 team_field_entries.get(f_id, "") for f_id in team_field_ids
79 ]
80 team_row = [
81 i + 1,
82 team.name,
83 team.id,
84 standing.score,
85 "",
86 "",
87 ] + team_field_values
88
89 writer.writerow(team_row)
90
91 for member in team.members:
92 user_field_entries = {f.field_id: f.value for f in member.field_entries}
93 user_field_values = [
94 user_field_entries.get(f_id, "") for f_id in user_field_ids
95 ]
96 user_row = [
97 "",
98 "",
99 "",
100 "",
101 member.name,
102 member.id,
103 member.email,
104 member.score,
105 ] + user_field_values
106 writer.writerow(user_row)
107 elif is_users_mode():
108 header = ["place", "user", "score"] + user_field_names
109 writer.writerow(header)
110
111 for i, standing in enumerate(standings):
112 user = Users.query.filter_by(id=standing.account_id).first()
113
114 # Build field entries using the order of the field values
115 user_field_entries = {f.field_id: f.value for f in user.field_entries}
116 user_field_values = [
117 user_field_entries.get(f_id, "") for f_id in user_field_ids
118 ]
119 user_row = [i + 1, user.name, standing.score] + user_field_values
120 writer.writerow(user_row)
121
122 # In Python 3 send_file requires bytes
123 output = BytesIO()
124 output.write(temp.getvalue().encode("utf-8"))
125 output.seek(0)
126 temp.close()
127
128 return output
129
130
131 def dump_users_with_fields_csv():
132 temp = StringIO()
133 writer = csv.writer(temp)
134
135 user_fields = UserFields.query.all()
136 user_field_ids = [f.id for f in user_fields]
137 user_field_names = [f.name for f in user_fields]
138
139 header = [column.name for column in Users.__mapper__.columns] + user_field_names
140 writer.writerow(header)
141
142 responses = Users.query.all()
143
144 for curr in responses:
145 user_field_entries = {f.field_id: f.value for f in curr.field_entries}
146 user_field_values = [
147 user_field_entries.get(f_id, "") for f_id in user_field_ids
148 ]
149 user_row = [
150 getattr(curr, column.name) for column in Users.__mapper__.columns
151 ] + user_field_values
152 writer.writerow(user_row)
153
154 temp.seek(0)
155
156 # In Python 3 send_file requires bytes
157 output = BytesIO()
158 output.write(temp.getvalue().encode("utf-8"))
159 output.seek(0)
160 temp.close()
161
162 return output
163
164
165 def dump_teams_with_fields_csv():
166 temp = StringIO()
167 writer = csv.writer(temp)
168
169 team_fields = TeamFields.query.all()
170 team_field_ids = [f.id for f in team_fields]
171 team_field_names = [f.name for f in team_fields]
172
173 header = [column.name for column in Teams.__mapper__.columns] + team_field_names
174 writer.writerow(header)
175
176 responses = Teams.query.all()
177
178 for curr in responses:
179 team_field_entries = {f.field_id: f.value for f in curr.field_entries}
180 team_field_values = [
181 team_field_entries.get(f_id, "") for f_id in team_field_ids
182 ]
183
184 team_row = [
185 getattr(curr, column.name) for column in Teams.__mapper__.columns
186 ] + team_field_values
187
188 writer.writerow(team_row)
189
190 temp.seek(0)
191
192 # In Python 3 send_file requires bytes
193 output = BytesIO()
194 output.write(temp.getvalue().encode("utf-8"))
195 output.seek(0)
196 temp.close()
197
198 return output
199
200
201 def dump_database_table(tablename):
202 # TODO: It might make sense to limit dumpable tables. Config could potentially leak sensitive information.
203 model = get_class_by_tablename(tablename)
204
205 if model is None:
206 raise KeyError("Unknown database table")
207
208 temp = StringIO()
209 writer = csv.writer(temp)
210
211 header = [column.name for column in model.__mapper__.columns]
212 writer.writerow(header)
213
214 responses = model.query.all()
215
216 for curr in responses:
217 writer.writerow(
218 [getattr(curr, column.name) for column in model.__mapper__.columns]
219 )
220
221 temp.seek(0)
222
223 # In Python 3 send_file requires bytes
224 output = BytesIO()
225 output.write(temp.getvalue().encode("utf-8"))
226 output.seek(0)
227 temp.close()
228
229 return output
230
231
232 def load_users_csv(dict_reader):
233 for line in dict_reader:
234 result = Users(**line)
235 db.session.add(result)
236 db.session.commit()
237 return True
238
239
240 def load_teams_csv(dict_reader):
241 for line in dict_reader:
242 result = Teams(**line)
243 db.session.add(result)
244 db.session.commit()
245 return True
246
247
248 def load_challenges_csv(dict_reader):
249 for line in dict_reader:
250 flags = line.pop("flags", None)
251 tags = line.pop("tags", None)
252 hints = line.pop("hints", None)
253 challenge_type = line.pop("type", "standard")
254
255 # Load in custome type_data
256 type_data = json.loads(line.pop("type_data", "{}"))
257 line.update(type_data)
258
259 ChallengeClass = get_chal_class(challenge_type)
260 challenge = ChallengeClass.challenge_model(**line)
261 db.session.add(challenge)
262 db.session.commit()
263
264 if flags:
265 flags = [flag.strip() for flag in flags.split(",")]
266 for flag in flags:
267 f = Flags(type="static", challenge_id=challenge.id, content=flag,)
268 db.session.add(f)
269 db.session.commit()
270
271 if tags:
272 tags = [tag.strip() for tag in tags.split(",")]
273 for tag in tags:
274 t = Tags(challenge_id=challenge.id, value=tag,)
275 db.session.add(t)
276 db.session.commit()
277
278 if hints:
279 hints = [hint.strip() for hint in hints.split(",")]
280 for hint in hints:
281 h = Hints(challenge_id=challenge.id, content=hint,)
282 db.session.add(h)
283 db.session.commit()
284 return True
285
286
287 CSV_KEYS = {
288 "scoreboard": dump_scoreboard_csv,
289 "users+fields": dump_users_with_fields_csv,
290 "teams+fields": dump_teams_with_fields_csv,
291 }
292
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/CTFd/utils/csv/__init__.py b/CTFd/utils/csv/__init__.py
--- a/CTFd/utils/csv/__init__.py
+++ b/CTFd/utils/csv/__init__.py
@@ -252,8 +252,8 @@
hints = line.pop("hints", None)
challenge_type = line.pop("type", "standard")
- # Load in custome type_data
- type_data = json.loads(line.pop("type_data", "{}"))
+ # Load in custom type_data
+ type_data = json.loads(line.pop("type_data", "{}") or "{}")
line.update(type_data)
ChallengeClass = get_chal_class(challenge_type)
|
{"golden_diff": "diff --git a/CTFd/utils/csv/__init__.py b/CTFd/utils/csv/__init__.py\n--- a/CTFd/utils/csv/__init__.py\n+++ b/CTFd/utils/csv/__init__.py\n@@ -252,8 +252,8 @@\n hints = line.pop(\"hints\", None)\n challenge_type = line.pop(\"type\", \"standard\")\n \n- # Load in custome type_data\n- type_data = json.loads(line.pop(\"type_data\", \"{}\"))\n+ # Load in custom type_data\n+ type_data = json.loads(line.pop(\"type_data\", \"{}\") or \"{}\")\n line.update(type_data)\n \n ChallengeClass = get_chal_class(challenge_type)\n", "issue": "Add CSV examples for CSV Import\nAdd CSV examples for CSV Import\n", "before_files": [{"content": "import csv\nimport json\nfrom io import BytesIO, StringIO\n\nfrom CTFd.models import (\n Flags,\n Hints,\n Tags,\n TeamFields,\n Teams,\n UserFields,\n Users,\n db,\n get_class_by_tablename,\n)\nfrom CTFd.plugins.challenges import get_chal_class\nfrom CTFd.utils.config import is_teams_mode, is_users_mode\nfrom CTFd.utils.scores import get_standings\n\n\ndef get_dumpable_tables():\n csv_keys = list(CSV_KEYS.keys())\n db_keys = list(db.metadata.tables.keys())\n tables = csv_keys + db_keys\n table_keys = list(zip(tables, tables))\n return table_keys\n\n\ndef dump_csv(name):\n dump_func = CSV_KEYS.get(name)\n if dump_func:\n return dump_func()\n elif get_class_by_tablename(name):\n return dump_database_table(tablename=name)\n else:\n raise KeyError\n\n\ndef dump_scoreboard_csv():\n # TODO: Add fields to scoreboard data\n temp = StringIO()\n writer = csv.writer(temp)\n\n standings = get_standings()\n\n # Get all user fields in a specific order\n user_fields = UserFields.query.all()\n user_field_ids = [f.id for f in user_fields]\n user_field_names = [f.name for f in user_fields]\n\n if is_teams_mode():\n team_fields = TeamFields.query.all()\n team_field_ids = [f.id for f in team_fields]\n team_field_names = [f.name for f in team_fields]\n\n header = (\n [\n \"place\",\n \"team\",\n \"team id\",\n \"score\",\n \"member name\",\n \"member id\",\n \"member email\",\n \"member score\",\n ]\n + user_field_names\n + team_field_names\n )\n writer.writerow(header)\n\n for i, standing in enumerate(standings):\n team = Teams.query.filter_by(id=standing.account_id).first()\n\n # Build field entries using the order of the field values\n team_field_entries = {f.field_id: f.value for f in team.field_entries}\n team_field_values = [\n team_field_entries.get(f_id, \"\") for f_id in team_field_ids\n ]\n team_row = [\n i + 1,\n team.name,\n team.id,\n standing.score,\n \"\",\n \"\",\n ] + team_field_values\n\n writer.writerow(team_row)\n\n for member in team.members:\n user_field_entries = {f.field_id: f.value for f in member.field_entries}\n user_field_values = [\n user_field_entries.get(f_id, \"\") for f_id in user_field_ids\n ]\n user_row = [\n \"\",\n \"\",\n \"\",\n \"\",\n member.name,\n member.id,\n member.email,\n member.score,\n ] + user_field_values\n writer.writerow(user_row)\n elif is_users_mode():\n header = [\"place\", \"user\", \"score\"] + user_field_names\n writer.writerow(header)\n\n for i, standing in enumerate(standings):\n user = Users.query.filter_by(id=standing.account_id).first()\n\n # Build field entries using the order of the field values\n user_field_entries = {f.field_id: f.value for f in user.field_entries}\n user_field_values = [\n user_field_entries.get(f_id, \"\") for f_id in user_field_ids\n ]\n user_row = [i + 1, user.name, standing.score] + user_field_values\n writer.writerow(user_row)\n\n # In Python 3 send_file requires bytes\n output = 
BytesIO()\n output.write(temp.getvalue().encode(\"utf-8\"))\n output.seek(0)\n temp.close()\n\n return output\n\n\ndef dump_users_with_fields_csv():\n temp = StringIO()\n writer = csv.writer(temp)\n\n user_fields = UserFields.query.all()\n user_field_ids = [f.id for f in user_fields]\n user_field_names = [f.name for f in user_fields]\n\n header = [column.name for column in Users.__mapper__.columns] + user_field_names\n writer.writerow(header)\n\n responses = Users.query.all()\n\n for curr in responses:\n user_field_entries = {f.field_id: f.value for f in curr.field_entries}\n user_field_values = [\n user_field_entries.get(f_id, \"\") for f_id in user_field_ids\n ]\n user_row = [\n getattr(curr, column.name) for column in Users.__mapper__.columns\n ] + user_field_values\n writer.writerow(user_row)\n\n temp.seek(0)\n\n # In Python 3 send_file requires bytes\n output = BytesIO()\n output.write(temp.getvalue().encode(\"utf-8\"))\n output.seek(0)\n temp.close()\n\n return output\n\n\ndef dump_teams_with_fields_csv():\n temp = StringIO()\n writer = csv.writer(temp)\n\n team_fields = TeamFields.query.all()\n team_field_ids = [f.id for f in team_fields]\n team_field_names = [f.name for f in team_fields]\n\n header = [column.name for column in Teams.__mapper__.columns] + team_field_names\n writer.writerow(header)\n\n responses = Teams.query.all()\n\n for curr in responses:\n team_field_entries = {f.field_id: f.value for f in curr.field_entries}\n team_field_values = [\n team_field_entries.get(f_id, \"\") for f_id in team_field_ids\n ]\n\n team_row = [\n getattr(curr, column.name) for column in Teams.__mapper__.columns\n ] + team_field_values\n\n writer.writerow(team_row)\n\n temp.seek(0)\n\n # In Python 3 send_file requires bytes\n output = BytesIO()\n output.write(temp.getvalue().encode(\"utf-8\"))\n output.seek(0)\n temp.close()\n\n return output\n\n\ndef dump_database_table(tablename):\n # TODO: It might make sense to limit dumpable tables. 
Config could potentially leak sensitive information.\n model = get_class_by_tablename(tablename)\n\n if model is None:\n raise KeyError(\"Unknown database table\")\n\n temp = StringIO()\n writer = csv.writer(temp)\n\n header = [column.name for column in model.__mapper__.columns]\n writer.writerow(header)\n\n responses = model.query.all()\n\n for curr in responses:\n writer.writerow(\n [getattr(curr, column.name) for column in model.__mapper__.columns]\n )\n\n temp.seek(0)\n\n # In Python 3 send_file requires bytes\n output = BytesIO()\n output.write(temp.getvalue().encode(\"utf-8\"))\n output.seek(0)\n temp.close()\n\n return output\n\n\ndef load_users_csv(dict_reader):\n for line in dict_reader:\n result = Users(**line)\n db.session.add(result)\n db.session.commit()\n return True\n\n\ndef load_teams_csv(dict_reader):\n for line in dict_reader:\n result = Teams(**line)\n db.session.add(result)\n db.session.commit()\n return True\n\n\ndef load_challenges_csv(dict_reader):\n for line in dict_reader:\n flags = line.pop(\"flags\", None)\n tags = line.pop(\"tags\", None)\n hints = line.pop(\"hints\", None)\n challenge_type = line.pop(\"type\", \"standard\")\n\n # Load in custome type_data\n type_data = json.loads(line.pop(\"type_data\", \"{}\"))\n line.update(type_data)\n\n ChallengeClass = get_chal_class(challenge_type)\n challenge = ChallengeClass.challenge_model(**line)\n db.session.add(challenge)\n db.session.commit()\n\n if flags:\n flags = [flag.strip() for flag in flags.split(\",\")]\n for flag in flags:\n f = Flags(type=\"static\", challenge_id=challenge.id, content=flag,)\n db.session.add(f)\n db.session.commit()\n\n if tags:\n tags = [tag.strip() for tag in tags.split(\",\")]\n for tag in tags:\n t = Tags(challenge_id=challenge.id, value=tag,)\n db.session.add(t)\n db.session.commit()\n\n if hints:\n hints = [hint.strip() for hint in hints.split(\",\")]\n for hint in hints:\n h = Hints(challenge_id=challenge.id, content=hint,)\n db.session.add(h)\n db.session.commit()\n return True\n\n\nCSV_KEYS = {\n \"scoreboard\": dump_scoreboard_csv,\n \"users+fields\": dump_users_with_fields_csv,\n \"teams+fields\": dump_teams_with_fields_csv,\n}\n", "path": "CTFd/utils/csv/__init__.py"}], "after_files": [{"content": "import csv\nimport json\nfrom io import BytesIO, StringIO\n\nfrom CTFd.models import (\n Flags,\n Hints,\n Tags,\n TeamFields,\n Teams,\n UserFields,\n Users,\n db,\n get_class_by_tablename,\n)\nfrom CTFd.plugins.challenges import get_chal_class\nfrom CTFd.utils.config import is_teams_mode, is_users_mode\nfrom CTFd.utils.scores import get_standings\n\n\ndef get_dumpable_tables():\n csv_keys = list(CSV_KEYS.keys())\n db_keys = list(db.metadata.tables.keys())\n tables = csv_keys + db_keys\n table_keys = list(zip(tables, tables))\n return table_keys\n\n\ndef dump_csv(name):\n dump_func = CSV_KEYS.get(name)\n if dump_func:\n return dump_func()\n elif get_class_by_tablename(name):\n return dump_database_table(tablename=name)\n else:\n raise KeyError\n\n\ndef dump_scoreboard_csv():\n # TODO: Add fields to scoreboard data\n temp = StringIO()\n writer = csv.writer(temp)\n\n standings = get_standings()\n\n # Get all user fields in a specific order\n user_fields = UserFields.query.all()\n user_field_ids = [f.id for f in user_fields]\n user_field_names = [f.name for f in user_fields]\n\n if is_teams_mode():\n team_fields = TeamFields.query.all()\n team_field_ids = [f.id for f in team_fields]\n team_field_names = [f.name for f in team_fields]\n\n header = (\n [\n \"place\",\n \"team\",\n \"team 
id\",\n \"score\",\n \"member name\",\n \"member id\",\n \"member email\",\n \"member score\",\n ]\n + user_field_names\n + team_field_names\n )\n writer.writerow(header)\n\n for i, standing in enumerate(standings):\n team = Teams.query.filter_by(id=standing.account_id).first()\n\n # Build field entries using the order of the field values\n team_field_entries = {f.field_id: f.value for f in team.field_entries}\n team_field_values = [\n team_field_entries.get(f_id, \"\") for f_id in team_field_ids\n ]\n team_row = [\n i + 1,\n team.name,\n team.id,\n standing.score,\n \"\",\n \"\",\n ] + team_field_values\n\n writer.writerow(team_row)\n\n for member in team.members:\n user_field_entries = {f.field_id: f.value for f in member.field_entries}\n user_field_values = [\n user_field_entries.get(f_id, \"\") for f_id in user_field_ids\n ]\n user_row = [\n \"\",\n \"\",\n \"\",\n \"\",\n member.name,\n member.id,\n member.email,\n member.score,\n ] + user_field_values\n writer.writerow(user_row)\n elif is_users_mode():\n header = [\"place\", \"user\", \"score\"] + user_field_names\n writer.writerow(header)\n\n for i, standing in enumerate(standings):\n user = Users.query.filter_by(id=standing.account_id).first()\n\n # Build field entries using the order of the field values\n user_field_entries = {f.field_id: f.value for f in user.field_entries}\n user_field_values = [\n user_field_entries.get(f_id, \"\") for f_id in user_field_ids\n ]\n user_row = [i + 1, user.name, standing.score] + user_field_values\n writer.writerow(user_row)\n\n # In Python 3 send_file requires bytes\n output = BytesIO()\n output.write(temp.getvalue().encode(\"utf-8\"))\n output.seek(0)\n temp.close()\n\n return output\n\n\ndef dump_users_with_fields_csv():\n temp = StringIO()\n writer = csv.writer(temp)\n\n user_fields = UserFields.query.all()\n user_field_ids = [f.id for f in user_fields]\n user_field_names = [f.name for f in user_fields]\n\n header = [column.name for column in Users.__mapper__.columns] + user_field_names\n writer.writerow(header)\n\n responses = Users.query.all()\n\n for curr in responses:\n user_field_entries = {f.field_id: f.value for f in curr.field_entries}\n user_field_values = [\n user_field_entries.get(f_id, \"\") for f_id in user_field_ids\n ]\n user_row = [\n getattr(curr, column.name) for column in Users.__mapper__.columns\n ] + user_field_values\n writer.writerow(user_row)\n\n temp.seek(0)\n\n # In Python 3 send_file requires bytes\n output = BytesIO()\n output.write(temp.getvalue().encode(\"utf-8\"))\n output.seek(0)\n temp.close()\n\n return output\n\n\ndef dump_teams_with_fields_csv():\n temp = StringIO()\n writer = csv.writer(temp)\n\n team_fields = TeamFields.query.all()\n team_field_ids = [f.id for f in team_fields]\n team_field_names = [f.name for f in team_fields]\n\n header = [column.name for column in Teams.__mapper__.columns] + team_field_names\n writer.writerow(header)\n\n responses = Teams.query.all()\n\n for curr in responses:\n team_field_entries = {f.field_id: f.value for f in curr.field_entries}\n team_field_values = [\n team_field_entries.get(f_id, \"\") for f_id in team_field_ids\n ]\n\n team_row = [\n getattr(curr, column.name) for column in Teams.__mapper__.columns\n ] + team_field_values\n\n writer.writerow(team_row)\n\n temp.seek(0)\n\n # In Python 3 send_file requires bytes\n output = BytesIO()\n output.write(temp.getvalue().encode(\"utf-8\"))\n output.seek(0)\n temp.close()\n\n return output\n\n\ndef dump_database_table(tablename):\n # TODO: It might make sense to limit 
dumpable tables. Config could potentially leak sensitive information.\n model = get_class_by_tablename(tablename)\n\n if model is None:\n raise KeyError(\"Unknown database table\")\n\n temp = StringIO()\n writer = csv.writer(temp)\n\n header = [column.name for column in model.__mapper__.columns]\n writer.writerow(header)\n\n responses = model.query.all()\n\n for curr in responses:\n writer.writerow(\n [getattr(curr, column.name) for column in model.__mapper__.columns]\n )\n\n temp.seek(0)\n\n # In Python 3 send_file requires bytes\n output = BytesIO()\n output.write(temp.getvalue().encode(\"utf-8\"))\n output.seek(0)\n temp.close()\n\n return output\n\n\ndef load_users_csv(dict_reader):\n for line in dict_reader:\n result = Users(**line)\n db.session.add(result)\n db.session.commit()\n return True\n\n\ndef load_teams_csv(dict_reader):\n for line in dict_reader:\n result = Teams(**line)\n db.session.add(result)\n db.session.commit()\n return True\n\n\ndef load_challenges_csv(dict_reader):\n for line in dict_reader:\n flags = line.pop(\"flags\", None)\n tags = line.pop(\"tags\", None)\n hints = line.pop(\"hints\", None)\n challenge_type = line.pop(\"type\", \"standard\")\n\n # Load in custom type_data\n type_data = json.loads(line.pop(\"type_data\", \"{}\") or \"{}\")\n line.update(type_data)\n\n ChallengeClass = get_chal_class(challenge_type)\n challenge = ChallengeClass.challenge_model(**line)\n db.session.add(challenge)\n db.session.commit()\n\n if flags:\n flags = [flag.strip() for flag in flags.split(\",\")]\n for flag in flags:\n f = Flags(type=\"static\", challenge_id=challenge.id, content=flag,)\n db.session.add(f)\n db.session.commit()\n\n if tags:\n tags = [tag.strip() for tag in tags.split(\",\")]\n for tag in tags:\n t = Tags(challenge_id=challenge.id, value=tag,)\n db.session.add(t)\n db.session.commit()\n\n if hints:\n hints = [hint.strip() for hint in hints.split(\",\")]\n for hint in hints:\n h = Hints(challenge_id=challenge.id, content=hint,)\n db.session.add(h)\n db.session.commit()\n return True\n\n\nCSV_KEYS = {\n \"scoreboard\": dump_scoreboard_csv,\n \"users+fields\": dump_users_with_fields_csv,\n \"teams+fields\": dump_teams_with_fields_csv,\n}\n", "path": "CTFd/utils/csv/__init__.py"}]}
| 2,891 | 156 |
gh_patches_debug_22999
|
rasdani/github-patches
|
git_diff
|
cal-itp__benefits-924
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add verifier property to Events
Soon users will have multiple pathways to verify eligibility, and it would be useful to know how many users are taking which pathway(s) in our analytics. We can get ahead of this now by adding an event property with the user's selected verifier (if any).
Let's add this to the base Event definition so that we can later filter any event type by this property, similar to the Transit Agency information attached to each event.
Linking this Amplitude reference again: https://help.amplitude.com/hc/en-us/articles/115002380567-User-properties-and-event-properties
## Acceptance Criteria
<!-- Remember to consider edge cases -->
- [ ] The base Event has an `eligibility_verifier` event property that gets its value from the user's `session.verifier()`
- [ ] The Amplitude Tracking Plan is updated to accept this event property
- [ ] The Amplitude --> Warehouse --> Metabase pipeline is updated to ingest this property
## Additional context
This task was broken out from #342 and is focused on adding the verifier information to _existing_ events. #342 is now focused on introducing _new_ events for some forthcoming UI.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `benefits/core/analytics.py`
Content:
```
1 """
2 The core application: analytics implementation.
3 """
4 import itertools
5 import json
6 import logging
7 import re
8 import time
9 import uuid
10
11 from django.conf import settings
12 import requests
13
14 from benefits import VERSION
15 from . import session
16
17
18 logger = logging.getLogger(__name__)
19
20
21 class Event:
22 """Base analytics event of a given type, including attributes from request's session."""
23
24 _counter = itertools.count()
25 _domain_re = re.compile(r"^(?:https?:\/\/)?(?:[^@\n]+@)?(?:www\.)?([^:\/\n?]+)", re.IGNORECASE)
26
27 def __init__(self, request, event_type, **kwargs):
28 self.app_version = VERSION
29 # device_id is generated based on the user_id, and both are set explicitly (per session)
30 self.device_id = session.did(request)
31 self.event_properties = {}
32 self.event_type = str(event_type).lower()
33 self.insert_id = str(uuid.uuid4())
34 self.language = session.language(request)
35 # Amplitude tracks sessions using the start time as the session_id
36 self.session_id = session.start(request)
37 self.time = int(time.time() * 1000)
38 # Although Amplitude advises *against* setting user_id for anonymous users, here a value is set on anonymous
39 # users anyway, as the users never sign-in and become de-anonymized to this app / Amplitude.
40 self.user_id = session.uid(request)
41 self.user_properties = {}
42 self.__dict__.update(kwargs)
43
44 agency = session.agency(request)
45 agency_name = agency.long_name if agency else None
46
47 self.update_event_properties(path=request.path, transit_agency=agency_name)
48
49 uagent = request.headers.get("user-agent")
50
51 ref = request.headers.get("referer")
52 match = Event._domain_re.match(ref) if ref else None
53 refdom = match.group(1) if match else None
54
55 self.update_user_properties(referrer=ref, referring_domain=refdom, user_agent=uagent, transit_agency=agency_name)
56
57 # event is initialized, consume next counter
58 self.event_id = next(Event._counter)
59
60 def __str__(self):
61 return json.dumps(self.__dict__)
62
63 def update_event_properties(self, **kwargs):
64 """Merge kwargs into the self.event_properties dict."""
65 self.event_properties.update(kwargs)
66
67 def update_user_properties(self, **kwargs):
68 """Merge kwargs into the self.user_properties dict."""
69 self.user_properties.update(kwargs)
70
71
72 class ViewedPageEvent(Event):
73 """Analytics event representing a single page view."""
74
75 def __init__(self, request):
76 super().__init__(request, "viewed page")
77
78
79 class ChangedLanguageEvent(Event):
80 """Analytics event representing a change in the app's language."""
81
82 def __init__(self, request, new_lang):
83 super().__init__(request, "changed language")
84 self.update_event_properties(language=new_lang)
85
86
87 class Client:
88 """Analytics API client"""
89
90 def __init__(self, api_key):
91 self.api_key = api_key
92 self.headers = {"Accept": "*/*", "Content-type": "application/json"}
93 self.url = "https://api2.amplitude.com/2/httpapi"
94 logger.debug(f"Initialize Client for {self.url}")
95
96 def _payload(self, events):
97 if not isinstance(events, list):
98 events = [events]
99 return {"api_key": self.api_key, "events": [e.__dict__ for e in events]}
100
101 def send(self, event):
102 """Send an analytics event."""
103 if not isinstance(event, Event):
104 raise ValueError("event must be an Event instance")
105
106 if not self.api_key:
107 logger.warning(f"api_key is not configured, cannot send event: {event}")
108 return
109
110 try:
111 payload = self._payload(event)
112 logger.debug(f"Sending event payload: {payload}")
113
114 r = requests.post(self.url, headers=self.headers, json=payload)
115 if r.status_code == 200:
116 logger.debug(f"Event sent successfully: {r.json()}")
117 elif r.status_code == 400:
118 logger.error(f"Event request was invalid: {r.json()}")
119 elif r.status_code == 413:
120 logger.error(f"Event payload was too large: {r.json()}")
121 elif r.status_code == 429:
122 logger.error(f"Event contained too many requests for some users: {r.json()}")
123 else:
124 logger.error(f"Failed to send event: {r.json()}")
125
126 except Exception:
127 logger.error(f"Failed to send event: {event}")
128
129
130 client = Client(settings.ANALYTICS_KEY)
131
132
133 def send_event(event):
134 """Send an analytics event."""
135 if isinstance(event, Event):
136 client.send(event)
137 else:
138 raise ValueError("event must be an Event instance")
139
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/benefits/core/analytics.py b/benefits/core/analytics.py
--- a/benefits/core/analytics.py
+++ b/benefits/core/analytics.py
@@ -43,8 +43,10 @@
agency = session.agency(request)
agency_name = agency.long_name if agency else None
+ verifier = session.verifier(request)
+ verifier_name = verifier.name if verifier else None
- self.update_event_properties(path=request.path, transit_agency=agency_name)
+ self.update_event_properties(path=request.path, transit_agency=agency_name, eligibility_verifier=verifier_name)
uagent = request.headers.get("user-agent")
@@ -52,7 +54,13 @@
match = Event._domain_re.match(ref) if ref else None
refdom = match.group(1) if match else None
- self.update_user_properties(referrer=ref, referring_domain=refdom, user_agent=uagent, transit_agency=agency_name)
+ self.update_user_properties(
+ referrer=ref,
+ referring_domain=refdom,
+ user_agent=uagent,
+ transit_agency=agency_name,
+ eligibility_verifier=verifier_name,
+ )
# event is initialized, consume next counter
self.event_id = next(Event._counter)
|
{"golden_diff": "diff --git a/benefits/core/analytics.py b/benefits/core/analytics.py\n--- a/benefits/core/analytics.py\n+++ b/benefits/core/analytics.py\n@@ -43,8 +43,10 @@\n \n agency = session.agency(request)\n agency_name = agency.long_name if agency else None\n+ verifier = session.verifier(request)\n+ verifier_name = verifier.name if verifier else None\n \n- self.update_event_properties(path=request.path, transit_agency=agency_name)\n+ self.update_event_properties(path=request.path, transit_agency=agency_name, eligibility_verifier=verifier_name)\n \n uagent = request.headers.get(\"user-agent\")\n \n@@ -52,7 +54,13 @@\n match = Event._domain_re.match(ref) if ref else None\n refdom = match.group(1) if match else None\n \n- self.update_user_properties(referrer=ref, referring_domain=refdom, user_agent=uagent, transit_agency=agency_name)\n+ self.update_user_properties(\n+ referrer=ref,\n+ referring_domain=refdom,\n+ user_agent=uagent,\n+ transit_agency=agency_name,\n+ eligibility_verifier=verifier_name,\n+ )\n \n # event is initialized, consume next counter\n self.event_id = next(Event._counter)\n", "issue": "Add verifier property to Events\nSoon users will have multiple pathways to verify eligibility, and it would be useful to know how many users are taking which pathway(s) in our analytics. We can get ahead of this now by adding an event property with the user's selected verifier (if any).\r\n\r\nLet's add this to the base Event definition so that we can later filter any event type by this property, similar to the Transit Agency information attached to each event.\r\n\r\nLinking this Amplitude reference again: https://help.amplitude.com/hc/en-us/articles/115002380567-User-properties-and-event-properties\r\n\r\n## Acceptance Criteria\r\n\r\n<!-- Remember to consider edge cases -->\r\n\r\n- [ ] The base Event has an `eligibility_verifier` event property that gets its value from the user's `session.verifier()`\r\n- [ ] The Amplitude Tracking Plan is updated to accept this event property\r\n- [ ] The Amplitude --> Warehouse --> Metabase pipeline is updated to ingest this property\r\n\r\n## Additional context\r\n\r\nThis task was broken out from #342 and is focused on adding the verifier information to _existing_ events. #342 is now focused on introducing _new_ events for some forthcoming UI.\r\n\n", "before_files": [{"content": "\"\"\"\nThe core application: analytics implementation.\n\"\"\"\nimport itertools\nimport json\nimport logging\nimport re\nimport time\nimport uuid\n\nfrom django.conf import settings\nimport requests\n\nfrom benefits import VERSION\nfrom . 
import session\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Event:\n \"\"\"Base analytics event of a given type, including attributes from request's session.\"\"\"\n\n _counter = itertools.count()\n _domain_re = re.compile(r\"^(?:https?:\\/\\/)?(?:[^@\\n]+@)?(?:www\\.)?([^:\\/\\n?]+)\", re.IGNORECASE)\n\n def __init__(self, request, event_type, **kwargs):\n self.app_version = VERSION\n # device_id is generated based on the user_id, and both are set explicitly (per session)\n self.device_id = session.did(request)\n self.event_properties = {}\n self.event_type = str(event_type).lower()\n self.insert_id = str(uuid.uuid4())\n self.language = session.language(request)\n # Amplitude tracks sessions using the start time as the session_id\n self.session_id = session.start(request)\n self.time = int(time.time() * 1000)\n # Although Amplitude advises *against* setting user_id for anonymous users, here a value is set on anonymous\n # users anyway, as the users never sign-in and become de-anonymized to this app / Amplitude.\n self.user_id = session.uid(request)\n self.user_properties = {}\n self.__dict__.update(kwargs)\n\n agency = session.agency(request)\n agency_name = agency.long_name if agency else None\n\n self.update_event_properties(path=request.path, transit_agency=agency_name)\n\n uagent = request.headers.get(\"user-agent\")\n\n ref = request.headers.get(\"referer\")\n match = Event._domain_re.match(ref) if ref else None\n refdom = match.group(1) if match else None\n\n self.update_user_properties(referrer=ref, referring_domain=refdom, user_agent=uagent, transit_agency=agency_name)\n\n # event is initialized, consume next counter\n self.event_id = next(Event._counter)\n\n def __str__(self):\n return json.dumps(self.__dict__)\n\n def update_event_properties(self, **kwargs):\n \"\"\"Merge kwargs into the self.event_properties dict.\"\"\"\n self.event_properties.update(kwargs)\n\n def update_user_properties(self, **kwargs):\n \"\"\"Merge kwargs into the self.user_properties dict.\"\"\"\n self.user_properties.update(kwargs)\n\n\nclass ViewedPageEvent(Event):\n \"\"\"Analytics event representing a single page view.\"\"\"\n\n def __init__(self, request):\n super().__init__(request, \"viewed page\")\n\n\nclass ChangedLanguageEvent(Event):\n \"\"\"Analytics event representing a change in the app's language.\"\"\"\n\n def __init__(self, request, new_lang):\n super().__init__(request, \"changed language\")\n self.update_event_properties(language=new_lang)\n\n\nclass Client:\n \"\"\"Analytics API client\"\"\"\n\n def __init__(self, api_key):\n self.api_key = api_key\n self.headers = {\"Accept\": \"*/*\", \"Content-type\": \"application/json\"}\n self.url = \"https://api2.amplitude.com/2/httpapi\"\n logger.debug(f\"Initialize Client for {self.url}\")\n\n def _payload(self, events):\n if not isinstance(events, list):\n events = [events]\n return {\"api_key\": self.api_key, \"events\": [e.__dict__ for e in events]}\n\n def send(self, event):\n \"\"\"Send an analytics event.\"\"\"\n if not isinstance(event, Event):\n raise ValueError(\"event must be an Event instance\")\n\n if not self.api_key:\n logger.warning(f\"api_key is not configured, cannot send event: {event}\")\n return\n\n try:\n payload = self._payload(event)\n logger.debug(f\"Sending event payload: {payload}\")\n\n r = requests.post(self.url, headers=self.headers, json=payload)\n if r.status_code == 200:\n logger.debug(f\"Event sent successfully: {r.json()}\")\n elif r.status_code == 400:\n logger.error(f\"Event request was invalid: 
{r.json()}\")\n elif r.status_code == 413:\n logger.error(f\"Event payload was too large: {r.json()}\")\n elif r.status_code == 429:\n logger.error(f\"Event contained too many requests for some users: {r.json()}\")\n else:\n logger.error(f\"Failed to send event: {r.json()}\")\n\n except Exception:\n logger.error(f\"Failed to send event: {event}\")\n\n\nclient = Client(settings.ANALYTICS_KEY)\n\n\ndef send_event(event):\n \"\"\"Send an analytics event.\"\"\"\n if isinstance(event, Event):\n client.send(event)\n else:\n raise ValueError(\"event must be an Event instance\")\n", "path": "benefits/core/analytics.py"}], "after_files": [{"content": "\"\"\"\nThe core application: analytics implementation.\n\"\"\"\nimport itertools\nimport json\nimport logging\nimport re\nimport time\nimport uuid\n\nfrom django.conf import settings\nimport requests\n\nfrom benefits import VERSION\nfrom . import session\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Event:\n \"\"\"Base analytics event of a given type, including attributes from request's session.\"\"\"\n\n _counter = itertools.count()\n _domain_re = re.compile(r\"^(?:https?:\\/\\/)?(?:[^@\\n]+@)?(?:www\\.)?([^:\\/\\n?]+)\", re.IGNORECASE)\n\n def __init__(self, request, event_type, **kwargs):\n self.app_version = VERSION\n # device_id is generated based on the user_id, and both are set explicitly (per session)\n self.device_id = session.did(request)\n self.event_properties = {}\n self.event_type = str(event_type).lower()\n self.insert_id = str(uuid.uuid4())\n self.language = session.language(request)\n # Amplitude tracks sessions using the start time as the session_id\n self.session_id = session.start(request)\n self.time = int(time.time() * 1000)\n # Although Amplitude advises *against* setting user_id for anonymous users, here a value is set on anonymous\n # users anyway, as the users never sign-in and become de-anonymized to this app / Amplitude.\n self.user_id = session.uid(request)\n self.user_properties = {}\n self.__dict__.update(kwargs)\n\n agency = session.agency(request)\n agency_name = agency.long_name if agency else None\n verifier = session.verifier(request)\n verifier_name = verifier.name if verifier else None\n\n self.update_event_properties(path=request.path, transit_agency=agency_name, eligibility_verifier=verifier_name)\n\n uagent = request.headers.get(\"user-agent\")\n\n ref = request.headers.get(\"referer\")\n match = Event._domain_re.match(ref) if ref else None\n refdom = match.group(1) if match else None\n\n self.update_user_properties(\n referrer=ref,\n referring_domain=refdom,\n user_agent=uagent,\n transit_agency=agency_name,\n eligibility_verifier=verifier_name,\n )\n\n # event is initialized, consume next counter\n self.event_id = next(Event._counter)\n\n def __str__(self):\n return json.dumps(self.__dict__)\n\n def update_event_properties(self, **kwargs):\n \"\"\"Merge kwargs into the self.event_properties dict.\"\"\"\n self.event_properties.update(kwargs)\n\n def update_user_properties(self, **kwargs):\n \"\"\"Merge kwargs into the self.user_properties dict.\"\"\"\n self.user_properties.update(kwargs)\n\n\nclass ViewedPageEvent(Event):\n \"\"\"Analytics event representing a single page view.\"\"\"\n\n def __init__(self, request):\n super().__init__(request, \"viewed page\")\n\n\nclass ChangedLanguageEvent(Event):\n \"\"\"Analytics event representing a change in the app's language.\"\"\"\n\n def __init__(self, request, new_lang):\n super().__init__(request, \"changed language\")\n 
self.update_event_properties(language=new_lang)\n\n\nclass Client:\n \"\"\"Analytics API client\"\"\"\n\n def __init__(self, api_key):\n self.api_key = api_key\n self.headers = {\"Accept\": \"*/*\", \"Content-type\": \"application/json\"}\n self.url = \"https://api2.amplitude.com/2/httpapi\"\n logger.debug(f\"Initialize Client for {self.url}\")\n\n def _payload(self, events):\n if not isinstance(events, list):\n events = [events]\n return {\"api_key\": self.api_key, \"events\": [e.__dict__ for e in events]}\n\n def send(self, event):\n \"\"\"Send an analytics event.\"\"\"\n if not isinstance(event, Event):\n raise ValueError(\"event must be an Event instance\")\n\n if not self.api_key:\n logger.warning(f\"api_key is not configured, cannot send event: {event}\")\n return\n\n try:\n payload = self._payload(event)\n logger.debug(f\"Sending event payload: {payload}\")\n\n r = requests.post(self.url, headers=self.headers, json=payload)\n if r.status_code == 200:\n logger.debug(f\"Event sent successfully: {r.json()}\")\n elif r.status_code == 400:\n logger.error(f\"Event request was invalid: {r.json()}\")\n elif r.status_code == 413:\n logger.error(f\"Event payload was too large: {r.json()}\")\n elif r.status_code == 429:\n logger.error(f\"Event contained too many requests for some users: {r.json()}\")\n else:\n logger.error(f\"Failed to send event: {r.json()}\")\n\n except Exception:\n logger.error(f\"Failed to send event: {event}\")\n\n\nclient = Client(settings.ANALYTICS_KEY)\n\n\ndef send_event(event):\n \"\"\"Send an analytics event.\"\"\"\n if isinstance(event, Event):\n client.send(event)\n else:\n raise ValueError(\"event must be an Event instance\")\n", "path": "benefits/core/analytics.py"}]}
| 1,881 | 293 |
gh_patches_debug_9450
|
rasdani/github-patches
|
git_diff
|
celery__celery-5720
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Getting an error when writing a unit test using PyTest for a Celery task -> pytest.PytestUnknownMarkWarning: Unknown pytest.mark.celery
I wrote the following bare minimum unit test class for celery
```python
import pytest
@pytest.fixture
def celery_config():
return {
"broker_url": "redis://localhost:6379/0",
"result_backend": "redis://localhost:6379/0"
}
@pytest.mark.celery(result_backend="redis://")
class GetHash:
def test_some(self):
pass
```
I am getting the following error when executing the test
```
test_get_hash.py:12: in <module>
@pytest.mark.celery(result_backend="redis://")
/home/work/.virtualenvs/dev_env/lib/python3.6/site-packages/_pytest/mark/structures.py:324: in __getattr__
PytestUnknownMarkWarning,
E pytest.PytestUnknownMarkWarning: Unknown pytest.mark.celery - is this a typo?
```
These are the items in the `_mark` set in the `structures.py` file
```python
<class 'set'>: {
'tryfirst',
'skip',
'black',
'filterwarnings',
'parametrize',
'usefixtures',
'skipif',
'xfail',
'no_cover',
'trylast'
}
```
These are the installed Python libraries:
```
amqp==2.5.1
anyjson==0.3.3
apipkg==1.5
appdirs==1.4.3
atomicwrites==1.3.0
attrs==19.1.0
autoflake==1.3
Babel==2.7.0
bandit==1.6.2
billiard==3.6.1.0
black==19.3b0
celery==4.3.0
Cerberus==1.3.1
certifi==2019.6.16
chardet==3.0.4
checksumdir==1.1.6
Click==7.0
coverage==4.5.3
execnet==1.6.0
Flask==1.0.2
Flask-Cors==3.0.8
flower==0.9.3
gitdb2==2.0.5
GitPython==2.1.13
idna==2.8
importlib-metadata==0.19
isort==4.3.20
itsdangerous==1.1.0
Jinja2==2.10.1
kombu==4.6.4
MarkupSafe==1.1.1
mock==3.0.5
more-itertools==7.0.0
mysql-connector-python==8.0.16
Nuitka==0.6.5
packaging==19.1
pbr==5.4.2
pluggy==0.12.0
protobuf==3.7.1
py==1.8.0
pyflakes==2.1.1
pyparsing==2.4.2
pytest==5.1.1
pytest-black==0.3.7
pytest-cov==2.7.1
pytest-forked==1.0.2
pytest-runner==5.1
pytest-xdist==1.29.0
python-dateutil==2.8.0
python-dotenv==0.10.1
pytz==2019.2
PyYAML==5.1.2
redis==3.3.8
requests==2.22.0
rq==1.1.0
six==1.12.0
smmap2==2.0.5
SQLAlchemy==1.3.3
stevedore==1.30.1
toml==0.10.0
tornado==5.1.1
urllib3==1.25.3
vine==1.3.0
wcwidth==0.1.7
Werkzeug==0.15.2
```
Is the documentation missing an additional package?
--- END ISSUE ---
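For context, a minimal illustrative sketch of the fix direction (assuming pytest >= 4.5; the hook below is not quoted from the Celery codebase): a plugin or a project-local `conftest.py` can register the `celery` marker so pytest stops flagging it as unknown.
```python
# conftest.py -- illustrative sketch only: register a custom "celery" marker so
# pytest >= 4.5 no longer emits PytestUnknownMarkWarning for @pytest.mark.celery.
def pytest_configure(config):
    config.addinivalue_line(
        "markers", "celery(**overrides): override celery configuration for a test case"
    )
```
Equivalently, the same marker description can be declared under `markers =` in `pytest.ini`. Note also that pytest only collects test classes whose names start with `Test`, so the `GetHash` class in the snippet above would not be collected regardless of the marker.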
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `celery/contrib/pytest.py`
Content:
```
1 """Fixtures and testing utilities for :pypi:`py.test <pytest>`."""
2 from __future__ import absolute_import, unicode_literals
3
4 import os
5 from contextlib import contextmanager
6
7 import pytest
8
9 from .testing import worker
10 from .testing.app import TestApp, setup_default_app
11
12 NO_WORKER = os.environ.get('NO_WORKER')
13
14 # pylint: disable=redefined-outer-name
15 # Well, they're called fixtures....
16
17
18 @contextmanager
19 def _create_app(enable_logging=False,
20 use_trap=False,
21 parameters=None,
22 **config):
23 # type: (Any, **Any) -> Celery
24 """Utility context used to setup Celery app for pytest fixtures."""
25 parameters = {} if not parameters else parameters
26 test_app = TestApp(
27 set_as_current=False,
28 enable_logging=enable_logging,
29 config=config,
30 **parameters
31 )
32 with setup_default_app(test_app, use_trap=use_trap):
33 yield test_app
34
35
36 @pytest.fixture(scope='session')
37 def use_celery_app_trap():
38 # type: () -> bool
39 """You can override this fixture to enable the app trap.
40
41 The app trap raises an exception whenever something attempts
42 to use the current or default apps.
43 """
44 return False
45
46
47 @pytest.fixture(scope='session')
48 def celery_session_app(request,
49 celery_config,
50 celery_parameters,
51 celery_enable_logging,
52 use_celery_app_trap):
53 # type: (Any) -> Celery
54 """Session Fixture: Return app for session fixtures."""
55 mark = request.node.get_closest_marker('celery')
56 config = dict(celery_config, **mark.kwargs if mark else {})
57 with _create_app(enable_logging=celery_enable_logging,
58 use_trap=use_celery_app_trap,
59 parameters=celery_parameters,
60 **config) as app:
61 if not use_celery_app_trap:
62 app.set_default()
63 app.set_current()
64 yield app
65
66
67 @pytest.fixture(scope='session')
68 def celery_session_worker(request,
69 celery_session_app,
70 celery_includes,
71 celery_worker_pool,
72 celery_worker_parameters):
73 # type: (Any, Celery, Sequence[str], str) -> WorkController
74 """Session Fixture: Start worker that lives throughout test suite."""
75 if not NO_WORKER:
76 for module in celery_includes:
77 celery_session_app.loader.import_task_module(module)
78 with worker.start_worker(celery_session_app,
79 pool=celery_worker_pool,
80 **celery_worker_parameters) as w:
81 yield w
82
83
84 @pytest.fixture(scope='session')
85 def celery_enable_logging():
86 # type: () -> bool
87 """You can override this fixture to enable logging."""
88 return False
89
90
91 @pytest.fixture(scope='session')
92 def celery_includes():
93 # type: () -> Sequence[str]
94 """You can override this include modules when a worker start.
95
96 You can have this return a list of module names to import,
97 these can be task modules, modules registering signals, and so on.
98 """
99 return ()
100
101
102 @pytest.fixture(scope='session')
103 def celery_worker_pool():
104 # type: () -> Union[str, Any]
105 """You can override this fixture to set the worker pool.
106
107 The "solo" pool is used by default, but you can set this to
108 return e.g. "prefork".
109 """
110 return 'solo'
111
112
113 @pytest.fixture(scope='session')
114 def celery_config():
115 # type: () -> Mapping[str, Any]
116 """Redefine this fixture to configure the test Celery app.
117
118 The config returned by your fixture will then be used
119 to configure the :func:`celery_app` fixture.
120 """
121 return {}
122
123
124 @pytest.fixture(scope='session')
125 def celery_parameters():
126 # type: () -> Mapping[str, Any]
127 """Redefine this fixture to change the init parameters of test Celery app.
128
129 The dict returned by your fixture will then be used
130 as parameters when instantiating :class:`~celery.Celery`.
131 """
132 return {}
133
134
135 @pytest.fixture(scope='session')
136 def celery_worker_parameters():
137 # type: () -> Mapping[str, Any]
138 """Redefine this fixture to change the init parameters of Celery workers.
139
140 This can be used e. g. to define queues the worker will consume tasks from.
141
142 The dict returned by your fixture will then be used
143 as parameters when instantiating :class:`~celery.worker.WorkController`.
144 """
145 return {}
146
147
148 @pytest.fixture()
149 def celery_app(request,
150 celery_config,
151 celery_parameters,
152 celery_enable_logging,
153 use_celery_app_trap):
154 """Fixture creating a Celery application instance."""
155 mark = request.node.get_closest_marker('celery')
156 config = dict(celery_config, **mark.kwargs if mark else {})
157 with _create_app(enable_logging=celery_enable_logging,
158 use_trap=use_celery_app_trap,
159 parameters=celery_parameters,
160 **config) as app:
161 yield app
162
163
164 @pytest.fixture()
165 def celery_worker(request,
166 celery_app,
167 celery_includes,
168 celery_worker_pool,
169 celery_worker_parameters):
170 # type: (Any, Celery, Sequence[str], str) -> WorkController
171 """Fixture: Start worker in a thread, stop it when the test returns."""
172 if not NO_WORKER:
173 for module in celery_includes:
174 celery_app.loader.import_task_module(module)
175 with worker.start_worker(celery_app,
176 pool=celery_worker_pool,
177 **celery_worker_parameters) as w:
178 yield w
179
180
181 @pytest.fixture()
182 def depends_on_current_app(celery_app):
183 """Fixture that sets app as current."""
184 celery_app.set_current()
185
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/celery/contrib/pytest.py b/celery/contrib/pytest.py
--- a/celery/contrib/pytest.py
+++ b/celery/contrib/pytest.py
@@ -15,6 +15,16 @@
# Well, they're called fixtures....
+def pytest_configure(config):
+ """Register additional pytest configuration."""
+ # add the pytest.mark.celery() marker registration to the pytest.ini [markers] section
+ # this prevents pytest 4.5 and newer from issueing a warning about an unknown marker
+ # and shows helpful marker documentation when running pytest --markers.
+ config.addinivalue_line(
+ "markers", "celery(**overrides): override celery configuration for a test case"
+ )
+
+
@contextmanager
def _create_app(enable_logging=False,
use_trap=False,
|
{"golden_diff": "diff --git a/celery/contrib/pytest.py b/celery/contrib/pytest.py\n--- a/celery/contrib/pytest.py\n+++ b/celery/contrib/pytest.py\n@@ -15,6 +15,16 @@\n # Well, they're called fixtures....\n \n \n+def pytest_configure(config):\n+ \"\"\"Register additional pytest configuration.\"\"\"\n+ # add the pytest.mark.celery() marker registration to the pytest.ini [markers] section\n+ # this prevents pytest 4.5 and newer from issueing a warning about an unknown marker\n+ # and shows helpful marker documentation when running pytest --markers.\n+ config.addinivalue_line(\n+ \"markers\", \"celery(**overrides): override celery configuration for a test case\"\n+ )\n+\n+\n @contextmanager\n def _create_app(enable_logging=False,\n use_trap=False,\n", "issue": "Getting error when write a unit test using PyTest for a celery task -> pytest.PytestUnknownMarkWarning: Unknown pytest.mark.celery\nI wrote the following bare minimum unit test class for celery\r\n\r\n```python\r\nimport pytest\r\n\r\n\r\[email protected]\r\ndef celery_config():\r\n return {\r\n \"broker_url\": \"redis://localhost:6379/0\",\r\n \"result_backend\": \"redis://localhost:6379/0\"\r\n }\r\n\r\n\r\[email protected](result_backend=\"redis://\")\r\nclass GetHash:\r\n\r\n def test_some(self):\r\n pass\r\n```\r\n\r\nI am getting the following error when executing the test\r\n```\r\ntest_get_hash.py:12: in <module>\r\n @pytest.mark.celery(result_backend=\"redis://\")\r\n/home/work/.virtualenvs/dev_env/lib/python3.6/site-packages/_pytest/mark/structures.py:324: in __getattr__\r\n PytestUnknownMarkWarning,\r\nE pytest.PytestUnknownMarkWarning: Unknown pytest.mark.celery - is this a typo?\r\n```\r\n\r\nThese are the items in the `_mark` set `structures.py` file \r\n```python\r\n<class 'set'>: {\r\n 'tryfirst', \r\n 'skip',\r\n 'black',\r\n 'filterwarnings',\r\n 'parametrize',\r\n 'usefixtures',\r\n 'skipif',\r\n 'xfail',\r\n 'no_cover',\r\n 'trylast'\r\n}\r\n```\r\n\r\nThese are the installed python libraries \r\n```\r\namqp==2.5.1\r\nanyjson==0.3.3\r\napipkg==1.5\r\nappdirs==1.4.3\r\natomicwrites==1.3.0\r\nattrs==19.1.0\r\nautoflake==1.3\r\nBabel==2.7.0\r\nbandit==1.6.2\r\nbilliard==3.6.1.0\r\nblack==19.3b0\r\ncelery==4.3.0\r\nCerberus==1.3.1\r\ncertifi==2019.6.16\r\nchardet==3.0.4\r\nchecksumdir==1.1.6\r\nClick==7.0\r\ncoverage==4.5.3\r\nexecnet==1.6.0\r\nFlask==1.0.2\r\nFlask-Cors==3.0.8\r\nflower==0.9.3\r\ngitdb2==2.0.5\r\nGitPython==2.1.13\r\nidna==2.8\r\nimportlib-metadata==0.19\r\nisort==4.3.20\r\nitsdangerous==1.1.0\r\nJinja2==2.10.1\r\nkombu==4.6.4\r\nMarkupSafe==1.1.1\r\nmock==3.0.5\r\nmore-itertools==7.0.0\r\nmysql-connector-python==8.0.16\r\nNuitka==0.6.5\r\npackaging==19.1\r\npbr==5.4.2\r\npluggy==0.12.0\r\nprotobuf==3.7.1\r\npy==1.8.0\r\npyflakes==2.1.1\r\npyparsing==2.4.2\r\npytest==5.1.1\r\npytest-black==0.3.7\r\npytest-cov==2.7.1\r\npytest-forked==1.0.2\r\npytest-runner==5.1\r\npytest-xdist==1.29.0\r\npython-dateutil==2.8.0\r\npython-dotenv==0.10.1\r\npytz==2019.2\r\nPyYAML==5.1.2\r\nredis==3.3.8\r\nrequests==2.22.0\r\nrq==1.1.0\r\nsix==1.12.0\r\nsmmap2==2.0.5\r\nSQLAlchemy==1.3.3\r\nstevedore==1.30.1\r\ntoml==0.10.0\r\ntornado==5.1.1\r\nurllib3==1.25.3\r\nvine==1.3.0\r\nwcwidth==0.1.7\r\nWerkzeug==0.15.2\r\n\r\n```\r\nIs the documentation missing an additional package?\n", "before_files": [{"content": "\"\"\"Fixtures and testing utilities for :pypi:`py.test <pytest>`.\"\"\"\nfrom __future__ import absolute_import, unicode_literals\n\nimport os\nfrom contextlib import contextmanager\n\nimport pytest\n\nfrom .testing 
import worker\nfrom .testing.app import TestApp, setup_default_app\n\nNO_WORKER = os.environ.get('NO_WORKER')\n\n# pylint: disable=redefined-outer-name\n# Well, they're called fixtures....\n\n\n@contextmanager\ndef _create_app(enable_logging=False,\n use_trap=False,\n parameters=None,\n **config):\n # type: (Any, **Any) -> Celery\n \"\"\"Utility context used to setup Celery app for pytest fixtures.\"\"\"\n parameters = {} if not parameters else parameters\n test_app = TestApp(\n set_as_current=False,\n enable_logging=enable_logging,\n config=config,\n **parameters\n )\n with setup_default_app(test_app, use_trap=use_trap):\n yield test_app\n\n\[email protected](scope='session')\ndef use_celery_app_trap():\n # type: () -> bool\n \"\"\"You can override this fixture to enable the app trap.\n\n The app trap raises an exception whenever something attempts\n to use the current or default apps.\n \"\"\"\n return False\n\n\[email protected](scope='session')\ndef celery_session_app(request,\n celery_config,\n celery_parameters,\n celery_enable_logging,\n use_celery_app_trap):\n # type: (Any) -> Celery\n \"\"\"Session Fixture: Return app for session fixtures.\"\"\"\n mark = request.node.get_closest_marker('celery')\n config = dict(celery_config, **mark.kwargs if mark else {})\n with _create_app(enable_logging=celery_enable_logging,\n use_trap=use_celery_app_trap,\n parameters=celery_parameters,\n **config) as app:\n if not use_celery_app_trap:\n app.set_default()\n app.set_current()\n yield app\n\n\[email protected](scope='session')\ndef celery_session_worker(request,\n celery_session_app,\n celery_includes,\n celery_worker_pool,\n celery_worker_parameters):\n # type: (Any, Celery, Sequence[str], str) -> WorkController\n \"\"\"Session Fixture: Start worker that lives throughout test suite.\"\"\"\n if not NO_WORKER:\n for module in celery_includes:\n celery_session_app.loader.import_task_module(module)\n with worker.start_worker(celery_session_app,\n pool=celery_worker_pool,\n **celery_worker_parameters) as w:\n yield w\n\n\[email protected](scope='session')\ndef celery_enable_logging():\n # type: () -> bool\n \"\"\"You can override this fixture to enable logging.\"\"\"\n return False\n\n\[email protected](scope='session')\ndef celery_includes():\n # type: () -> Sequence[str]\n \"\"\"You can override this include modules when a worker start.\n\n You can have this return a list of module names to import,\n these can be task modules, modules registering signals, and so on.\n \"\"\"\n return ()\n\n\[email protected](scope='session')\ndef celery_worker_pool():\n # type: () -> Union[str, Any]\n \"\"\"You can override this fixture to set the worker pool.\n\n The \"solo\" pool is used by default, but you can set this to\n return e.g. 
\"prefork\".\n \"\"\"\n return 'solo'\n\n\[email protected](scope='session')\ndef celery_config():\n # type: () -> Mapping[str, Any]\n \"\"\"Redefine this fixture to configure the test Celery app.\n\n The config returned by your fixture will then be used\n to configure the :func:`celery_app` fixture.\n \"\"\"\n return {}\n\n\[email protected](scope='session')\ndef celery_parameters():\n # type: () -> Mapping[str, Any]\n \"\"\"Redefine this fixture to change the init parameters of test Celery app.\n\n The dict returned by your fixture will then be used\n as parameters when instantiating :class:`~celery.Celery`.\n \"\"\"\n return {}\n\n\[email protected](scope='session')\ndef celery_worker_parameters():\n # type: () -> Mapping[str, Any]\n \"\"\"Redefine this fixture to change the init parameters of Celery workers.\n\n This can be used e. g. to define queues the worker will consume tasks from.\n\n The dict returned by your fixture will then be used\n as parameters when instantiating :class:`~celery.worker.WorkController`.\n \"\"\"\n return {}\n\n\[email protected]()\ndef celery_app(request,\n celery_config,\n celery_parameters,\n celery_enable_logging,\n use_celery_app_trap):\n \"\"\"Fixture creating a Celery application instance.\"\"\"\n mark = request.node.get_closest_marker('celery')\n config = dict(celery_config, **mark.kwargs if mark else {})\n with _create_app(enable_logging=celery_enable_logging,\n use_trap=use_celery_app_trap,\n parameters=celery_parameters,\n **config) as app:\n yield app\n\n\[email protected]()\ndef celery_worker(request,\n celery_app,\n celery_includes,\n celery_worker_pool,\n celery_worker_parameters):\n # type: (Any, Celery, Sequence[str], str) -> WorkController\n \"\"\"Fixture: Start worker in a thread, stop it when the test returns.\"\"\"\n if not NO_WORKER:\n for module in celery_includes:\n celery_app.loader.import_task_module(module)\n with worker.start_worker(celery_app,\n pool=celery_worker_pool,\n **celery_worker_parameters) as w:\n yield w\n\n\[email protected]()\ndef depends_on_current_app(celery_app):\n \"\"\"Fixture that sets app as current.\"\"\"\n celery_app.set_current()\n", "path": "celery/contrib/pytest.py"}], "after_files": [{"content": "\"\"\"Fixtures and testing utilities for :pypi:`py.test <pytest>`.\"\"\"\nfrom __future__ import absolute_import, unicode_literals\n\nimport os\nfrom contextlib import contextmanager\n\nimport pytest\n\nfrom .testing import worker\nfrom .testing.app import TestApp, setup_default_app\n\nNO_WORKER = os.environ.get('NO_WORKER')\n\n# pylint: disable=redefined-outer-name\n# Well, they're called fixtures....\n\n\ndef pytest_configure(config):\n \"\"\"Register additional pytest configuration.\"\"\"\n # add the pytest.mark.celery() marker registration to the pytest.ini [markers] section\n # this prevents pytest 4.5 and newer from issueing a warning about an unknown marker\n # and shows helpful marker documentation when running pytest --markers.\n config.addinivalue_line(\n \"markers\", \"celery(**overrides): override celery configuration for a test case\"\n )\n\n\n@contextmanager\ndef _create_app(enable_logging=False,\n use_trap=False,\n parameters=None,\n **config):\n # type: (Any, **Any) -> Celery\n \"\"\"Utility context used to setup Celery app for pytest fixtures.\"\"\"\n parameters = {} if not parameters else parameters\n test_app = TestApp(\n set_as_current=False,\n enable_logging=enable_logging,\n config=config,\n **parameters\n )\n with setup_default_app(test_app, use_trap=use_trap):\n yield test_app\n\n\[email 
protected](scope='session')\ndef use_celery_app_trap():\n # type: () -> bool\n \"\"\"You can override this fixture to enable the app trap.\n\n The app trap raises an exception whenever something attempts\n to use the current or default apps.\n \"\"\"\n return False\n\n\[email protected](scope='session')\ndef celery_session_app(request,\n celery_config,\n celery_parameters,\n celery_enable_logging,\n use_celery_app_trap):\n # type: (Any) -> Celery\n \"\"\"Session Fixture: Return app for session fixtures.\"\"\"\n mark = request.node.get_closest_marker('celery')\n config = dict(celery_config, **mark.kwargs if mark else {})\n with _create_app(enable_logging=celery_enable_logging,\n use_trap=use_celery_app_trap,\n parameters=celery_parameters,\n **config) as app:\n if not use_celery_app_trap:\n app.set_default()\n app.set_current()\n yield app\n\n\[email protected](scope='session')\ndef celery_session_worker(request,\n celery_session_app,\n celery_includes,\n celery_worker_pool,\n celery_worker_parameters):\n # type: (Any, Celery, Sequence[str], str) -> WorkController\n \"\"\"Session Fixture: Start worker that lives throughout test suite.\"\"\"\n if not NO_WORKER:\n for module in celery_includes:\n celery_session_app.loader.import_task_module(module)\n with worker.start_worker(celery_session_app,\n pool=celery_worker_pool,\n **celery_worker_parameters) as w:\n yield w\n\n\[email protected](scope='session')\ndef celery_enable_logging():\n # type: () -> bool\n \"\"\"You can override this fixture to enable logging.\"\"\"\n return False\n\n\[email protected](scope='session')\ndef celery_includes():\n # type: () -> Sequence[str]\n \"\"\"You can override this include modules when a worker start.\n\n You can have this return a list of module names to import,\n these can be task modules, modules registering signals, and so on.\n \"\"\"\n return ()\n\n\[email protected](scope='session')\ndef celery_worker_pool():\n # type: () -> Union[str, Any]\n \"\"\"You can override this fixture to set the worker pool.\n\n The \"solo\" pool is used by default, but you can set this to\n return e.g. \"prefork\".\n \"\"\"\n return 'solo'\n\n\[email protected](scope='session')\ndef celery_config():\n # type: () -> Mapping[str, Any]\n \"\"\"Redefine this fixture to configure the test Celery app.\n\n The config returned by your fixture will then be used\n to configure the :func:`celery_app` fixture.\n \"\"\"\n return {}\n\n\[email protected](scope='session')\ndef celery_parameters():\n # type: () -> Mapping[str, Any]\n \"\"\"Redefine this fixture to change the init parameters of test Celery app.\n\n The dict returned by your fixture will then be used\n as parameters when instantiating :class:`~celery.Celery`.\n \"\"\"\n return {}\n\n\[email protected](scope='session')\ndef celery_worker_parameters():\n # type: () -> Mapping[str, Any]\n \"\"\"Redefine this fixture to change the init parameters of Celery workers.\n\n This can be used e. g. 
to define queues the worker will consume tasks from.\n\n The dict returned by your fixture will then be used\n as parameters when instantiating :class:`~celery.worker.WorkController`.\n \"\"\"\n return {}\n\n\[email protected]()\ndef celery_app(request,\n celery_config,\n celery_parameters,\n celery_enable_logging,\n use_celery_app_trap):\n \"\"\"Fixture creating a Celery application instance.\"\"\"\n mark = request.node.get_closest_marker('celery')\n config = dict(celery_config, **mark.kwargs if mark else {})\n with _create_app(enable_logging=celery_enable_logging,\n use_trap=use_celery_app_trap,\n parameters=celery_parameters,\n **config) as app:\n yield app\n\n\[email protected]()\ndef celery_worker(request,\n celery_app,\n celery_includes,\n celery_worker_pool,\n celery_worker_parameters):\n # type: (Any, Celery, Sequence[str], str) -> WorkController\n \"\"\"Fixture: Start worker in a thread, stop it when the test returns.\"\"\"\n if not NO_WORKER:\n for module in celery_includes:\n celery_app.loader.import_task_module(module)\n with worker.start_worker(celery_app,\n pool=celery_worker_pool,\n **celery_worker_parameters) as w:\n yield w\n\n\[email protected]()\ndef depends_on_current_app(celery_app):\n \"\"\"Fixture that sets app as current.\"\"\"\n celery_app.set_current()\n", "path": "celery/contrib/pytest.py"}]}
| 2,868 | 188 |
gh_patches_debug_19565
|
rasdani/github-patches
|
git_diff
|
certbot__certbot-1591
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Log when simple-verify fails
Half #1409. When we fail to self verify a challenge, we should log the problem and continue instead of exiting.
--- END ISSUE ---
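For illustration only, a hedged sketch of the requested behavior (the helper name and wiring below are hypothetical, not the plugin's actual API): warn and keep going when self-verification fails, rather than abandoning the authorization.
```python
import logging

logger = logging.getLogger(__name__)

def self_verify_and_continue(response, chall, domain, public_key, port):
    # Hypothetical helper mirroring the simple_verify() call used in the manual
    # plugin below: log a warning on failure instead of aborting the authorization.
    if not response.simple_verify(chall, domain, public_key, port):
        logger.warning("Self-verify of challenge failed.")
    return response
```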
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `letsencrypt/plugins/manual.py`
Content:
```
1 """Manual plugin."""
2 import os
3 import logging
4 import pipes
5 import shutil
6 import signal
7 import socket
8 import subprocess
9 import sys
10 import tempfile
11 import time
12
13 import zope.component
14 import zope.interface
15
16 from acme import challenges
17
18 from letsencrypt import errors
19 from letsencrypt import interfaces
20 from letsencrypt.plugins import common
21
22
23 logger = logging.getLogger(__name__)
24
25
26 class Authenticator(common.Plugin):
27 """Manual Authenticator.
28
29 This plugin requires user's manual intervention in setting up a HTTP
30 server for solving http-01 challenges and thus does not need to be
31 run as a privileged process. Alternatively shows instructions on how
32 to use Python's built-in HTTP server.
33
34 .. todo:: Support for `~.challenges.TLSSNI01`.
35
36 """
37 zope.interface.implements(interfaces.IAuthenticator)
38 zope.interface.classProvides(interfaces.IPluginFactory)
39 hidden = True
40
41 description = "Manually configure an HTTP server"
42
43 MESSAGE_TEMPLATE = """\
44 Make sure your web server displays the following content at
45 {uri} before continuing:
46
47 {validation}
48
49 Content-Type header MUST be set to {ct}.
50
51 If you don't have HTTP server configured, you can run the following
52 command on the target server (as root):
53
54 {command}
55 """
56
57 # a disclaimer about your current IP being transmitted to Let's Encrypt's servers.
58 IP_DISCLAIMER = """\
59 NOTE: The IP of this machine will be publicly logged as having requested this certificate. \
60 If you're running letsencrypt in manual mode on a machine that is not your server, \
61 please ensure you're okay with that.
62
63 Are you OK with your IP being logged?
64 """
65
66 # "cd /tmp/letsencrypt" makes sure user doesn't serve /root,
67 # separate "public_html" ensures that cert.pem/key.pem are not
68 # served and makes it more obvious that Python command will serve
69 # anything recursively under the cwd
70
71 CMD_TEMPLATE = """\
72 mkdir -p {root}/public_html/{achall.URI_ROOT_PATH}
73 cd {root}/public_html
74 printf "%s" {validation} > {achall.URI_ROOT_PATH}/{encoded_token}
75 # run only once per server:
76 $(command -v python2 || command -v python2.7 || command -v python2.6) -c \\
77 "import BaseHTTPServer, SimpleHTTPServer; \\
78 SimpleHTTPServer.SimpleHTTPRequestHandler.extensions_map = {{'': '{ct}'}}; \\
79 s = BaseHTTPServer.HTTPServer(('', {port}), SimpleHTTPServer.SimpleHTTPRequestHandler); \\
80 s.serve_forever()" """
81 """Command template."""
82
83 def __init__(self, *args, **kwargs):
84 super(Authenticator, self).__init__(*args, **kwargs)
85 self._root = (tempfile.mkdtemp() if self.conf("test-mode")
86 else "/tmp/letsencrypt")
87 self._httpd = None
88
89 @classmethod
90 def add_parser_arguments(cls, add):
91 add("test-mode", action="store_true",
92 help="Test mode. Executes the manual command in subprocess.")
93
94 def prepare(self): # pylint: disable=missing-docstring,no-self-use
95 pass # pragma: no cover
96
97 def more_info(self): # pylint: disable=missing-docstring,no-self-use
98 return ("This plugin requires user's manual intervention in setting "
99 "up an HTTP server for solving http-01 challenges and thus "
100 "does not need to be run as a privileged process. "
101 "Alternatively shows instructions on how to use Python's "
102 "built-in HTTP server.")
103
104 def get_chall_pref(self, domain):
105 # pylint: disable=missing-docstring,no-self-use,unused-argument
106 return [challenges.HTTP01]
107
108 def perform(self, achalls): # pylint: disable=missing-docstring
109 responses = []
110 # TODO: group achalls by the same socket.gethostbyname(_ex)
111 # and prompt only once per server (one "echo -n" per domain)
112 for achall in achalls:
113 responses.append(self._perform_single(achall))
114 return responses
115
116 @classmethod
117 def _test_mode_busy_wait(cls, port):
118 while True:
119 time.sleep(1)
120 sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
121 try:
122 sock.connect(("localhost", port))
123 except socket.error: # pragma: no cover
124 pass
125 else:
126 break
127 finally:
128 sock.close()
129
130 def _perform_single(self, achall):
131 # same path for each challenge response would be easier for
132 # users, but will not work if multiple domains point at the
133 # same server: default command doesn't support virtual hosts
134 response, validation = achall.response_and_validation()
135
136 port = (response.port if self.config.http01_port is None
137 else int(self.config.http01_port))
138 command = self.CMD_TEMPLATE.format(
139 root=self._root, achall=achall, response=response,
140 # TODO(kuba): pipes still necessary?
141 validation=pipes.quote(validation),
142 encoded_token=achall.chall.encode("token"),
143 ct=achall.CONTENT_TYPE, port=port)
144 if self.conf("test-mode"):
145 logger.debug("Test mode. Executing the manual command: %s", command)
146 # sh shipped with OS X does't support echo -n, but supports printf
147 try:
148 self._httpd = subprocess.Popen(
149 command,
150 # don't care about setting stdout and stderr,
151 # we're in test mode anyway
152 shell=True,
153 executable=None,
154 # "preexec_fn" is UNIX specific, but so is "command"
155 preexec_fn=os.setsid)
156 except OSError as error: # ValueError should not happen!
157 logger.debug(
158 "Couldn't execute manual command: %s", error, exc_info=True)
159 return False
160 logger.debug("Manual command running as PID %s.", self._httpd.pid)
161 # give it some time to bootstrap, before we try to verify
162 # (cert generation in case of simpleHttpS might take time)
163 self._test_mode_busy_wait(port)
164 if self._httpd.poll() is not None:
165 raise errors.Error("Couldn't execute manual command")
166 else:
167 if not zope.component.getUtility(interfaces.IDisplay).yesno(
168 self.IP_DISCLAIMER, "Yes", "No"):
169 raise errors.PluginError("Must agree to IP logging to proceed")
170
171 self._notify_and_wait(self.MESSAGE_TEMPLATE.format(
172 validation=validation, response=response,
173 uri=achall.chall.uri(achall.domain),
174 ct=achall.CONTENT_TYPE, command=command))
175
176 if response.simple_verify(
177 achall.chall, achall.domain,
178 achall.account_key.public_key(), self.config.http01_port):
179 return response
180 else:
181 logger.error(
182 "Self-verify of challenge failed, authorization abandoned.")
183 if self.conf("test-mode") and self._httpd.poll() is not None:
184 # simply verify cause command failure...
185 return False
186 return None
187
188 def _notify_and_wait(self, message): # pylint: disable=no-self-use
189 # TODO: IDisplay wraps messages, breaking the command
190 #answer = zope.component.getUtility(interfaces.IDisplay).notification(
191 # message=message, height=25, pause=True)
192 sys.stdout.write(message)
193 raw_input("Press ENTER to continue")
194
195 def cleanup(self, achalls):
196 # pylint: disable=missing-docstring,no-self-use,unused-argument
197 if self.conf("test-mode"):
198 assert self._httpd is not None, (
199 "cleanup() must be called after perform()")
200 if self._httpd.poll() is None:
201 logger.debug("Terminating manual command process")
202 os.killpg(self._httpd.pid, signal.SIGTERM)
203 else:
204 logger.debug("Manual command process already terminated "
205 "with %s code", self._httpd.returncode)
206 shutil.rmtree(self._root)
207
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/letsencrypt/plugins/manual.py b/letsencrypt/plugins/manual.py
--- a/letsencrypt/plugins/manual.py
+++ b/letsencrypt/plugins/manual.py
@@ -173,17 +173,12 @@
uri=achall.chall.uri(achall.domain),
ct=achall.CONTENT_TYPE, command=command))
- if response.simple_verify(
+ if not response.simple_verify(
achall.chall, achall.domain,
achall.account_key.public_key(), self.config.http01_port):
- return response
- else:
- logger.error(
- "Self-verify of challenge failed, authorization abandoned.")
- if self.conf("test-mode") and self._httpd.poll() is not None:
- # simply verify cause command failure...
- return False
- return None
+ logger.warning("Self-verify of challenge failed.")
+
+ return response
def _notify_and_wait(self, message): # pylint: disable=no-self-use
# TODO: IDisplay wraps messages, breaking the command
|
{"golden_diff": "diff --git a/letsencrypt/plugins/manual.py b/letsencrypt/plugins/manual.py\n--- a/letsencrypt/plugins/manual.py\n+++ b/letsencrypt/plugins/manual.py\n@@ -173,17 +173,12 @@\n uri=achall.chall.uri(achall.domain),\n ct=achall.CONTENT_TYPE, command=command))\n \n- if response.simple_verify(\n+ if not response.simple_verify(\n achall.chall, achall.domain,\n achall.account_key.public_key(), self.config.http01_port):\n- return response\n- else:\n- logger.error(\n- \"Self-verify of challenge failed, authorization abandoned.\")\n- if self.conf(\"test-mode\") and self._httpd.poll() is not None:\n- # simply verify cause command failure...\n- return False\n- return None\n+ logger.warning(\"Self-verify of challenge failed.\")\n+\n+ return response\n \n def _notify_and_wait(self, message): # pylint: disable=no-self-use\n # TODO: IDisplay wraps messages, breaking the command\n", "issue": "Log when simple-verify fails\nHalf #1409. When we fail to self verify a challenge, we should log the problem and continue instead of exiting.\n\n", "before_files": [{"content": "\"\"\"Manual plugin.\"\"\"\nimport os\nimport logging\nimport pipes\nimport shutil\nimport signal\nimport socket\nimport subprocess\nimport sys\nimport tempfile\nimport time\n\nimport zope.component\nimport zope.interface\n\nfrom acme import challenges\n\nfrom letsencrypt import errors\nfrom letsencrypt import interfaces\nfrom letsencrypt.plugins import common\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Authenticator(common.Plugin):\n \"\"\"Manual Authenticator.\n\n This plugin requires user's manual intervention in setting up a HTTP\n server for solving http-01 challenges and thus does not need to be\n run as a privileged process. Alternatively shows instructions on how\n to use Python's built-in HTTP server.\n\n .. todo:: Support for `~.challenges.TLSSNI01`.\n\n \"\"\"\n zope.interface.implements(interfaces.IAuthenticator)\n zope.interface.classProvides(interfaces.IPluginFactory)\n hidden = True\n\n description = \"Manually configure an HTTP server\"\n\n MESSAGE_TEMPLATE = \"\"\"\\\nMake sure your web server displays the following content at\n{uri} before continuing:\n\n{validation}\n\nContent-Type header MUST be set to {ct}.\n\nIf you don't have HTTP server configured, you can run the following\ncommand on the target server (as root):\n\n{command}\n\"\"\"\n\n # a disclaimer about your current IP being transmitted to Let's Encrypt's servers.\n IP_DISCLAIMER = \"\"\"\\\nNOTE: The IP of this machine will be publicly logged as having requested this certificate. 
\\\nIf you're running letsencrypt in manual mode on a machine that is not your server, \\\nplease ensure you're okay with that.\n\nAre you OK with your IP being logged?\n\"\"\"\n\n # \"cd /tmp/letsencrypt\" makes sure user doesn't serve /root,\n # separate \"public_html\" ensures that cert.pem/key.pem are not\n # served and makes it more obvious that Python command will serve\n # anything recursively under the cwd\n\n CMD_TEMPLATE = \"\"\"\\\nmkdir -p {root}/public_html/{achall.URI_ROOT_PATH}\ncd {root}/public_html\nprintf \"%s\" {validation} > {achall.URI_ROOT_PATH}/{encoded_token}\n# run only once per server:\n$(command -v python2 || command -v python2.7 || command -v python2.6) -c \\\\\n\"import BaseHTTPServer, SimpleHTTPServer; \\\\\nSimpleHTTPServer.SimpleHTTPRequestHandler.extensions_map = {{'': '{ct}'}}; \\\\\ns = BaseHTTPServer.HTTPServer(('', {port}), SimpleHTTPServer.SimpleHTTPRequestHandler); \\\\\ns.serve_forever()\" \"\"\"\n \"\"\"Command template.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(Authenticator, self).__init__(*args, **kwargs)\n self._root = (tempfile.mkdtemp() if self.conf(\"test-mode\")\n else \"/tmp/letsencrypt\")\n self._httpd = None\n\n @classmethod\n def add_parser_arguments(cls, add):\n add(\"test-mode\", action=\"store_true\",\n help=\"Test mode. Executes the manual command in subprocess.\")\n\n def prepare(self): # pylint: disable=missing-docstring,no-self-use\n pass # pragma: no cover\n\n def more_info(self): # pylint: disable=missing-docstring,no-self-use\n return (\"This plugin requires user's manual intervention in setting \"\n \"up an HTTP server for solving http-01 challenges and thus \"\n \"does not need to be run as a privileged process. \"\n \"Alternatively shows instructions on how to use Python's \"\n \"built-in HTTP server.\")\n\n def get_chall_pref(self, domain):\n # pylint: disable=missing-docstring,no-self-use,unused-argument\n return [challenges.HTTP01]\n\n def perform(self, achalls): # pylint: disable=missing-docstring\n responses = []\n # TODO: group achalls by the same socket.gethostbyname(_ex)\n # and prompt only once per server (one \"echo -n\" per domain)\n for achall in achalls:\n responses.append(self._perform_single(achall))\n return responses\n\n @classmethod\n def _test_mode_busy_wait(cls, port):\n while True:\n time.sleep(1)\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n sock.connect((\"localhost\", port))\n except socket.error: # pragma: no cover\n pass\n else:\n break\n finally:\n sock.close()\n\n def _perform_single(self, achall):\n # same path for each challenge response would be easier for\n # users, but will not work if multiple domains point at the\n # same server: default command doesn't support virtual hosts\n response, validation = achall.response_and_validation()\n\n port = (response.port if self.config.http01_port is None\n else int(self.config.http01_port))\n command = self.CMD_TEMPLATE.format(\n root=self._root, achall=achall, response=response,\n # TODO(kuba): pipes still necessary?\n validation=pipes.quote(validation),\n encoded_token=achall.chall.encode(\"token\"),\n ct=achall.CONTENT_TYPE, port=port)\n if self.conf(\"test-mode\"):\n logger.debug(\"Test mode. 
Executing the manual command: %s\", command)\n # sh shipped with OS X does't support echo -n, but supports printf\n try:\n self._httpd = subprocess.Popen(\n command,\n # don't care about setting stdout and stderr,\n # we're in test mode anyway\n shell=True,\n executable=None,\n # \"preexec_fn\" is UNIX specific, but so is \"command\"\n preexec_fn=os.setsid)\n except OSError as error: # ValueError should not happen!\n logger.debug(\n \"Couldn't execute manual command: %s\", error, exc_info=True)\n return False\n logger.debug(\"Manual command running as PID %s.\", self._httpd.pid)\n # give it some time to bootstrap, before we try to verify\n # (cert generation in case of simpleHttpS might take time)\n self._test_mode_busy_wait(port)\n if self._httpd.poll() is not None:\n raise errors.Error(\"Couldn't execute manual command\")\n else:\n if not zope.component.getUtility(interfaces.IDisplay).yesno(\n self.IP_DISCLAIMER, \"Yes\", \"No\"):\n raise errors.PluginError(\"Must agree to IP logging to proceed\")\n\n self._notify_and_wait(self.MESSAGE_TEMPLATE.format(\n validation=validation, response=response,\n uri=achall.chall.uri(achall.domain),\n ct=achall.CONTENT_TYPE, command=command))\n\n if response.simple_verify(\n achall.chall, achall.domain,\n achall.account_key.public_key(), self.config.http01_port):\n return response\n else:\n logger.error(\n \"Self-verify of challenge failed, authorization abandoned.\")\n if self.conf(\"test-mode\") and self._httpd.poll() is not None:\n # simply verify cause command failure...\n return False\n return None\n\n def _notify_and_wait(self, message): # pylint: disable=no-self-use\n # TODO: IDisplay wraps messages, breaking the command\n #answer = zope.component.getUtility(interfaces.IDisplay).notification(\n # message=message, height=25, pause=True)\n sys.stdout.write(message)\n raw_input(\"Press ENTER to continue\")\n\n def cleanup(self, achalls):\n # pylint: disable=missing-docstring,no-self-use,unused-argument\n if self.conf(\"test-mode\"):\n assert self._httpd is not None, (\n \"cleanup() must be called after perform()\")\n if self._httpd.poll() is None:\n logger.debug(\"Terminating manual command process\")\n os.killpg(self._httpd.pid, signal.SIGTERM)\n else:\n logger.debug(\"Manual command process already terminated \"\n \"with %s code\", self._httpd.returncode)\n shutil.rmtree(self._root)\n", "path": "letsencrypt/plugins/manual.py"}], "after_files": [{"content": "\"\"\"Manual plugin.\"\"\"\nimport os\nimport logging\nimport pipes\nimport shutil\nimport signal\nimport socket\nimport subprocess\nimport sys\nimport tempfile\nimport time\n\nimport zope.component\nimport zope.interface\n\nfrom acme import challenges\n\nfrom letsencrypt import errors\nfrom letsencrypt import interfaces\nfrom letsencrypt.plugins import common\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Authenticator(common.Plugin):\n \"\"\"Manual Authenticator.\n\n This plugin requires user's manual intervention in setting up a HTTP\n server for solving http-01 challenges and thus does not need to be\n run as a privileged process. Alternatively shows instructions on how\n to use Python's built-in HTTP server.\n\n .. 
todo:: Support for `~.challenges.TLSSNI01`.\n\n \"\"\"\n zope.interface.implements(interfaces.IAuthenticator)\n zope.interface.classProvides(interfaces.IPluginFactory)\n hidden = True\n\n description = \"Manually configure an HTTP server\"\n\n MESSAGE_TEMPLATE = \"\"\"\\\nMake sure your web server displays the following content at\n{uri} before continuing:\n\n{validation}\n\nContent-Type header MUST be set to {ct}.\n\nIf you don't have HTTP server configured, you can run the following\ncommand on the target server (as root):\n\n{command}\n\"\"\"\n\n # a disclaimer about your current IP being transmitted to Let's Encrypt's servers.\n IP_DISCLAIMER = \"\"\"\\\nNOTE: The IP of this machine will be publicly logged as having requested this certificate. \\\nIf you're running letsencrypt in manual mode on a machine that is not your server, \\\nplease ensure you're okay with that.\n\nAre you OK with your IP being logged?\n\"\"\"\n\n # \"cd /tmp/letsencrypt\" makes sure user doesn't serve /root,\n # separate \"public_html\" ensures that cert.pem/key.pem are not\n # served and makes it more obvious that Python command will serve\n # anything recursively under the cwd\n\n CMD_TEMPLATE = \"\"\"\\\nmkdir -p {root}/public_html/{achall.URI_ROOT_PATH}\ncd {root}/public_html\nprintf \"%s\" {validation} > {achall.URI_ROOT_PATH}/{encoded_token}\n# run only once per server:\n$(command -v python2 || command -v python2.7 || command -v python2.6) -c \\\\\n\"import BaseHTTPServer, SimpleHTTPServer; \\\\\nSimpleHTTPServer.SimpleHTTPRequestHandler.extensions_map = {{'': '{ct}'}}; \\\\\ns = BaseHTTPServer.HTTPServer(('', {port}), SimpleHTTPServer.SimpleHTTPRequestHandler); \\\\\ns.serve_forever()\" \"\"\"\n \"\"\"Command template.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(Authenticator, self).__init__(*args, **kwargs)\n self._root = (tempfile.mkdtemp() if self.conf(\"test-mode\")\n else \"/tmp/letsencrypt\")\n self._httpd = None\n\n @classmethod\n def add_parser_arguments(cls, add):\n add(\"test-mode\", action=\"store_true\",\n help=\"Test mode. Executes the manual command in subprocess.\")\n\n def prepare(self): # pylint: disable=missing-docstring,no-self-use\n pass # pragma: no cover\n\n def more_info(self): # pylint: disable=missing-docstring,no-self-use\n return (\"This plugin requires user's manual intervention in setting \"\n \"up an HTTP server for solving http-01 challenges and thus \"\n \"does not need to be run as a privileged process. 
\"\n \"Alternatively shows instructions on how to use Python's \"\n \"built-in HTTP server.\")\n\n def get_chall_pref(self, domain):\n # pylint: disable=missing-docstring,no-self-use,unused-argument\n return [challenges.HTTP01]\n\n def perform(self, achalls): # pylint: disable=missing-docstring\n responses = []\n # TODO: group achalls by the same socket.gethostbyname(_ex)\n # and prompt only once per server (one \"echo -n\" per domain)\n for achall in achalls:\n responses.append(self._perform_single(achall))\n return responses\n\n @classmethod\n def _test_mode_busy_wait(cls, port):\n while True:\n time.sleep(1)\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n sock.connect((\"localhost\", port))\n except socket.error: # pragma: no cover\n pass\n else:\n break\n finally:\n sock.close()\n\n def _perform_single(self, achall):\n # same path for each challenge response would be easier for\n # users, but will not work if multiple domains point at the\n # same server: default command doesn't support virtual hosts\n response, validation = achall.response_and_validation()\n\n port = (response.port if self.config.http01_port is None\n else int(self.config.http01_port))\n command = self.CMD_TEMPLATE.format(\n root=self._root, achall=achall, response=response,\n # TODO(kuba): pipes still necessary?\n validation=pipes.quote(validation),\n encoded_token=achall.chall.encode(\"token\"),\n ct=achall.CONTENT_TYPE, port=port)\n if self.conf(\"test-mode\"):\n logger.debug(\"Test mode. Executing the manual command: %s\", command)\n # sh shipped with OS X does't support echo -n, but supports printf\n try:\n self._httpd = subprocess.Popen(\n command,\n # don't care about setting stdout and stderr,\n # we're in test mode anyway\n shell=True,\n executable=None,\n # \"preexec_fn\" is UNIX specific, but so is \"command\"\n preexec_fn=os.setsid)\n except OSError as error: # ValueError should not happen!\n logger.debug(\n \"Couldn't execute manual command: %s\", error, exc_info=True)\n return False\n logger.debug(\"Manual command running as PID %s.\", self._httpd.pid)\n # give it some time to bootstrap, before we try to verify\n # (cert generation in case of simpleHttpS might take time)\n self._test_mode_busy_wait(port)\n if self._httpd.poll() is not None:\n raise errors.Error(\"Couldn't execute manual command\")\n else:\n if not zope.component.getUtility(interfaces.IDisplay).yesno(\n self.IP_DISCLAIMER, \"Yes\", \"No\"):\n raise errors.PluginError(\"Must agree to IP logging to proceed\")\n\n self._notify_and_wait(self.MESSAGE_TEMPLATE.format(\n validation=validation, response=response,\n uri=achall.chall.uri(achall.domain),\n ct=achall.CONTENT_TYPE, command=command))\n\n if not response.simple_verify(\n achall.chall, achall.domain,\n achall.account_key.public_key(), self.config.http01_port):\n logger.warning(\"Self-verify of challenge failed.\")\n\n return response\n\n def _notify_and_wait(self, message): # pylint: disable=no-self-use\n # TODO: IDisplay wraps messages, breaking the command\n #answer = zope.component.getUtility(interfaces.IDisplay).notification(\n # message=message, height=25, pause=True)\n sys.stdout.write(message)\n raw_input(\"Press ENTER to continue\")\n\n def cleanup(self, achalls):\n # pylint: disable=missing-docstring,no-self-use,unused-argument\n if self.conf(\"test-mode\"):\n assert self._httpd is not None, (\n \"cleanup() must be called after perform()\")\n if self._httpd.poll() is None:\n logger.debug(\"Terminating manual command process\")\n os.killpg(self._httpd.pid, 
signal.SIGTERM)\n else:\n logger.debug(\"Manual command process already terminated \"\n \"with %s code\", self._httpd.returncode)\n shutil.rmtree(self._root)\n", "path": "letsencrypt/plugins/manual.py"}]}
| 2,544 | 234 |
gh_patches_debug_32748
|
rasdani/github-patches
|
git_diff
|
microsoft__botbuilder-python-1888
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
An activity with attachments raises an exception in the telemetry logger
## Version
4.14.1
## Describe the bug
Activity.attachments is a list. Therefore, the telemetry logger raises an exception (see below) when trying to call `strip()` on attachments.
```
File "/Users/ivo/Projects/alvao-chatbot/bot/venv/lib/python3.8/site-packages/botbuilder/core/telemetry_logger_middleware.py", line 220, in fill_send_event_properties
if activity.attachments and activity.attachments.strip():
AttributeError: 'list' object has no attribute 'strip'
```
This line produces the bug: https://github.com/microsoft/botbuilder-python/blob/main/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py#L220
## To Reproduce
Steps to reproduce the behavior:
1. Use TelemetryLoggerMiddleware
2. Create a card message: `MessageFactory.attachment(Attachment(content_type="application/vnd.microsoft.card.adaptive", content=card))`
3. Send this activity from bot to user
4. When this activity is being logged via TelemetryLoggerMiddleware, it raises an exception
## Expected behavior
There should be a different check than `strip()`, possibly `len() > 0`. The `strip` function returns a string without leading and trailing whitespace, so there is no sense in calling it a) in a condition, b) on a list.
--- END ISSUE ---
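For illustration, a hedged sketch of the kind of check the issue asks for (not necessarily the project's actual patch). A list's truthiness already covers both `None` and the empty list, so no `strip()` call is needed:
```python
# Sketch of the attachments check inside fill_send_event_properties:
if activity.attachments:  # true only for a non-empty list; replaces the invalid .strip() call
    properties[TelemetryConstants.ATTACHMENTS_PROPERTY] = activity.attachments
```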
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3 """Middleware Component for logging Activity messages."""
4 from typing import Awaitable, Callable, List, Dict
5 from botbuilder.schema import Activity, ConversationReference, ActivityTypes
6 from botbuilder.schema.teams import TeamsChannelData, TeamInfo
7 from botframework.connector import Channels
8
9 from .bot_telemetry_client import BotTelemetryClient
10 from .bot_assert import BotAssert
11 from .middleware_set import Middleware
12 from .null_telemetry_client import NullTelemetryClient
13 from .turn_context import TurnContext
14 from .telemetry_constants import TelemetryConstants
15 from .telemetry_logger_constants import TelemetryLoggerConstants
16
17
18 # pylint: disable=line-too-long
19 class TelemetryLoggerMiddleware(Middleware):
20 """Middleware for logging incoming, outgoing, updated or deleted Activity messages."""
21
22 def __init__(
23 self, telemetry_client: BotTelemetryClient, log_personal_information: bool
24 ) -> None:
25 super(TelemetryLoggerMiddleware, self).__init__()
26 self._telemetry_client = telemetry_client or NullTelemetryClient()
27 self._log_personal_information = log_personal_information
28
29 @property
30 def telemetry_client(self) -> BotTelemetryClient:
31 """Gets the currently configured BotTelemetryClient."""
32 return self._telemetry_client
33
34 @property
35 def log_personal_information(self) -> bool:
36 """ Gets a value indicating whether determines whether to log personal
37 information that came from the user."""
38 return self._log_personal_information
39
40 # pylint: disable=arguments-differ
41 async def on_turn(
42 self, context: TurnContext, logic_fn: Callable[[TurnContext], Awaitable]
43 ) -> None:
44 """Logs events based on incoming and outgoing activities using
45 BotTelemetryClient base class
46
47 :param turn_context: The context object for this turn.
48 :param logic: Callable to continue the bot middleware pipeline
49
50 :return: None
51 """
52 BotAssert.context_not_none(context)
53
54 # Log incoming activity at beginning of turn
55 if context.activity:
56 activity = context.activity
57 # Log Bot Message Received
58 await self.on_receive_activity(activity)
59
60 # hook up onSend pipeline
61 # pylint: disable=unused-argument
62 async def send_activities_handler(
63 ctx: TurnContext,
64 activities: List[Activity],
65 next_send: Callable[[], Awaitable[None]],
66 ):
67 # Run full pipeline
68 responses = await next_send()
69 for activity in activities:
70 await self.on_send_activity(activity)
71 return responses
72
73 context.on_send_activities(send_activities_handler)
74
75 # hook up update activity pipeline
76 async def update_activity_handler(
77 ctx: TurnContext, activity: Activity, next_update: Callable[[], Awaitable]
78 ):
79 # Run full pipeline
80 response = await next_update()
81 await self.on_update_activity(activity)
82 return response
83
84 context.on_update_activity(update_activity_handler)
85
86 # hook up delete activity pipeline
87 async def delete_activity_handler(
88 ctx: TurnContext,
89 reference: ConversationReference,
90 next_delete: Callable[[], Awaitable],
91 ):
92 # Run full pipeline
93 await next_delete()
94
95 delete_msg = Activity(
96 type=ActivityTypes.message_delete, id=reference.activity_id
97 )
98 deleted_activity: Activity = TurnContext.apply_conversation_reference(
99 delete_msg, reference, False
100 )
101 await self.on_delete_activity(deleted_activity)
102
103 context.on_delete_activity(delete_activity_handler)
104
105 if logic_fn:
106 await logic_fn()
107
108 async def on_receive_activity(self, activity: Activity) -> None:
109 """Invoked when a message is received from the user.
110 Performs logging of telemetry data using the BotTelemetryClient.track_event() method.
111 This event name used is "BotMessageReceived".
112 :param activity: Current activity sent from user.
113 """
114 self.telemetry_client.track_event(
115 TelemetryLoggerConstants.BOT_MSG_RECEIVE_EVENT,
116 await self.fill_receive_event_properties(activity),
117 )
118
119 async def on_send_activity(self, activity: Activity) -> None:
120 """Invoked when the bot sends a message to the user.
121 Performs logging of telemetry data using the BotTelemetryClient.track_event() method.
122 This event name used is "BotMessageSend".
123 :param activity: Current activity sent from bot.
124 """
125 self.telemetry_client.track_event(
126 TelemetryLoggerConstants.BOT_MSG_SEND_EVENT,
127 await self.fill_send_event_properties(activity),
128 )
129
130 async def on_update_activity(self, activity: Activity) -> None:
131 """Invoked when the bot updates a message.
132 Performs logging of telemetry data using the BotTelemetryClient.track_event() method.
133 This event name used is "BotMessageUpdate".
134 :param activity: Current activity sent from user.
135 """
136 self.telemetry_client.track_event(
137 TelemetryLoggerConstants.BOT_MSG_UPDATE_EVENT,
138 await self.fill_update_event_properties(activity),
139 )
140
141 async def on_delete_activity(self, activity: Activity) -> None:
142 """Invoked when the bot deletes a message.
143 Performs logging of telemetry data using the BotTelemetryClient.track_event() method.
144 This event name used is "BotMessageDelete".
145 :param activity: Current activity sent from user.
146 """
147 self.telemetry_client.track_event(
148 TelemetryLoggerConstants.BOT_MSG_DELETE_EVENT,
149 await self.fill_delete_event_properties(activity),
150 )
151
152 async def fill_receive_event_properties(
153 self, activity: Activity, additional_properties: Dict[str, str] = None
154 ) -> Dict[str, str]:
155 """Fills the event properties for the BotMessageReceived.
156 Adheres to the LogPersonalInformation flag to filter Name, Text and Speak properties.
157 :param activity: activity sent from user.
158 :param additional_properties: Additional properties to add to the event.
159 Additional properties can override "stock" properties.
160
161 :return: A dictionary that is sent as "Properties" to
162 BotTelemetryClient.track_event method for the BotMessageReceived event.
163 """
164 properties = {
165 TelemetryConstants.FROM_ID_PROPERTY: activity.from_property.id
166 if activity.from_property
167 else None,
168 TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,
169 TelemetryConstants.LOCALE_PROPERTY: activity.locale,
170 TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,
171 TelemetryConstants.RECIPIENT_NAME_PROPERTY: activity.recipient.name,
172 }
173
174 if self.log_personal_information:
175 if (
176 activity.from_property
177 and activity.from_property.name
178 and activity.from_property.name.strip()
179 ):
180 properties[
181 TelemetryConstants.FROM_NAME_PROPERTY
182 ] = activity.from_property.name
183 if activity.text and activity.text.strip():
184 properties[TelemetryConstants.TEXT_PROPERTY] = activity.text
185 if activity.speak and activity.speak.strip():
186 properties[TelemetryConstants.SPEAK_PROPERTY] = activity.speak
187
188 TelemetryLoggerMiddleware.__populate_additional_channel_properties(
189 activity, properties
190 )
191
192 # Additional properties can override "stock" properties
193 if additional_properties:
194 for prop in additional_properties:
195 properties[prop.key] = prop.value
196
197 return properties
198
199 async def fill_send_event_properties(
200 self, activity: Activity, additional_properties: Dict[str, str] = None
201 ) -> Dict[str, str]:
202 """Fills the event properties for the BotMessageSend.
203 These properties are logged when an activity message is sent by the Bot to the user.
204 :param activity: activity sent from user.
205 :param additional_properties: Additional properties to add to the event.
206 Additional properties can override "stock" properties.
207
208 :return: A dictionary that is sent as "Properties" to the
209 BotTelemetryClient.track_event method for the BotMessageSend event.
210 """
211 properties = {
212 TelemetryConstants.REPLY_ACTIVITY_ID_PROPERTY: activity.reply_to_id,
213 TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,
214 TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,
215 TelemetryConstants.LOCALE_PROPERTY: activity.locale,
216 }
217
218 # Use the LogPersonalInformation flag to toggle logging PII data, text and user name are common examples
219 if self.log_personal_information:
220 if activity.attachments and activity.attachments.strip():
221 properties[
222 TelemetryConstants.ATTACHMENTS_PROPERTY
223 ] = activity.attachments
224 if activity.from_property.name and activity.from_property.name.strip():
225 properties[
226 TelemetryConstants.FROM_NAME_PROPERTY
227 ] = activity.from_property.name
228 if activity.text and activity.text.strip():
229 properties[TelemetryConstants.TEXT_PROPERTY] = activity.text
230 if activity.speak and activity.speak.strip():
231 properties[TelemetryConstants.SPEAK_PROPERTY] = activity.speak
232
233 # Additional properties can override "stock" properties
234 if additional_properties:
235 for prop in additional_properties:
236 properties[prop.key] = prop.value
237
238 return properties
239
240 async def fill_update_event_properties(
241 self, activity: Activity, additional_properties: Dict[str, str] = None
242 ) -> Dict[str, str]:
243 """Fills the event properties for the BotMessageUpdate.
244 These properties are logged when an activity message is updated by the Bot.
245 For example, if a card is interacted with by the use, and the card needs
246 to be updated to reflect some interaction.
247 :param activity: activity sent from user.
248 :param additional_properties: Additional properties to add to the event.
249 Additional properties can override "stock" properties.
250
251 :return: A dictionary that is sent as "Properties" to the
252 BotTelemetryClient.track_event method for the BotMessageUpdate event.
253 """
254 properties = {
255 TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,
256 TelemetryConstants.CONVERSATION_ID_PROPERTY: activity.conversation.id,
257 TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,
258 TelemetryConstants.LOCALE_PROPERTY: activity.locale,
259 }
260
261 # Use the LogPersonalInformation flag to toggle logging PII data, text is a common examples
262 if self.log_personal_information:
263 if activity.text and activity.text.strip():
264 properties[TelemetryConstants.TEXT_PROPERTY] = activity.text
265
266 # Additional properties can override "stock" properties
267 if additional_properties:
268 for prop in additional_properties:
269 properties[prop.key] = prop.value
270
271 return properties
272
273 async def fill_delete_event_properties(
274 self, activity: Activity, additional_properties: Dict[str, str] = None
275 ) -> Dict[str, str]:
276 """Fills the event properties for the BotMessageDelete.
277 These properties are logged when an activity message is deleted by the Bot.
278 :param activity: activity sent from user.
279 :param additional_properties: Additional properties to add to the event.
280 Additional properties can override "stock" properties.
281
282 :return: A dictionary that is sent as "Properties" to the
283 BotTelemetryClient.track_event method for the BotMessageUpdate event.
284 """
285 properties = {
286 TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,
287 TelemetryConstants.CONVERSATION_ID_PROPERTY: activity.conversation.id,
288 TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,
289 }
290
291 # Additional properties can override "stock" properties
292 if additional_properties:
293 for prop in additional_properties:
294 properties[prop.key] = prop.value
295
296 return properties
297
298 @staticmethod
299 def __populate_additional_channel_properties(
300 activity: Activity, properties: dict,
301 ):
302 if activity.channel_id == Channels.ms_teams:
303 teams_channel_data: TeamsChannelData = activity.channel_data
304
305 properties["TeamsTenantId"] = (
306 teams_channel_data.tenant
307 if teams_channel_data and teams_channel_data.tenant
308 else ""
309 )
310
311 properties["TeamsUserAadObjectId"] = (
312 activity.from_property.aad_object_id if activity.from_property else ""
313 )
314
315 if teams_channel_data and teams_channel_data.team:
316 properties["TeamsTeamInfo"] = TeamInfo.serialize(
317 teams_channel_data.team
318 )
319
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py b/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py
--- a/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py
+++ b/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py
@@ -2,6 +2,8 @@
# Licensed under the MIT License.
"""Middleware Component for logging Activity messages."""
from typing import Awaitable, Callable, List, Dict
+from jsonpickle import encode
+
from botbuilder.schema import Activity, ConversationReference, ActivityTypes
from botbuilder.schema.teams import TeamsChannelData, TeamInfo
from botframework.connector import Channels
@@ -33,7 +35,7 @@
@property
def log_personal_information(self) -> bool:
- """ Gets a value indicating whether determines whether to log personal
+ """Gets a value indicating whether determines whether to log personal
information that came from the user."""
return self._log_personal_information
@@ -217,10 +219,10 @@
# Use the LogPersonalInformation flag to toggle logging PII data, text and user name are common examples
if self.log_personal_information:
- if activity.attachments and activity.attachments.strip():
- properties[
- TelemetryConstants.ATTACHMENTS_PROPERTY
- ] = activity.attachments
+ if activity.attachments and len(activity.attachments) > 0:
+ properties[TelemetryConstants.ATTACHMENTS_PROPERTY] = encode(
+ activity.attachments
+ )
if activity.from_property.name and activity.from_property.name.strip():
properties[
TelemetryConstants.FROM_NAME_PROPERTY
|
{"golden_diff": "diff --git a/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py b/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py\n--- a/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py\n+++ b/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py\n@@ -2,6 +2,8 @@\n # Licensed under the MIT License.\n \"\"\"Middleware Component for logging Activity messages.\"\"\"\n from typing import Awaitable, Callable, List, Dict\n+from jsonpickle import encode\n+\n from botbuilder.schema import Activity, ConversationReference, ActivityTypes\n from botbuilder.schema.teams import TeamsChannelData, TeamInfo\n from botframework.connector import Channels\n@@ -33,7 +35,7 @@\n \n @property\n def log_personal_information(self) -> bool:\n- \"\"\" Gets a value indicating whether determines whether to log personal\n+ \"\"\"Gets a value indicating whether determines whether to log personal\n information that came from the user.\"\"\"\n return self._log_personal_information\n \n@@ -217,10 +219,10 @@\n \n # Use the LogPersonalInformation flag to toggle logging PII data, text and user name are common examples\n if self.log_personal_information:\n- if activity.attachments and activity.attachments.strip():\n- properties[\n- TelemetryConstants.ATTACHMENTS_PROPERTY\n- ] = activity.attachments\n+ if activity.attachments and len(activity.attachments) > 0:\n+ properties[TelemetryConstants.ATTACHMENTS_PROPERTY] = encode(\n+ activity.attachments\n+ )\n if activity.from_property.name and activity.from_property.name.strip():\n properties[\n TelemetryConstants.FROM_NAME_PROPERTY\n", "issue": "An activity with attachments raises an exception in telemetry logger\n## Version\r\n4.14.1\r\n\r\n## Describe the bug\r\n\r\nActivity.attachments is a list. Therefore, telemetry logger raises an exception (see below) if trying to call `strip()` on attachements.\r\n\r\n```\r\n File \"/Users/ivo/Projects/alvao-chatbot/bot/venv/lib/python3.8/site-packages/botbuilder/core/telemetry_logger_middleware.py\", line 220, in fill_send_event_properties\r\n if activity.attachments and activity.attachments.strip():\r\nAttributeError: 'list' object has no attribute 'strip'\r\n```\r\n\r\nThis line produces the bug: https://github.com/microsoft/botbuilder-python/blob/main/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py#L220\r\n\r\n## To Reproduce\r\nSteps to reproduce the behavior:\r\n1. Use TelemetryLoggerMiddleware\r\n2. Create a card message: `MessageFactory.attachment(Attachment(content_type=\"application/vnd.microsoft.card.adaptive\", content=card))`\r\n3. Send this activity from bot to user\r\n4. When this activity is being logged via TelemetryLoggerMiddleware, it raises an exception\r\n\r\n## Expected behavior\r\nThere should be another check than `strip()`, possible `len() > 0`. Function `strip` returns a string without leading and trailing whitespaces. There is no sense to do that a) in a condition, b) on a list.\r\n\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\"\"\"Middleware Component for logging Activity messages.\"\"\"\nfrom typing import Awaitable, Callable, List, Dict\nfrom botbuilder.schema import Activity, ConversationReference, ActivityTypes\nfrom botbuilder.schema.teams import TeamsChannelData, TeamInfo\nfrom botframework.connector import Channels\n\nfrom .bot_telemetry_client import BotTelemetryClient\nfrom .bot_assert import BotAssert\nfrom .middleware_set import Middleware\nfrom .null_telemetry_client import NullTelemetryClient\nfrom .turn_context import TurnContext\nfrom .telemetry_constants import TelemetryConstants\nfrom .telemetry_logger_constants import TelemetryLoggerConstants\n\n\n# pylint: disable=line-too-long\nclass TelemetryLoggerMiddleware(Middleware):\n \"\"\"Middleware for logging incoming, outgoing, updated or deleted Activity messages.\"\"\"\n\n def __init__(\n self, telemetry_client: BotTelemetryClient, log_personal_information: bool\n ) -> None:\n super(TelemetryLoggerMiddleware, self).__init__()\n self._telemetry_client = telemetry_client or NullTelemetryClient()\n self._log_personal_information = log_personal_information\n\n @property\n def telemetry_client(self) -> BotTelemetryClient:\n \"\"\"Gets the currently configured BotTelemetryClient.\"\"\"\n return self._telemetry_client\n\n @property\n def log_personal_information(self) -> bool:\n \"\"\" Gets a value indicating whether determines whether to log personal\n information that came from the user.\"\"\"\n return self._log_personal_information\n\n # pylint: disable=arguments-differ\n async def on_turn(\n self, context: TurnContext, logic_fn: Callable[[TurnContext], Awaitable]\n ) -> None:\n \"\"\"Logs events based on incoming and outgoing activities using\n BotTelemetryClient base class\n\n :param turn_context: The context object for this turn.\n :param logic: Callable to continue the bot middleware pipeline\n\n :return: None\n \"\"\"\n BotAssert.context_not_none(context)\n\n # Log incoming activity at beginning of turn\n if context.activity:\n activity = context.activity\n # Log Bot Message Received\n await self.on_receive_activity(activity)\n\n # hook up onSend pipeline\n # pylint: disable=unused-argument\n async def send_activities_handler(\n ctx: TurnContext,\n activities: List[Activity],\n next_send: Callable[[], Awaitable[None]],\n ):\n # Run full pipeline\n responses = await next_send()\n for activity in activities:\n await self.on_send_activity(activity)\n return responses\n\n context.on_send_activities(send_activities_handler)\n\n # hook up update activity pipeline\n async def update_activity_handler(\n ctx: TurnContext, activity: Activity, next_update: Callable[[], Awaitable]\n ):\n # Run full pipeline\n response = await next_update()\n await self.on_update_activity(activity)\n return response\n\n context.on_update_activity(update_activity_handler)\n\n # hook up delete activity pipeline\n async def delete_activity_handler(\n ctx: TurnContext,\n reference: ConversationReference,\n next_delete: Callable[[], Awaitable],\n ):\n # Run full pipeline\n await next_delete()\n\n delete_msg = Activity(\n type=ActivityTypes.message_delete, id=reference.activity_id\n )\n deleted_activity: Activity = TurnContext.apply_conversation_reference(\n delete_msg, reference, False\n )\n await self.on_delete_activity(deleted_activity)\n\n context.on_delete_activity(delete_activity_handler)\n\n if logic_fn:\n await logic_fn()\n\n async def on_receive_activity(self, activity: Activity) -> None:\n \"\"\"Invoked when a 
message is received from the user.\n Performs logging of telemetry data using the BotTelemetryClient.track_event() method.\n This event name used is \"BotMessageReceived\".\n :param activity: Current activity sent from user.\n \"\"\"\n self.telemetry_client.track_event(\n TelemetryLoggerConstants.BOT_MSG_RECEIVE_EVENT,\n await self.fill_receive_event_properties(activity),\n )\n\n async def on_send_activity(self, activity: Activity) -> None:\n \"\"\"Invoked when the bot sends a message to the user.\n Performs logging of telemetry data using the BotTelemetryClient.track_event() method.\n This event name used is \"BotMessageSend\".\n :param activity: Current activity sent from bot.\n \"\"\"\n self.telemetry_client.track_event(\n TelemetryLoggerConstants.BOT_MSG_SEND_EVENT,\n await self.fill_send_event_properties(activity),\n )\n\n async def on_update_activity(self, activity: Activity) -> None:\n \"\"\"Invoked when the bot updates a message.\n Performs logging of telemetry data using the BotTelemetryClient.track_event() method.\n This event name used is \"BotMessageUpdate\".\n :param activity: Current activity sent from user.\n \"\"\"\n self.telemetry_client.track_event(\n TelemetryLoggerConstants.BOT_MSG_UPDATE_EVENT,\n await self.fill_update_event_properties(activity),\n )\n\n async def on_delete_activity(self, activity: Activity) -> None:\n \"\"\"Invoked when the bot deletes a message.\n Performs logging of telemetry data using the BotTelemetryClient.track_event() method.\n This event name used is \"BotMessageDelete\".\n :param activity: Current activity sent from user.\n \"\"\"\n self.telemetry_client.track_event(\n TelemetryLoggerConstants.BOT_MSG_DELETE_EVENT,\n await self.fill_delete_event_properties(activity),\n )\n\n async def fill_receive_event_properties(\n self, activity: Activity, additional_properties: Dict[str, str] = None\n ) -> Dict[str, str]:\n \"\"\"Fills the event properties for the BotMessageReceived.\n Adheres to the LogPersonalInformation flag to filter Name, Text and Speak properties.\n :param activity: activity sent from user.\n :param additional_properties: Additional properties to add to the event.\n Additional properties can override \"stock\" properties.\n\n :return: A dictionary that is sent as \"Properties\" to\n BotTelemetryClient.track_event method for the BotMessageReceived event.\n \"\"\"\n properties = {\n TelemetryConstants.FROM_ID_PROPERTY: activity.from_property.id\n if activity.from_property\n else None,\n TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,\n TelemetryConstants.LOCALE_PROPERTY: activity.locale,\n TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,\n TelemetryConstants.RECIPIENT_NAME_PROPERTY: activity.recipient.name,\n }\n\n if self.log_personal_information:\n if (\n activity.from_property\n and activity.from_property.name\n and activity.from_property.name.strip()\n ):\n properties[\n TelemetryConstants.FROM_NAME_PROPERTY\n ] = activity.from_property.name\n if activity.text and activity.text.strip():\n properties[TelemetryConstants.TEXT_PROPERTY] = activity.text\n if activity.speak and activity.speak.strip():\n properties[TelemetryConstants.SPEAK_PROPERTY] = activity.speak\n\n TelemetryLoggerMiddleware.__populate_additional_channel_properties(\n activity, properties\n )\n\n # Additional properties can override \"stock\" properties\n if additional_properties:\n for prop in additional_properties:\n properties[prop.key] = prop.value\n\n return properties\n\n async def fill_send_event_properties(\n self, 
activity: Activity, additional_properties: Dict[str, str] = None\n ) -> Dict[str, str]:\n \"\"\"Fills the event properties for the BotMessageSend.\n These properties are logged when an activity message is sent by the Bot to the user.\n :param activity: activity sent from user.\n :param additional_properties: Additional properties to add to the event.\n Additional properties can override \"stock\" properties.\n\n :return: A dictionary that is sent as \"Properties\" to the\n BotTelemetryClient.track_event method for the BotMessageSend event.\n \"\"\"\n properties = {\n TelemetryConstants.REPLY_ACTIVITY_ID_PROPERTY: activity.reply_to_id,\n TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,\n TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,\n TelemetryConstants.LOCALE_PROPERTY: activity.locale,\n }\n\n # Use the LogPersonalInformation flag to toggle logging PII data, text and user name are common examples\n if self.log_personal_information:\n if activity.attachments and activity.attachments.strip():\n properties[\n TelemetryConstants.ATTACHMENTS_PROPERTY\n ] = activity.attachments\n if activity.from_property.name and activity.from_property.name.strip():\n properties[\n TelemetryConstants.FROM_NAME_PROPERTY\n ] = activity.from_property.name\n if activity.text and activity.text.strip():\n properties[TelemetryConstants.TEXT_PROPERTY] = activity.text\n if activity.speak and activity.speak.strip():\n properties[TelemetryConstants.SPEAK_PROPERTY] = activity.speak\n\n # Additional properties can override \"stock\" properties\n if additional_properties:\n for prop in additional_properties:\n properties[prop.key] = prop.value\n\n return properties\n\n async def fill_update_event_properties(\n self, activity: Activity, additional_properties: Dict[str, str] = None\n ) -> Dict[str, str]:\n \"\"\"Fills the event properties for the BotMessageUpdate.\n These properties are logged when an activity message is updated by the Bot.\n For example, if a card is interacted with by the use, and the card needs\n to be updated to reflect some interaction.\n :param activity: activity sent from user.\n :param additional_properties: Additional properties to add to the event.\n Additional properties can override \"stock\" properties.\n\n :return: A dictionary that is sent as \"Properties\" to the\n BotTelemetryClient.track_event method for the BotMessageUpdate event.\n \"\"\"\n properties = {\n TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,\n TelemetryConstants.CONVERSATION_ID_PROPERTY: activity.conversation.id,\n TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,\n TelemetryConstants.LOCALE_PROPERTY: activity.locale,\n }\n\n # Use the LogPersonalInformation flag to toggle logging PII data, text is a common examples\n if self.log_personal_information:\n if activity.text and activity.text.strip():\n properties[TelemetryConstants.TEXT_PROPERTY] = activity.text\n\n # Additional properties can override \"stock\" properties\n if additional_properties:\n for prop in additional_properties:\n properties[prop.key] = prop.value\n\n return properties\n\n async def fill_delete_event_properties(\n self, activity: Activity, additional_properties: Dict[str, str] = None\n ) -> Dict[str, str]:\n \"\"\"Fills the event properties for the BotMessageDelete.\n These properties are logged when an activity message is deleted by the Bot.\n :param activity: activity sent from user.\n :param additional_properties: Additional properties to add to the event.\n Additional 
properties can override \"stock\" properties.\n\n :return: A dictionary that is sent as \"Properties\" to the\n BotTelemetryClient.track_event method for the BotMessageUpdate event.\n \"\"\"\n properties = {\n TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,\n TelemetryConstants.CONVERSATION_ID_PROPERTY: activity.conversation.id,\n TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,\n }\n\n # Additional properties can override \"stock\" properties\n if additional_properties:\n for prop in additional_properties:\n properties[prop.key] = prop.value\n\n return properties\n\n @staticmethod\n def __populate_additional_channel_properties(\n activity: Activity, properties: dict,\n ):\n if activity.channel_id == Channels.ms_teams:\n teams_channel_data: TeamsChannelData = activity.channel_data\n\n properties[\"TeamsTenantId\"] = (\n teams_channel_data.tenant\n if teams_channel_data and teams_channel_data.tenant\n else \"\"\n )\n\n properties[\"TeamsUserAadObjectId\"] = (\n activity.from_property.aad_object_id if activity.from_property else \"\"\n )\n\n if teams_channel_data and teams_channel_data.team:\n properties[\"TeamsTeamInfo\"] = TeamInfo.serialize(\n teams_channel_data.team\n )\n", "path": "libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\"\"\"Middleware Component for logging Activity messages.\"\"\"\nfrom typing import Awaitable, Callable, List, Dict\nfrom jsonpickle import encode\n\nfrom botbuilder.schema import Activity, ConversationReference, ActivityTypes\nfrom botbuilder.schema.teams import TeamsChannelData, TeamInfo\nfrom botframework.connector import Channels\n\nfrom .bot_telemetry_client import BotTelemetryClient\nfrom .bot_assert import BotAssert\nfrom .middleware_set import Middleware\nfrom .null_telemetry_client import NullTelemetryClient\nfrom .turn_context import TurnContext\nfrom .telemetry_constants import TelemetryConstants\nfrom .telemetry_logger_constants import TelemetryLoggerConstants\n\n\n# pylint: disable=line-too-long\nclass TelemetryLoggerMiddleware(Middleware):\n \"\"\"Middleware for logging incoming, outgoing, updated or deleted Activity messages.\"\"\"\n\n def __init__(\n self, telemetry_client: BotTelemetryClient, log_personal_information: bool\n ) -> None:\n super(TelemetryLoggerMiddleware, self).__init__()\n self._telemetry_client = telemetry_client or NullTelemetryClient()\n self._log_personal_information = log_personal_information\n\n @property\n def telemetry_client(self) -> BotTelemetryClient:\n \"\"\"Gets the currently configured BotTelemetryClient.\"\"\"\n return self._telemetry_client\n\n @property\n def log_personal_information(self) -> bool:\n \"\"\"Gets a value indicating whether determines whether to log personal\n information that came from the user.\"\"\"\n return self._log_personal_information\n\n # pylint: disable=arguments-differ\n async def on_turn(\n self, context: TurnContext, logic_fn: Callable[[TurnContext], Awaitable]\n ) -> None:\n \"\"\"Logs events based on incoming and outgoing activities using\n BotTelemetryClient base class\n\n :param turn_context: The context object for this turn.\n :param logic: Callable to continue the bot middleware pipeline\n\n :return: None\n \"\"\"\n BotAssert.context_not_none(context)\n\n # Log incoming activity at beginning of turn\n if context.activity:\n activity = context.activity\n # Log Bot Message Received\n await 
self.on_receive_activity(activity)\n\n # hook up onSend pipeline\n # pylint: disable=unused-argument\n async def send_activities_handler(\n ctx: TurnContext,\n activities: List[Activity],\n next_send: Callable[[], Awaitable[None]],\n ):\n # Run full pipeline\n responses = await next_send()\n for activity in activities:\n await self.on_send_activity(activity)\n return responses\n\n context.on_send_activities(send_activities_handler)\n\n # hook up update activity pipeline\n async def update_activity_handler(\n ctx: TurnContext, activity: Activity, next_update: Callable[[], Awaitable]\n ):\n # Run full pipeline\n response = await next_update()\n await self.on_update_activity(activity)\n return response\n\n context.on_update_activity(update_activity_handler)\n\n # hook up delete activity pipeline\n async def delete_activity_handler(\n ctx: TurnContext,\n reference: ConversationReference,\n next_delete: Callable[[], Awaitable],\n ):\n # Run full pipeline\n await next_delete()\n\n delete_msg = Activity(\n type=ActivityTypes.message_delete, id=reference.activity_id\n )\n deleted_activity: Activity = TurnContext.apply_conversation_reference(\n delete_msg, reference, False\n )\n await self.on_delete_activity(deleted_activity)\n\n context.on_delete_activity(delete_activity_handler)\n\n if logic_fn:\n await logic_fn()\n\n async def on_receive_activity(self, activity: Activity) -> None:\n \"\"\"Invoked when a message is received from the user.\n Performs logging of telemetry data using the BotTelemetryClient.track_event() method.\n This event name used is \"BotMessageReceived\".\n :param activity: Current activity sent from user.\n \"\"\"\n self.telemetry_client.track_event(\n TelemetryLoggerConstants.BOT_MSG_RECEIVE_EVENT,\n await self.fill_receive_event_properties(activity),\n )\n\n async def on_send_activity(self, activity: Activity) -> None:\n \"\"\"Invoked when the bot sends a message to the user.\n Performs logging of telemetry data using the BotTelemetryClient.track_event() method.\n This event name used is \"BotMessageSend\".\n :param activity: Current activity sent from bot.\n \"\"\"\n self.telemetry_client.track_event(\n TelemetryLoggerConstants.BOT_MSG_SEND_EVENT,\n await self.fill_send_event_properties(activity),\n )\n\n async def on_update_activity(self, activity: Activity) -> None:\n \"\"\"Invoked when the bot updates a message.\n Performs logging of telemetry data using the BotTelemetryClient.track_event() method.\n This event name used is \"BotMessageUpdate\".\n :param activity: Current activity sent from user.\n \"\"\"\n self.telemetry_client.track_event(\n TelemetryLoggerConstants.BOT_MSG_UPDATE_EVENT,\n await self.fill_update_event_properties(activity),\n )\n\n async def on_delete_activity(self, activity: Activity) -> None:\n \"\"\"Invoked when the bot deletes a message.\n Performs logging of telemetry data using the BotTelemetryClient.track_event() method.\n This event name used is \"BotMessageDelete\".\n :param activity: Current activity sent from user.\n \"\"\"\n self.telemetry_client.track_event(\n TelemetryLoggerConstants.BOT_MSG_DELETE_EVENT,\n await self.fill_delete_event_properties(activity),\n )\n\n async def fill_receive_event_properties(\n self, activity: Activity, additional_properties: Dict[str, str] = None\n ) -> Dict[str, str]:\n \"\"\"Fills the event properties for the BotMessageReceived.\n Adheres to the LogPersonalInformation flag to filter Name, Text and Speak properties.\n :param activity: activity sent from user.\n :param additional_properties: Additional 
properties to add to the event.\n Additional properties can override \"stock\" properties.\n\n :return: A dictionary that is sent as \"Properties\" to\n BotTelemetryClient.track_event method for the BotMessageReceived event.\n \"\"\"\n properties = {\n TelemetryConstants.FROM_ID_PROPERTY: activity.from_property.id\n if activity.from_property\n else None,\n TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,\n TelemetryConstants.LOCALE_PROPERTY: activity.locale,\n TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,\n TelemetryConstants.RECIPIENT_NAME_PROPERTY: activity.recipient.name,\n }\n\n if self.log_personal_information:\n if (\n activity.from_property\n and activity.from_property.name\n and activity.from_property.name.strip()\n ):\n properties[\n TelemetryConstants.FROM_NAME_PROPERTY\n ] = activity.from_property.name\n if activity.text and activity.text.strip():\n properties[TelemetryConstants.TEXT_PROPERTY] = activity.text\n if activity.speak and activity.speak.strip():\n properties[TelemetryConstants.SPEAK_PROPERTY] = activity.speak\n\n TelemetryLoggerMiddleware.__populate_additional_channel_properties(\n activity, properties\n )\n\n # Additional properties can override \"stock\" properties\n if additional_properties:\n for prop in additional_properties:\n properties[prop.key] = prop.value\n\n return properties\n\n async def fill_send_event_properties(\n self, activity: Activity, additional_properties: Dict[str, str] = None\n ) -> Dict[str, str]:\n \"\"\"Fills the event properties for the BotMessageSend.\n These properties are logged when an activity message is sent by the Bot to the user.\n :param activity: activity sent from user.\n :param additional_properties: Additional properties to add to the event.\n Additional properties can override \"stock\" properties.\n\n :return: A dictionary that is sent as \"Properties\" to the\n BotTelemetryClient.track_event method for the BotMessageSend event.\n \"\"\"\n properties = {\n TelemetryConstants.REPLY_ACTIVITY_ID_PROPERTY: activity.reply_to_id,\n TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,\n TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,\n TelemetryConstants.LOCALE_PROPERTY: activity.locale,\n }\n\n # Use the LogPersonalInformation flag to toggle logging PII data, text and user name are common examples\n if self.log_personal_information:\n if activity.attachments and len(activity.attachments) > 0:\n properties[TelemetryConstants.ATTACHMENTS_PROPERTY] = encode(\n activity.attachments\n )\n if activity.from_property.name and activity.from_property.name.strip():\n properties[\n TelemetryConstants.FROM_NAME_PROPERTY\n ] = activity.from_property.name\n if activity.text and activity.text.strip():\n properties[TelemetryConstants.TEXT_PROPERTY] = activity.text\n if activity.speak and activity.speak.strip():\n properties[TelemetryConstants.SPEAK_PROPERTY] = activity.speak\n\n # Additional properties can override \"stock\" properties\n if additional_properties:\n for prop in additional_properties:\n properties[prop.key] = prop.value\n\n return properties\n\n async def fill_update_event_properties(\n self, activity: Activity, additional_properties: Dict[str, str] = None\n ) -> Dict[str, str]:\n \"\"\"Fills the event properties for the BotMessageUpdate.\n These properties are logged when an activity message is updated by the Bot.\n For example, if a card is interacted with by the use, and the card needs\n to be updated to reflect some interaction.\n :param 
activity: activity sent from user.\n :param additional_properties: Additional properties to add to the event.\n Additional properties can override \"stock\" properties.\n\n :return: A dictionary that is sent as \"Properties\" to the\n BotTelemetryClient.track_event method for the BotMessageUpdate event.\n \"\"\"\n properties = {\n TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,\n TelemetryConstants.CONVERSATION_ID_PROPERTY: activity.conversation.id,\n TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,\n TelemetryConstants.LOCALE_PROPERTY: activity.locale,\n }\n\n # Use the LogPersonalInformation flag to toggle logging PII data, text is a common examples\n if self.log_personal_information:\n if activity.text and activity.text.strip():\n properties[TelemetryConstants.TEXT_PROPERTY] = activity.text\n\n # Additional properties can override \"stock\" properties\n if additional_properties:\n for prop in additional_properties:\n properties[prop.key] = prop.value\n\n return properties\n\n async def fill_delete_event_properties(\n self, activity: Activity, additional_properties: Dict[str, str] = None\n ) -> Dict[str, str]:\n \"\"\"Fills the event properties for the BotMessageDelete.\n These properties are logged when an activity message is deleted by the Bot.\n :param activity: activity sent from user.\n :param additional_properties: Additional properties to add to the event.\n Additional properties can override \"stock\" properties.\n\n :return: A dictionary that is sent as \"Properties\" to the\n BotTelemetryClient.track_event method for the BotMessageUpdate event.\n \"\"\"\n properties = {\n TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,\n TelemetryConstants.CONVERSATION_ID_PROPERTY: activity.conversation.id,\n TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,\n }\n\n # Additional properties can override \"stock\" properties\n if additional_properties:\n for prop in additional_properties:\n properties[prop.key] = prop.value\n\n return properties\n\n @staticmethod\n def __populate_additional_channel_properties(\n activity: Activity, properties: dict,\n ):\n if activity.channel_id == Channels.ms_teams:\n teams_channel_data: TeamsChannelData = activity.channel_data\n\n properties[\"TeamsTenantId\"] = (\n teams_channel_data.tenant\n if teams_channel_data and teams_channel_data.tenant\n else \"\"\n )\n\n properties[\"TeamsUserAadObjectId\"] = (\n activity.from_property.aad_object_id if activity.from_property else \"\"\n )\n\n if teams_channel_data and teams_channel_data.team:\n properties[\"TeamsTeamInfo\"] = TeamInfo.serialize(\n teams_channel_data.team\n )\n", "path": "libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py"}]}
| 4,021 | 378 |
gh_patches_debug_2285
|
rasdani/github-patches
|
git_diff
|
jupyterhub__zero-to-jupyterhub-k8s-1710
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CI: linkcheck of our docs fails
### Bug description
Our test fails [like this](https://travis-ci.org/github/jupyterhub/zero-to-jupyterhub-k8s/jobs/699287339), which is initialized as described in `.travis.yml` under `docs:linkcheck`.
```
/home/travis/build/jupyterhub/zero-to-jupyterhub-k8s/doc/source/customizing/user-environment.rst:4: WARNING: Duplicate explicit target name: "jupyterlab".
```
### How to reproduce
I think something like this, but check `.travis.yml` for details on how it is done there.
- Clone repo, enter the `doc/` folder.
- Install doc-requirements.txt with pip.
- `make linkcheck`
--- END ISSUE ---
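
For reference, a minimal shell sketch of the reproduction steps listed in the issue (the clone URL is inferred from the Travis job link, and the requirements filename is the one named in the issue; both should be double-checked against the repository layout):

```
# Sketch of the reproduction steps; assumes git, make, and a Python environment with pip are available.
git clone https://github.com/jupyterhub/zero-to-jupyterhub-k8s.git
cd zero-to-jupyterhub-k8s/doc

# Install the documentation build requirements named in the issue.
pip install -r doc-requirements.txt

# Run Sphinx's linkcheck builder; the warning quoted above is emitted during this step.
make linkcheck
```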
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `doc/source/conf.py`
Content:
```
1
2 # Configuration file for the Sphinx documentation builder.
3 #
4 # This file only contains a selection of the most common options. For a full
5 # list see the documentation:
6 # https://www.sphinx-doc.org/en/master/usage/configuration.html
7
8 # -- Path setup --------------------------------------------------------------
9
10 # If extensions (or modules to document with autodoc) are in another directory,
11 # add these directories to sys.path here. If the directory is relative to the
12 # documentation root, use os.path.abspath to make it absolute, like shown here.
13 #
14 # import os
15 # import sys
16 # sys.path.insert(0, os.path.abspath('.'))
17
18
19 # -- Project specific imports ------------------------------------------------
20
21 from datetime import date
22
23 import yaml
24
25
26 # -- Sphinx setup function ---------------------------------------------------
27 # ref: http://www.sphinx-doc.org/en/latest/extdev/tutorial.html#the-setup-function
28
29 def setup(app):
30 app.add_css_file('custom.css')
31
32
33 # -- Project information -----------------------------------------------------
34 # ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#project-information
35
36 project = 'Zero to JupyterHub with Kubernetes'
37 copyright = '{year}, Project Jupyter Contributors'.format(year=date.today().year)
38 author = 'Project Jupyter Contributors'
39
40 # The version info for the project you're documenting, acts as replacement for
41 # |version| and |release|, also used in various other places throughout the
42 # built documents.
43 #
44 with open('../../jupyterhub/Chart.yaml') as f:
45 chart = yaml.safe_load(f)
46 version = chart['version'].split('-', 1)[0]
47 release = chart['version']
48
49 # Project specific variables
50 # ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#confval-rst_epilog
51 rst_epilog = """
52 .. |hub_version| replace:: {v}
53 """.format(v=chart['appVersion'])
54
55
56 # -- General configuration ---------------------------------------------------
57 # ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#general-configuration
58
59 # Set the default role so we can use `foo` instead of ``foo``
60 default_role = 'literal'
61
62 # Add any Sphinx extension module names here, as strings. They can be
63 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
64 # ones.
65 extensions = ['sphinx.ext.mathjax',
66 'sphinx_copybutton',
67 'myst_parser']
68
69 # List of patterns, relative to source directory, that match files and
70 # directories to ignore when looking for source files.
71 # This pattern also affects html_static_path and html_extra_path.
72 exclude_patterns = []
73
74 # The master toctree document.
75 master_doc = 'index'
76
77 # The suffix(es) of source filenames.
78 source_suffix = ['.rst', '.md']
79
80 # Add any paths that contain templates here, relative to this directory.
81 templates_path = ['_templates']
82
83
84 # -- Generate the Helm chart configuration reference from a schema file ------
85
86 # header
87 with open('reference/reference.txt', 'r') as f:
88 header_md = f.readlines()
89 header_md = header_md[1:]
90 header_md = [ln.strip('\n') for ln in header_md]
91
92 # schema
93 with open('../../jupyterhub/schema.yaml', 'r') as f:
94 data = yaml.safe_load(f)
95 def parse_schema(d, md=[], depth=0, pre=''):
96 """
97 Generate markdown headers from a passed python dictionary created by
98 parsing a schema.yaml file.
99 """
100 if 'properties' in d:
101 depth += 1
102 # Create markdown headers for each schema level
103 for key, val in d['properties'].items():
104 md.append('#'*(depth + 1) + ' ' + pre + key)
105 md.append('')
106 if 'description' in val:
107 for ln in val['description'].split('\n'):
108 md.append(ln)
109 md.append('')
110
111 parse_schema(val, md, depth, pre+'{}.'.format(key))
112 depth -= 1
113 return md
114 schema_md = parse_schema(data)
115
116 # reference = header + schema
117 reference_md = header_md + schema_md
118 with open('reference/reference.md', 'w') as f:
119 f.write('\n'.join(reference_md))
120
121
122 # -- Options for linkcheck builder -------------------------------------------
123 # ref: http://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-the-linkcheck-builder
124 linkcheck_ignore = [
125 r'(.*)github\.com(.*)#', # javascript based anchors
126 r'(.*)/#%21(.*)/(.*)', # /#!forum/jupyter - encoded anchor edge case
127 "https://your-domain.com", # example
128 "https://your-domain-name.com", # example
129 "https://kubernetes.io/docs/tutorials/kubernetes-basics/", # works
130 "https://cloud.ibm.com/kubernetes/catalog/create", # works
131 ]
132 linkcheck_anchors_ignore = [
133 "/#!",
134 "/#%21",
135 ]
136
137
138 # -- Options for HTML output -------------------------------------------------
139 # ref: http://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-html-output
140
141 # The theme to use for HTML and HTML Help pages. See the documentation for
142 # a list of builtin themes.
143 #
144 html_theme = 'pydata_sphinx_theme'
145 html_favicon = '_static/images/logo/favicon.ico'
146 html_logo = '_static/images/logo/logo.png'
147
148 # Add any paths that contain custom static files (such as style sheets) here,
149 # relative to this directory. They are copied after the builtin static files,
150 # so a file named "default.css" will overwrite the builtin "default.css".
151 html_static_path = ['_static']
152
153
154
155
156 # -- Below are options for more esoteric output -------------------------------
157 # -----------------------------------------------------------------------------
158
159
160 # -- Options for HTML help output ---------------------------------------------
161 # ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-html-help-output
162
163 # Output file base name for HTML help builder.
164 htmlhelp_basename = 'ZeroToJupyterhubDoc'
165
166
167 # -- Options for LaTeX output ------------------------------------------------
168 # ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-latex-output
169
170 latex_elements = {
171 # The paper size ('letterpaper' or 'a4paper').
172 #
173 # 'papersize': 'letterpaper',
174
175 # The font size ('10pt', '11pt' or '12pt').
176 #
177 # 'pointsize': '10pt',
178
179 # Additional stuff for the LaTeX preamble.
180 #
181 # 'preamble': '',
182
183 # Latex figure (float) alignment
184 #
185 # 'figure_align': 'htbp',
186 }
187
188 # Grouping the document tree into LaTeX files.
189 latex_documents = [
190 (
191 master_doc, # source start file
192 'ZeroToJupyterhubDoc.tex', # target name
193 'Zero to JupyterHub', # title
194 author, # author
195 'manual' # documentclass [howto, manual, or own class]
196 ),
197 ]
198
199
200 # -- Options for manual page output ------------------------------------------
201 # ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-manual-page-output
202
203 # One entry per manual page.
204 man_pages = [
205 (
206 master_doc, # source start file
207 'zerotojupyterhub', # name
208 'Zero to JupyterHub', # description
209 [author], # authors
210 1, # manual section
211 ),
212 ]
213
214
215 # -- Options for Texinfo output ----------------------------------------------
216 # ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-texinfo-output
217
218 # Grouping the document tree into Texinfo files.
219 texinfo_documents = [
220 (
221 master_doc, # source start file
222 'ZeroToJupyterhubDoc', # target name
223 'Zero to JupyterHub', # title
224 author, # author
225 'ZeroToJupyterhubDoc', # dir menu entry
226 'One line description of project.', # description
227 'Miscellaneous' # category
228 ),
229 ]
230
231
232 # -- Options for epub output -------------------------------------------------
233 # ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-epub-output
234
235 # Bibliographic Dublin Core info.
236 epub_title = project
237 epub_author = author
238 epub_publisher = author
239 epub_copyright = copyright
240
241 # The unique identifier of the text. This can be a ISBN number
242 # or the project homepage.
243 #
244 # epub_identifier = ''
245
246 # A unique identification for the text.
247 #
248 # epub_uid = ''
249
250 # A list of files that should not be packed into the epub file.
251 epub_exclude_files = ['search.html']
252
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/doc/source/conf.py b/doc/source/conf.py
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -77,9 +77,6 @@
# The suffix(es) of source filenames.
source_suffix = ['.rst', '.md']
-# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
-
# -- Generate the Helm chart configuration reference from a schema file ------
|
{"golden_diff": "diff --git a/doc/source/conf.py b/doc/source/conf.py\n--- a/doc/source/conf.py\n+++ b/doc/source/conf.py\n@@ -77,9 +77,6 @@\n # The suffix(es) of source filenames.\n source_suffix = ['.rst', '.md']\n \n-# Add any paths that contain templates here, relative to this directory.\n-templates_path = ['_templates']\n-\n \n # -- Generate the Helm chart configuration reference from a schema file ------\n", "issue": "CI: linkcheck of our docs fails\n### Bug description\r\nOur test fails [like this](https://travis-ci.org/github/jupyterhub/zero-to-jupyterhub-k8s/jobs/699287339), which is initialized as described in `.travis.yml` under `docs:linkcheck`.\r\n\r\n```\r\n/home/travis/build/jupyterhub/zero-to-jupyterhub-k8s/doc/source/customizing/user-environment.rst:4: WARNING: Duplicate explicit target name: \"jupyterlab\".\r\n```\r\n\r\n### How to reproduce\r\n\r\nI think something like this, but check `.travis.yml` for details on how it is done there.\r\n\r\n- Clone repo, enter the `doc/` folder.\r\n- Install doc-requirements.txt with pip.\r\n- `make linkcheck`\n", "before_files": [{"content": "\n# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\n\n# -- Project specific imports ------------------------------------------------\n\nfrom datetime import date\n\nimport yaml\n\n\n# -- Sphinx setup function ---------------------------------------------------\n# ref: http://www.sphinx-doc.org/en/latest/extdev/tutorial.html#the-setup-function\n\ndef setup(app):\n app.add_css_file('custom.css')\n\n\n# -- Project information -----------------------------------------------------\n# ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#project-information\n\nproject = 'Zero to JupyterHub with Kubernetes'\ncopyright = '{year}, Project Jupyter Contributors'.format(year=date.today().year)\nauthor = 'Project Jupyter Contributors'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\nwith open('../../jupyterhub/Chart.yaml') as f:\n chart = yaml.safe_load(f)\nversion = chart['version'].split('-', 1)[0]\nrelease = chart['version']\n\n# Project specific variables\n# ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#confval-rst_epilog\nrst_epilog = \"\"\"\n.. |hub_version| replace:: {v}\n\"\"\".format(v=chart['appVersion'])\n\n\n# -- General configuration ---------------------------------------------------\n# ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#general-configuration\n\n# Set the default role so we can use `foo` instead of ``foo``\ndefault_role = 'literal'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = ['sphinx.ext.mathjax',\n 'sphinx_copybutton',\n 'myst_parser']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The suffix(es) of source filenames.\nsource_suffix = ['.rst', '.md']\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n\n# -- Generate the Helm chart configuration reference from a schema file ------\n\n# header\nwith open('reference/reference.txt', 'r') as f:\n header_md = f.readlines()\nheader_md = header_md[1:]\nheader_md = [ln.strip('\\n') for ln in header_md]\n\n# schema\nwith open('../../jupyterhub/schema.yaml', 'r') as f:\n data = yaml.safe_load(f)\ndef parse_schema(d, md=[], depth=0, pre=''):\n \"\"\"\n Generate markdown headers from a passed python dictionary created by\n parsing a schema.yaml file.\n \"\"\"\n if 'properties' in d:\n depth += 1\n # Create markdown headers for each schema level\n for key, val in d['properties'].items():\n md.append('#'*(depth + 1) + ' ' + pre + key)\n md.append('')\n if 'description' in val:\n for ln in val['description'].split('\\n'):\n md.append(ln)\n md.append('')\n\n parse_schema(val, md, depth, pre+'{}.'.format(key))\n depth -= 1\n return md\nschema_md = parse_schema(data)\n\n# reference = header + schema\nreference_md = header_md + schema_md\nwith open('reference/reference.md', 'w') as f:\n f.write('\\n'.join(reference_md))\n\n\n# -- Options for linkcheck builder -------------------------------------------\n# ref: http://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-the-linkcheck-builder\nlinkcheck_ignore = [\n r'(.*)github\\.com(.*)#', # javascript based anchors\n r'(.*)/#%21(.*)/(.*)', # /#!forum/jupyter - encoded anchor edge case\n \"https://your-domain.com\", # example\n \"https://your-domain-name.com\", # example\n \"https://kubernetes.io/docs/tutorials/kubernetes-basics/\", # works\n \"https://cloud.ibm.com/kubernetes/catalog/create\", # works\n]\nlinkcheck_anchors_ignore = [\n \"/#!\",\n \"/#%21\",\n]\n\n\n# -- Options for HTML output -------------------------------------------------\n# ref: http://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-html-output\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'pydata_sphinx_theme'\nhtml_favicon = '_static/images/logo/favicon.ico'\nhtml_logo = '_static/images/logo/logo.png'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n\n\n\n# -- Below are options for more esoteric output -------------------------------\n# -----------------------------------------------------------------------------\n\n\n# -- Options for HTML help output ---------------------------------------------\n# ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-html-help-output\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'ZeroToJupyterhubDoc'\n\n\n# -- Options for LaTeX output ------------------------------------------------\n# ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-latex-output\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files.\nlatex_documents = [\n (\n master_doc, # source start file\n 'ZeroToJupyterhubDoc.tex', # target name\n 'Zero to JupyterHub', # title\n author, # author\n 'manual' # documentclass [howto, manual, or own class]\n ),\n]\n\n\n# -- Options for manual page output ------------------------------------------\n# ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-manual-page-output\n\n# One entry per manual page.\nman_pages = [\n (\n master_doc, # source start file\n 'zerotojupyterhub', # name\n 'Zero to JupyterHub', # description\n [author], # authors\n 1, # manual section\n ),\n]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n# ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-texinfo-output\n\n# Grouping the document tree into Texinfo files.\ntexinfo_documents = [\n (\n master_doc, # source start file\n 'ZeroToJupyterhubDoc', # target name\n 'Zero to JupyterHub', # title\n author, # author\n 'ZeroToJupyterhubDoc', # dir menu entry\n 'One line description of project.', # description\n 'Miscellaneous' # category\n ),\n]\n\n\n# -- Options for epub output -------------------------------------------------\n# ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-epub-output\n\n# Bibliographic Dublin Core info.\nepub_title = project\nepub_author = author\nepub_publisher = author\nepub_copyright = copyright\n\n# The unique identifier of the text. This can be a ISBN number\n# or the project homepage.\n#\n# epub_identifier = ''\n\n# A unique identification for the text.\n#\n# epub_uid = ''\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = ['search.html']\n", "path": "doc/source/conf.py"}], "after_files": [{"content": "\n# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. 
If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\n\n# -- Project specific imports ------------------------------------------------\n\nfrom datetime import date\n\nimport yaml\n\n\n# -- Sphinx setup function ---------------------------------------------------\n# ref: http://www.sphinx-doc.org/en/latest/extdev/tutorial.html#the-setup-function\n\ndef setup(app):\n app.add_css_file('custom.css')\n\n\n# -- Project information -----------------------------------------------------\n# ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#project-information\n\nproject = 'Zero to JupyterHub with Kubernetes'\ncopyright = '{year}, Project Jupyter Contributors'.format(year=date.today().year)\nauthor = 'Project Jupyter Contributors'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\nwith open('../../jupyterhub/Chart.yaml') as f:\n chart = yaml.safe_load(f)\nversion = chart['version'].split('-', 1)[0]\nrelease = chart['version']\n\n# Project specific variables\n# ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#confval-rst_epilog\nrst_epilog = \"\"\"\n.. |hub_version| replace:: {v}\n\"\"\".format(v=chart['appVersion'])\n\n\n# -- General configuration ---------------------------------------------------\n# ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#general-configuration\n\n# Set the default role so we can use `foo` instead of ``foo``\ndefault_role = 'literal'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = ['sphinx.ext.mathjax',\n 'sphinx_copybutton',\n 'myst_parser']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The suffix(es) of source filenames.\nsource_suffix = ['.rst', '.md']\n\n\n# -- Generate the Helm chart configuration reference from a schema file ------\n\n# header\nwith open('reference/reference.txt', 'r') as f:\n header_md = f.readlines()\nheader_md = header_md[1:]\nheader_md = [ln.strip('\\n') for ln in header_md]\n\n# schema\nwith open('../../jupyterhub/schema.yaml', 'r') as f:\n data = yaml.safe_load(f)\ndef parse_schema(d, md=[], depth=0, pre=''):\n \"\"\"\n Generate markdown headers from a passed python dictionary created by\n parsing a schema.yaml file.\n \"\"\"\n if 'properties' in d:\n depth += 1\n # Create markdown headers for each schema level\n for key, val in d['properties'].items():\n md.append('#'*(depth + 1) + ' ' + pre + key)\n md.append('')\n if 'description' in val:\n for ln in val['description'].split('\\n'):\n md.append(ln)\n md.append('')\n\n parse_schema(val, md, depth, pre+'{}.'.format(key))\n depth -= 1\n return md\nschema_md = parse_schema(data)\n\n# reference = header + schema\nreference_md = header_md + schema_md\nwith open('reference/reference.md', 'w') as f:\n f.write('\\n'.join(reference_md))\n\n\n# -- Options for linkcheck builder -------------------------------------------\n# ref: 
http://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-the-linkcheck-builder\nlinkcheck_ignore = [\n r'(.*)github\\.com(.*)#', # javascript based anchors\n r'(.*)/#%21(.*)/(.*)', # /#!forum/jupyter - encoded anchor edge case\n \"https://your-domain.com\", # example\n \"https://your-domain-name.com\", # example\n \"https://kubernetes.io/docs/tutorials/kubernetes-basics/\", # works\n \"https://cloud.ibm.com/kubernetes/catalog/create\", # works\n]\nlinkcheck_anchors_ignore = [\n \"/#!\",\n \"/#%21\",\n]\n\n\n# -- Options for HTML output -------------------------------------------------\n# ref: http://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-html-output\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'pydata_sphinx_theme'\nhtml_favicon = '_static/images/logo/favicon.ico'\nhtml_logo = '_static/images/logo/logo.png'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n\n\n\n# -- Below are options for more esoteric output -------------------------------\n# -----------------------------------------------------------------------------\n\n\n# -- Options for HTML help output ---------------------------------------------\n# ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-html-help-output\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'ZeroToJupyterhubDoc'\n\n\n# -- Options for LaTeX output ------------------------------------------------\n# ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-latex-output\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files.\nlatex_documents = [\n (\n master_doc, # source start file\n 'ZeroToJupyterhubDoc.tex', # target name\n 'Zero to JupyterHub', # title\n author, # author\n 'manual' # documentclass [howto, manual, or own class]\n ),\n]\n\n\n# -- Options for manual page output ------------------------------------------\n# ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-manual-page-output\n\n# One entry per manual page.\nman_pages = [\n (\n master_doc, # source start file\n 'zerotojupyterhub', # name\n 'Zero to JupyterHub', # description\n [author], # authors\n 1, # manual section\n ),\n]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n# ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-texinfo-output\n\n# Grouping the document tree into Texinfo files.\ntexinfo_documents = [\n (\n master_doc, # source start file\n 'ZeroToJupyterhubDoc', # target name\n 'Zero to JupyterHub', # title\n author, # author\n 'ZeroToJupyterhubDoc', # dir menu entry\n 'One line description of project.', # description\n 'Miscellaneous' # category\n ),\n]\n\n\n# -- Options for epub output -------------------------------------------------\n# ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-epub-output\n\n# Bibliographic Dublin Core 
info.\nepub_title = project\nepub_author = author\nepub_publisher = author\nepub_copyright = copyright\n\n# The unique identifier of the text. This can be a ISBN number\n# or the project homepage.\n#\n# epub_identifier = ''\n\n# A unique identification for the text.\n#\n# epub_uid = ''\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = ['search.html']\n", "path": "doc/source/conf.py"}]}
| 2,946 | 98 |
gh_patches_debug_6925
|
rasdani/github-patches
|
git_diff
|
mindsdb__mindsdb-2205
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Result of query with same column names will return different column names
The issue exists for any 'internal' tables. If you query multiple columns with the same name:
```
select 1 as "a", 1 as "a" from information_schema.tables limit 1;
```
then result will be:
```
+------+------+
| a | a_2 |
+------+------+
| 1 | 1 |
+------+------+
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mindsdb/api/mysql/mysql_proxy/utilities/sql.py`
Content:
```
1 import duckdb
2 import numpy as np
3 from mindsdb_sql import parse_sql
4 from mindsdb_sql.parser.ast import Select, Identifier, BinaryOperation, OrderBy
5 from mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender
6
7 from mindsdb.utilities.log import log
8
9
10 def _remove_table_name(root):
11 if isinstance(root, BinaryOperation):
12 _remove_table_name(root.args[0])
13 _remove_table_name(root.args[1])
14 elif isinstance(root, Identifier):
15 root.parts = [root.parts[-1]]
16
17
18 def query_df(df, query):
19 """ Perform simple query ('select' from one table, without subqueries and joins) on DataFrame.
20
21 Args:
22 df (pandas.DataFrame): data
23 query (mindsdb_sql.parser.ast.Select | str): select query
24
25 Returns:
26 pandas.DataFrame
27 """
28
29 if isinstance(query, str):
30 query_ast = parse_sql(query, dialect='mysql')
31 else:
32 query_ast = query
33
34 if isinstance(query_ast, Select) is False or isinstance(query_ast.from_table, Identifier) is False:
35 raise Exception("Only 'SELECT from TABLE' statements supported for internal query")
36
37 query_ast.from_table.parts = ['df_table']
38 for identifier in query_ast.targets:
39 if isinstance(identifier, Identifier):
40 identifier.parts = [identifier.parts[-1]]
41 if isinstance(query_ast.order_by, list):
42 for orderby in query_ast.order_by:
43 if isinstance(orderby, OrderBy) and isinstance(orderby.field, Identifier):
44 orderby.field.parts = [orderby.field.parts[-1]]
45 _remove_table_name(query_ast.where)
46
47 render = SqlalchemyRender('postgres')
48 try:
49 query_str = render.get_string(query_ast, with_failback=False)
50 except Exception as e:
51 log.error(f"Exception during query casting to 'postgres' dialect. Query: {str(query)}. Error: {e}")
52 query_str = render.get_string(query_ast, with_failback=True)
53
54 res = duckdb.query_df(df, 'df_table', query_str)
55 result_df = res.df()
56 result_df = result_df.replace({np.nan: None})
57 return result_df
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mindsdb/api/mysql/mysql_proxy/utilities/sql.py b/mindsdb/api/mysql/mysql_proxy/utilities/sql.py
--- a/mindsdb/api/mysql/mysql_proxy/utilities/sql.py
+++ b/mindsdb/api/mysql/mysql_proxy/utilities/sql.py
@@ -54,4 +54,13 @@
res = duckdb.query_df(df, 'df_table', query_str)
result_df = res.df()
result_df = result_df.replace({np.nan: None})
+
+ new_column_names = {}
+ real_column_names = [x[0] for x in res.description()]
+ for i, duck_column_name in enumerate(result_df.columns):
+ new_column_names[duck_column_name] = real_column_names[i]
+ result_df = result_df.rename(
+ new_column_names,
+ axis='columns'
+ )
return result_df
|
{"golden_diff": "diff --git a/mindsdb/api/mysql/mysql_proxy/utilities/sql.py b/mindsdb/api/mysql/mysql_proxy/utilities/sql.py\n--- a/mindsdb/api/mysql/mysql_proxy/utilities/sql.py\n+++ b/mindsdb/api/mysql/mysql_proxy/utilities/sql.py\n@@ -54,4 +54,13 @@\n res = duckdb.query_df(df, 'df_table', query_str)\n result_df = res.df()\n result_df = result_df.replace({np.nan: None})\n+\n+ new_column_names = {}\n+ real_column_names = [x[0] for x in res.description()]\n+ for i, duck_column_name in enumerate(result_df.columns):\n+ new_column_names[duck_column_name] = real_column_names[i]\n+ result_df = result_df.rename(\n+ new_column_names,\n+ axis='columns'\n+ )\n return result_df\n", "issue": "Result of query with same column names with return different column names\nIssue exists for any 'internal' tables. If query multiple columns with same name:\r\n```\r\nselect 1 as \"a\", 1 as \"a\" from information_schema.tables limit 1;\r\n```\r\nthen result will be:\r\n```\r\n+------+------+\r\n| a | a_2 |\r\n+------+------+\r\n| 1 | 1 |\r\n+------+------+\r\n```\n", "before_files": [{"content": "import duckdb\nimport numpy as np\nfrom mindsdb_sql import parse_sql\nfrom mindsdb_sql.parser.ast import Select, Identifier, BinaryOperation, OrderBy\nfrom mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender\n\nfrom mindsdb.utilities.log import log\n\n\ndef _remove_table_name(root):\n if isinstance(root, BinaryOperation):\n _remove_table_name(root.args[0])\n _remove_table_name(root.args[1])\n elif isinstance(root, Identifier):\n root.parts = [root.parts[-1]]\n\n\ndef query_df(df, query):\n \"\"\" Perform simple query ('select' from one table, without subqueries and joins) on DataFrame.\n\n Args:\n df (pandas.DataFrame): data\n query (mindsdb_sql.parser.ast.Select | str): select query\n\n Returns:\n pandas.DataFrame\n \"\"\"\n\n if isinstance(query, str):\n query_ast = parse_sql(query, dialect='mysql')\n else:\n query_ast = query\n\n if isinstance(query_ast, Select) is False or isinstance(query_ast.from_table, Identifier) is False:\n raise Exception(\"Only 'SELECT from TABLE' statements supported for internal query\")\n\n query_ast.from_table.parts = ['df_table']\n for identifier in query_ast.targets:\n if isinstance(identifier, Identifier):\n identifier.parts = [identifier.parts[-1]]\n if isinstance(query_ast.order_by, list):\n for orderby in query_ast.order_by:\n if isinstance(orderby, OrderBy) and isinstance(orderby.field, Identifier):\n orderby.field.parts = [orderby.field.parts[-1]]\n _remove_table_name(query_ast.where)\n\n render = SqlalchemyRender('postgres')\n try:\n query_str = render.get_string(query_ast, with_failback=False)\n except Exception as e:\n log.error(f\"Exception during query casting to 'postgres' dialect. Query: {str(query)}. 
Error: {e}\")\n query_str = render.get_string(query_ast, with_failback=True)\n\n res = duckdb.query_df(df, 'df_table', query_str)\n result_df = res.df()\n result_df = result_df.replace({np.nan: None})\n return result_df\n", "path": "mindsdb/api/mysql/mysql_proxy/utilities/sql.py"}], "after_files": [{"content": "import duckdb\nimport numpy as np\nfrom mindsdb_sql import parse_sql\nfrom mindsdb_sql.parser.ast import Select, Identifier, BinaryOperation, OrderBy\nfrom mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender\n\nfrom mindsdb.utilities.log import log\n\n\ndef _remove_table_name(root):\n if isinstance(root, BinaryOperation):\n _remove_table_name(root.args[0])\n _remove_table_name(root.args[1])\n elif isinstance(root, Identifier):\n root.parts = [root.parts[-1]]\n\n\ndef query_df(df, query):\n \"\"\" Perform simple query ('select' from one table, without subqueries and joins) on DataFrame.\n\n Args:\n df (pandas.DataFrame): data\n query (mindsdb_sql.parser.ast.Select | str): select query\n\n Returns:\n pandas.DataFrame\n \"\"\"\n\n if isinstance(query, str):\n query_ast = parse_sql(query, dialect='mysql')\n else:\n query_ast = query\n\n if isinstance(query_ast, Select) is False or isinstance(query_ast.from_table, Identifier) is False:\n raise Exception(\"Only 'SELECT from TABLE' statements supported for internal query\")\n\n query_ast.from_table.parts = ['df_table']\n for identifier in query_ast.targets:\n if isinstance(identifier, Identifier):\n identifier.parts = [identifier.parts[-1]]\n if isinstance(query_ast.order_by, list):\n for orderby in query_ast.order_by:\n if isinstance(orderby, OrderBy) and isinstance(orderby.field, Identifier):\n orderby.field.parts = [orderby.field.parts[-1]]\n _remove_table_name(query_ast.where)\n\n render = SqlalchemyRender('postgres')\n try:\n query_str = render.get_string(query_ast, with_failback=False)\n except Exception as e:\n log.error(f\"Exception during query casting to 'postgres' dialect. Query: {str(query)}. Error: {e}\")\n query_str = render.get_string(query_ast, with_failback=True)\n\n res = duckdb.query_df(df, 'df_table', query_str)\n result_df = res.df()\n result_df = result_df.replace({np.nan: None})\n\n new_column_names = {}\n real_column_names = [x[0] for x in res.description()]\n for i, duck_column_name in enumerate(result_df.columns):\n new_column_names[duck_column_name] = real_column_names[i]\n result_df = result_df.rename(\n new_column_names,\n axis='columns'\n )\n return result_df\n", "path": "mindsdb/api/mysql/mysql_proxy/utilities/sql.py"}]}
| 921 | 189 |
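For readers comparing the issue above with its patch: duckdb deduplicates repeated output column names (returning `a` and `a_2`), while the names the user actually requested remain available from the result description. The snippet below is a minimal, illustrative sketch of the renaming step only; `real_column_names` stands in for what the patch reads from `res.description()`, and plain pandas is used so the sketch stays self-contained rather than depending on a duckdb result object.

```python
import pandas as pd

# What duckdb hands back for `select 1 as "a", 1 as "a" ...`:
# the duplicate name is deduplicated to "a" and "a_2".
result_df = pd.DataFrame({"a": [1], "a_2": [1]})

# Stand-in for the original names recovered via res.description() in the patch.
real_column_names = ["a", "a"]

# Restore the user-requested names (pandas allows duplicate column labels).
result_df.columns = real_column_names
print(result_df.columns.tolist())  # ['a', 'a']
```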
gh_patches_debug_53374
|
rasdani/github-patches
|
git_diff
|
chainer__chainer-7561
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support ellipsis in `Array::At` and `__getitem__`
Depends on #7559 because `py::ellipsis` is supported from v2.3.0.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainerx/_fallback_workarounds.py`
Content:
```
1 # This file defines workaround implementation for
2 # NumPy-compatibility functions that fall back to NumPy/CuPy functions
3 # for native/cuda devices respecitvely.
4 # The workaround does not support backprop, and also requires external
5 # libraries mentioned above.
6 # Functions defined in this file should be considered to have high priority for
7 # genuine implementations.
8 import numpy
9
10 import chainerx
11
12
13 try:
14 import cupy
15 except Exception:
16 cupy = None
17
18
19 class _DummyContext:
20 def __enter__(self):
21 pass
22
23 def __exit__(self, type, value, traceback):
24 pass
25
26
27 _dummy_context = _DummyContext()
28
29
30 def _to_numpy(array):
31 assert isinstance(array, chainerx.ndarray)
32 return chainerx.to_numpy(array, copy=False)
33
34
35 def _from_numpy(array):
36 assert isinstance(array, numpy.ndarray)
37 return chainerx.array(array, copy=False)
38
39
40 def _to_cupy(array):
41 assert cupy is not None
42 # Convert to cupy.ndarray on the same device as source array
43 return chainerx._to_cupy(array)
44
45
46 def _from_cupy(array):
47 assert cupy is not None
48 assert isinstance(array, cupy.ndarray)
49 device = chainerx.get_device('cuda', array.device.id)
50 return chainerx._core._fromrawpointer(
51 array.data.mem.ptr,
52 array.shape,
53 array.dtype,
54 array.strides,
55 device,
56 array.data.ptr - array.data.mem.ptr,
57 array)
58
59
60 def _from_chx(array, check_backprop=True):
61 # Converts chainerx.ndarray to numpy/cupy.ndarray.
62 # Objects with other types are kept intact.
63 # Returns a pair: (xp, cupy device or dummy context, numpy/cupy.ndarray).
64 if not isinstance(array, chainerx.ndarray):
65 if (isinstance(array, numpy.ndarray)
66 or (cupy and isinstance(array, cupy.ndarray))):
67 raise TypeError(
68 'ChainerX function fallback using NumPy/CuPy arrays '
69 'is not supported.')
70 # _from_chx is also called for slice and tuple objects
71 # Used to index a chx array
72 return None, _dummy_context, array
73 if check_backprop and array.is_backprop_required():
74 raise RuntimeError(
75 'ChainerX function fallback using NumPy/CuPy is not '
76 'supported for arrays that are connected to a graph.')
77 backend_name = array.device.backend.name
78 if backend_name == 'native':
79 return numpy, _dummy_context, _to_numpy(array)
80 if backend_name == 'cuda':
81 if cupy is None:
82 raise RuntimeError(
83 'ChainerX fallback implementation for cuda backend requires '
84 'cupy to be installed.')
85 array_cupy = _to_cupy(array)
86 return cupy, array_cupy.device, array_cupy
87 raise RuntimeError(
88 'ChainerX fallback implementation only supports native or cuda '
89 'backends.')
90
91
92 def _to_chx(array):
93 # Converts numpy/cupy.ndarray to chainerx.ndarray.
94 # Objects with other types are kept intact.
95 if isinstance(array, numpy.ndarray):
96 return _from_numpy(array)
97 elif cupy is not None and isinstance(array, cupy.ndarray):
98 return _from_cupy(array)
99 return array
100
101
102 def _populate_module_functions():
103
104 def _fix(arr):
105 xp, dev, arr = _from_chx(arr)
106 with dev:
107 ret = xp.fix(arr)
108 ret = xp.asarray(ret)
109 return _to_chx(ret)
110
111 chainerx.fix = _fix
112
113
114 def _populate_ndarray():
115 ndarray = chainerx.ndarray
116
117 # __getitem__ with advanced indexing
118 old_getitem = ndarray.__getitem__
119
120 def __getitem__(arr, key):
121 try:
122 return old_getitem(arr, key)
123 except (IndexError, chainerx.DimensionError):
124 pass
125
126 is_backprop_required = arr.is_backprop_required()
127
128 xp, dev, arr = _from_chx(arr, check_backprop=False)
129 # The elements used for indexing the array might be
130 # also ChainerX arrays. _from_chx ignores
131 # other types and return them as-is
132 if isinstance(key, tuple):
133 key = tuple([_from_chx(k, check_backprop=False)[2] for k in key])
134 else:
135 _, _, key = _from_chx(key, check_backprop=False)
136
137 with dev:
138 ret = arr[key]
139
140 # Doing this check after the fallback __getitem__ because the error
141 # which caused the fallback might not be due to advanced indexing.
142 # In such case the fallback __getitem__ should also raise the error.
143
144 if is_backprop_required:
145 raise RuntimeError(
146 'ChainerX getitem fallback for advanced indexing is not '
147 'supported for arrays that are connected to a graph.')
148
149 return _to_chx(ret)
150
151 # __setitem__ with advanced indexing
152 def __setitem__(self, key, value):
153 if self.is_backprop_required():
154 raise RuntimeError(
155 'ChainerX setitem fallback for advanced indexing is not '
156 'supported for arrays that are connected to a graph.')
157
158 xp, dev, self = _from_chx(self)
159 if isinstance(key, tuple):
160 key = tuple([_from_chx(k)[2] for k in key])
161 else:
162 _, _, key = _from_chx(key)
163 _, _, value = _from_chx(value)
164
165 with dev:
166 self[key] = value
167
168 ndarray.__setitem__ = __setitem__
169 ndarray.__getitem__ = __getitem__
170
171 def tolist(arr):
172 _, dev, arr = _from_chx(arr)
173 with dev:
174 ret = arr.tolist()
175 return ret
176
177 ndarray.tolist = tolist
178
179
180 def populate():
181 _populate_module_functions()
182 _populate_ndarray()
183
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/chainerx/_fallback_workarounds.py b/chainerx/_fallback_workarounds.py
--- a/chainerx/_fallback_workarounds.py
+++ b/chainerx/_fallback_workarounds.py
@@ -118,10 +118,8 @@
old_getitem = ndarray.__getitem__
def __getitem__(arr, key):
- try:
+ if not isinstance(key, chainerx.ndarray):
return old_getitem(arr, key)
- except (IndexError, chainerx.DimensionError):
- pass
is_backprop_required = arr.is_backprop_required()
|
{"golden_diff": "diff --git a/chainerx/_fallback_workarounds.py b/chainerx/_fallback_workarounds.py\n--- a/chainerx/_fallback_workarounds.py\n+++ b/chainerx/_fallback_workarounds.py\n@@ -118,10 +118,8 @@\n old_getitem = ndarray.__getitem__\n \n def __getitem__(arr, key):\n- try:\n+ if not isinstance(key, chainerx.ndarray):\n return old_getitem(arr, key)\n- except (IndexError, chainerx.DimensionError):\n- pass\n \n is_backprop_required = arr.is_backprop_required()\n", "issue": "Support ellipsis in `Array::At` and `__getitem__`\nDepends on #7559 because `py::ellipsis` is supported from v2.3.0.\n", "before_files": [{"content": "# This file defines workaround implementation for\n# NumPy-compatibility functions that fall back to NumPy/CuPy functions\n# for native/cuda devices respecitvely.\n# The workaround does not support backprop, and also requires external\n# libraries mentioned above.\n# Functions defined in this file should be considered to have high priority for\n# genuine implementations.\nimport numpy\n\nimport chainerx\n\n\ntry:\n import cupy\nexcept Exception:\n cupy = None\n\n\nclass _DummyContext:\n def __enter__(self):\n pass\n\n def __exit__(self, type, value, traceback):\n pass\n\n\n_dummy_context = _DummyContext()\n\n\ndef _to_numpy(array):\n assert isinstance(array, chainerx.ndarray)\n return chainerx.to_numpy(array, copy=False)\n\n\ndef _from_numpy(array):\n assert isinstance(array, numpy.ndarray)\n return chainerx.array(array, copy=False)\n\n\ndef _to_cupy(array):\n assert cupy is not None\n # Convert to cupy.ndarray on the same device as source array\n return chainerx._to_cupy(array)\n\n\ndef _from_cupy(array):\n assert cupy is not None\n assert isinstance(array, cupy.ndarray)\n device = chainerx.get_device('cuda', array.device.id)\n return chainerx._core._fromrawpointer(\n array.data.mem.ptr,\n array.shape,\n array.dtype,\n array.strides,\n device,\n array.data.ptr - array.data.mem.ptr,\n array)\n\n\ndef _from_chx(array, check_backprop=True):\n # Converts chainerx.ndarray to numpy/cupy.ndarray.\n # Objects with other types are kept intact.\n # Returns a pair: (xp, cupy device or dummy context, numpy/cupy.ndarray).\n if not isinstance(array, chainerx.ndarray):\n if (isinstance(array, numpy.ndarray)\n or (cupy and isinstance(array, cupy.ndarray))):\n raise TypeError(\n 'ChainerX function fallback using NumPy/CuPy arrays '\n 'is not supported.')\n # _from_chx is also called for slice and tuple objects\n # Used to index a chx array\n return None, _dummy_context, array\n if check_backprop and array.is_backprop_required():\n raise RuntimeError(\n 'ChainerX function fallback using NumPy/CuPy is not '\n 'supported for arrays that are connected to a graph.')\n backend_name = array.device.backend.name\n if backend_name == 'native':\n return numpy, _dummy_context, _to_numpy(array)\n if backend_name == 'cuda':\n if cupy is None:\n raise RuntimeError(\n 'ChainerX fallback implementation for cuda backend requires '\n 'cupy to be installed.')\n array_cupy = _to_cupy(array)\n return cupy, array_cupy.device, array_cupy\n raise RuntimeError(\n 'ChainerX fallback implementation only supports native or cuda '\n 'backends.')\n\n\ndef _to_chx(array):\n # Converts numpy/cupy.ndarray to chainerx.ndarray.\n # Objects with other types are kept intact.\n if isinstance(array, numpy.ndarray):\n return _from_numpy(array)\n elif cupy is not None and isinstance(array, cupy.ndarray):\n return _from_cupy(array)\n return array\n\n\ndef _populate_module_functions():\n\n def _fix(arr):\n xp, dev, arr = 
_from_chx(arr)\n with dev:\n ret = xp.fix(arr)\n ret = xp.asarray(ret)\n return _to_chx(ret)\n\n chainerx.fix = _fix\n\n\ndef _populate_ndarray():\n ndarray = chainerx.ndarray\n\n # __getitem__ with advanced indexing\n old_getitem = ndarray.__getitem__\n\n def __getitem__(arr, key):\n try:\n return old_getitem(arr, key)\n except (IndexError, chainerx.DimensionError):\n pass\n\n is_backprop_required = arr.is_backprop_required()\n\n xp, dev, arr = _from_chx(arr, check_backprop=False)\n # The elements used for indexing the array might be\n # also ChainerX arrays. _from_chx ignores\n # other types and return them as-is\n if isinstance(key, tuple):\n key = tuple([_from_chx(k, check_backprop=False)[2] for k in key])\n else:\n _, _, key = _from_chx(key, check_backprop=False)\n\n with dev:\n ret = arr[key]\n\n # Doing this check after the fallback __getitem__ because the error\n # which caused the fallback might not be due to advanced indexing.\n # In such case the fallback __getitem__ should also raise the error.\n\n if is_backprop_required:\n raise RuntimeError(\n 'ChainerX getitem fallback for advanced indexing is not '\n 'supported for arrays that are connected to a graph.')\n\n return _to_chx(ret)\n\n # __setitem__ with advanced indexing\n def __setitem__(self, key, value):\n if self.is_backprop_required():\n raise RuntimeError(\n 'ChainerX setitem fallback for advanced indexing is not '\n 'supported for arrays that are connected to a graph.')\n\n xp, dev, self = _from_chx(self)\n if isinstance(key, tuple):\n key = tuple([_from_chx(k)[2] for k in key])\n else:\n _, _, key = _from_chx(key)\n _, _, value = _from_chx(value)\n\n with dev:\n self[key] = value\n\n ndarray.__setitem__ = __setitem__\n ndarray.__getitem__ = __getitem__\n\n def tolist(arr):\n _, dev, arr = _from_chx(arr)\n with dev:\n ret = arr.tolist()\n return ret\n\n ndarray.tolist = tolist\n\n\ndef populate():\n _populate_module_functions()\n _populate_ndarray()\n", "path": "chainerx/_fallback_workarounds.py"}], "after_files": [{"content": "# This file defines workaround implementation for\n# NumPy-compatibility functions that fall back to NumPy/CuPy functions\n# for native/cuda devices respecitvely.\n# The workaround does not support backprop, and also requires external\n# libraries mentioned above.\n# Functions defined in this file should be considered to have high priority for\n# genuine implementations.\nimport numpy\n\nimport chainerx\n\n\ntry:\n import cupy\nexcept Exception:\n cupy = None\n\n\nclass _DummyContext:\n def __enter__(self):\n pass\n\n def __exit__(self, type, value, traceback):\n pass\n\n\n_dummy_context = _DummyContext()\n\n\ndef _to_numpy(array):\n assert isinstance(array, chainerx.ndarray)\n return chainerx.to_numpy(array, copy=False)\n\n\ndef _from_numpy(array):\n assert isinstance(array, numpy.ndarray)\n return chainerx.array(array, copy=False)\n\n\ndef _to_cupy(array):\n assert cupy is not None\n # Convert to cupy.ndarray on the same device as source array\n return chainerx._to_cupy(array)\n\n\ndef _from_cupy(array):\n assert cupy is not None\n assert isinstance(array, cupy.ndarray)\n device = chainerx.get_device('cuda', array.device.id)\n return chainerx._core._fromrawpointer(\n array.data.mem.ptr,\n array.shape,\n array.dtype,\n array.strides,\n device,\n array.data.ptr - array.data.mem.ptr,\n array)\n\n\ndef _from_chx(array, check_backprop=True):\n # Converts chainerx.ndarray to numpy/cupy.ndarray.\n # Objects with other types are kept intact.\n # Returns a pair: (xp, cupy device or dummy context, 
numpy/cupy.ndarray).\n if not isinstance(array, chainerx.ndarray):\n if (isinstance(array, numpy.ndarray)\n or (cupy and isinstance(array, cupy.ndarray))):\n raise TypeError(\n 'ChainerX function fallback using NumPy/CuPy arrays '\n 'is not supported.')\n # _from_chx is also called for slice and tuple objects\n # Used to index a chx array\n return None, _dummy_context, array\n if check_backprop and array.is_backprop_required():\n raise RuntimeError(\n 'ChainerX function fallback using NumPy/CuPy is not '\n 'supported for arrays that are connected to a graph.')\n backend_name = array.device.backend.name\n if backend_name == 'native':\n return numpy, _dummy_context, _to_numpy(array)\n if backend_name == 'cuda':\n if cupy is None:\n raise RuntimeError(\n 'ChainerX fallback implementation for cuda backend requires '\n 'cupy to be installed.')\n array_cupy = _to_cupy(array)\n return cupy, array_cupy.device, array_cupy\n raise RuntimeError(\n 'ChainerX fallback implementation only supports native or cuda '\n 'backends.')\n\n\ndef _to_chx(array):\n # Converts numpy/cupy.ndarray to chainerx.ndarray.\n # Objects with other types are kept intact.\n if isinstance(array, numpy.ndarray):\n return _from_numpy(array)\n elif cupy is not None and isinstance(array, cupy.ndarray):\n return _from_cupy(array)\n return array\n\n\ndef _populate_module_functions():\n\n def _fix(arr):\n xp, dev, arr = _from_chx(arr)\n with dev:\n ret = xp.fix(arr)\n ret = xp.asarray(ret)\n return _to_chx(ret)\n\n chainerx.fix = _fix\n\n\ndef _populate_ndarray():\n ndarray = chainerx.ndarray\n\n # __getitem__ with advanced indexing\n old_getitem = ndarray.__getitem__\n\n def __getitem__(arr, key):\n if not isinstance(key, chainerx.ndarray):\n return old_getitem(arr, key)\n\n is_backprop_required = arr.is_backprop_required()\n\n xp, dev, arr = _from_chx(arr, check_backprop=False)\n # The elements used for indexing the array might be\n # also ChainerX arrays. _from_chx ignores\n # other types and return them as-is\n if isinstance(key, tuple):\n key = tuple([_from_chx(k, check_backprop=False)[2] for k in key])\n else:\n _, _, key = _from_chx(key, check_backprop=False)\n\n with dev:\n ret = arr[key]\n\n # Doing this check after the fallback __getitem__ because the error\n # which caused the fallback might not be due to advanced indexing.\n # In such case the fallback __getitem__ should also raise the error.\n\n if is_backprop_required:\n raise RuntimeError(\n 'ChainerX getitem fallback for advanced indexing is not '\n 'supported for arrays that are connected to a graph.')\n\n return _to_chx(ret)\n\n # __setitem__ with advanced indexing\n def __setitem__(self, key, value):\n if self.is_backprop_required():\n raise RuntimeError(\n 'ChainerX setitem fallback for advanced indexing is not '\n 'supported for arrays that are connected to a graph.')\n\n xp, dev, self = _from_chx(self)\n if isinstance(key, tuple):\n key = tuple([_from_chx(k)[2] for k in key])\n else:\n _, _, key = _from_chx(key)\n _, _, value = _from_chx(value)\n\n with dev:\n self[key] = value\n\n ndarray.__setitem__ = __setitem__\n ndarray.__getitem__ = __getitem__\n\n def tolist(arr):\n _, dev, arr = _from_chx(arr)\n with dev:\n ret = arr.tolist()\n return ret\n\n ndarray.tolist = tolist\n\n\ndef populate():\n _populate_module_functions()\n _populate_ndarray()\n", "path": "chainerx/_fallback_workarounds.py"}]}
| 2,035 | 136 |
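A note on how the patch above relates to the ellipsis issue: the old fallback `__getitem__` caught `IndexError` and `chainerx.DimensionError` and retried every failing key through NumPy/CuPy, so basic-indexing keys could silently take the fallback path. After the patch, only `chainerx.ndarray` keys (advanced indexing) fall back; everything else, including `Ellipsis`, slices and integer tuples, goes straight to the native implementation. The sketch below is illustrative only and uses hypothetical stand-in callables rather than the real chainerx internals.

```python
def dispatch_getitem(arr, key, native_getitem, fallback_getitem, ndarray_type):
    # Basic indexing (ints, slices, Ellipsis, tuples of those) stays native,
    # which is what lets `arr[...]` exercise the core support added for this issue.
    if not isinstance(key, ndarray_type):
        return native_getitem(arr, key)
    # Only ndarray keys (advanced indexing) use the NumPy/CuPy fallback.
    return fallback_getitem(arr, key)
```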
gh_patches_debug_39133
|
rasdani/github-patches
|
git_diff
|
e-valuation__EvaP-1110
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix results view for courses where results can't be published
The results page for a course where the results can't be published because it didn't get enough votes is not displayed correctly:
- Contributors can't see any non-text answers (that's correct) - but not all contributor cards are collapsed accordingly. If at least one vote exists, the respective contributor card is open, the warning text is not shown and no useful information is shown inside the card (see screenshot 1). The card should be closed in this case.
- Staff users initially see the "public" view and can change the view to `Myself`. On results pages of courses that didn't get enough votes, the `Public` button is missing (that was by design, because a public view does not exist for these courses) - now it's confusing because the initial view is something that's not accessible via the buttons.
Screenshot 1 (Contributor):

Screenshot 2 (Staff):

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `evap/results/views.py`
Content:
```
1 from collections import OrderedDict, namedtuple
2
3 from django.core.exceptions import PermissionDenied
4 from django.shortcuts import get_object_or_404, render
5 from django.contrib.auth.decorators import login_required
6
7 from evap.evaluation.models import Semester, Degree, Contribution
8 from evap.evaluation.auth import internal_required
9 from evap.results.tools import calculate_results, calculate_average_grades_and_deviation, TextResult, RatingResult, HeadingResult, COMMENT_STATES_REQUIRED_FOR_VISIBILITY
10
11
12 @internal_required
13 def index(request):
14 semesters = Semester.get_all_with_published_courses()
15
16 return render(request, "results_index.html", dict(semesters=semesters))
17
18
19 @internal_required
20 def semester_detail(request, semester_id):
21 semester = get_object_or_404(Semester, id=semester_id)
22 if request.user.is_reviewer:
23 courses = list(semester.course_set.filter(state__in=["in_evaluation", "evaluated", "reviewed", "published"]).prefetch_related("degrees"))
24 else:
25 courses = list(semester.course_set.filter(state="published").prefetch_related("degrees"))
26
27 courses = [course for course in courses if course.can_user_see_course(request.user)]
28
29 # Annotate each course object with its grades.
30 for course in courses:
31 course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)
32
33 CourseTuple = namedtuple('CourseTuple', ('courses', 'single_results'))
34
35 courses_by_degree = OrderedDict()
36 for degree in Degree.objects.all():
37 courses_by_degree[degree] = CourseTuple([], [])
38 for course in courses:
39 if course.is_single_result:
40 for degree in course.degrees.all():
41 section = calculate_results(course)[0]
42 result = section.results[0]
43 courses_by_degree[degree].single_results.append((course, result))
44 else:
45 for degree in course.degrees.all():
46 courses_by_degree[degree].courses.append(course)
47
48 template_data = dict(semester=semester, courses_by_degree=courses_by_degree)
49 return render(request, "results_semester_detail.html", template_data)
50
51
52 @login_required
53 def course_detail(request, semester_id, course_id):
54 semester = get_object_or_404(Semester, id=semester_id)
55 course = get_object_or_404(semester.course_set, id=course_id, semester=semester)
56
57 if not course.can_user_see_results(request.user):
58 raise PermissionDenied
59
60 sections = calculate_results(course)
61
62 if request.user.is_reviewer:
63 public_view = request.GET.get('public_view') != 'false' # if parameter is not given, show public view.
64 else:
65 public_view = request.GET.get('public_view') == 'true' # if parameter is not given, show own view.
66
67 represented_users = list(request.user.represented_users.all())
68 represented_users.append(request.user)
69
70 # filter text answers
71 for section in sections:
72 results = []
73 for result in section.results:
74 if isinstance(result, TextResult):
75 answers = [answer for answer in result.answers if user_can_see_text_answer(request.user, represented_users, answer, public_view)]
76 if answers:
77 results.append(TextResult(question=result.question, answers=answers))
78 else:
79 results.append(result)
80 section.results[:] = results
81
82 # filter empty headings
83 for section in sections:
84 filtered_results = []
85 for index in range(len(section.results)):
86 result = section.results[index]
87 # filter out if there are no more questions or the next question is also a heading question
88 if isinstance(result, HeadingResult):
89 if index == len(section.results) - 1 or isinstance(section.results[index + 1], HeadingResult):
90 continue
91 filtered_results.append(result)
92 section.results[:] = filtered_results
93
94 # remove empty sections
95 sections = [section for section in sections if section.results]
96
97 # group by contributor
98 course_sections = []
99 contributor_sections = OrderedDict()
100 for section in sections:
101 if not section.results:
102 continue
103 if section.contributor is None:
104 course_sections.append(section)
105 else:
106 contributor_sections.setdefault(section.contributor,
107 {'total_votes': 0, 'sections': []})['sections'].append(section)
108
109 # Sum up all Sections for this contributor.
110 # If section is not a RatingResult:
111 # Add 1 as we assume it is a TextResult or something similar that should be displayed.
112 contributor_sections[section.contributor]['total_votes'] +=\
113 sum([s.total_count if isinstance(s, RatingResult) else 1 for s in section.results])
114
115 # Show a warning if course is still in evaluation (for reviewer preview).
116 evaluation_warning = course.state != 'published'
117
118 # Results for a course might not be visible because there are not enough answers
119 # but it can still be "published" e.g. to show the comment results to contributors.
120 # Users who can open the results page see a warning message in this case.
121 sufficient_votes_warning = not course.can_publish_grades
122
123 show_grades = request.user.is_reviewer or course.can_publish_grades
124
125 course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)
126
127 template_data = dict(
128 course=course,
129 course_sections=course_sections,
130 contributor_sections=contributor_sections,
131 evaluation_warning=evaluation_warning,
132 sufficient_votes_warning=sufficient_votes_warning,
133 show_grades=show_grades,
134 reviewer=request.user.is_reviewer,
135 contributor=course.is_user_contributor_or_delegate(request.user),
136 can_download_grades=request.user.can_download_grades,
137 public_view=public_view)
138 return render(request, "results_course_detail.html", template_data)
139
140
141 def user_can_see_text_answer(user, represented_users, text_answer, public_view=False):
142 if public_view:
143 return False
144 if text_answer.state not in COMMENT_STATES_REQUIRED_FOR_VISIBILITY:
145 return False
146 if user.is_reviewer:
147 return True
148
149 contributor = text_answer.contribution.contributor
150
151 if text_answer.is_private:
152 return contributor == user
153
154 if text_answer.is_published:
155 if text_answer.contribution.responsible:
156 return contributor == user or user in contributor.delegates.all()
157
158 if contributor in represented_users:
159 return True
160 if text_answer.contribution.course.contributions.filter(
161 contributor__in=represented_users, comment_visibility=Contribution.ALL_COMMENTS).exists():
162 return True
163 if text_answer.contribution.is_general and text_answer.contribution.course.contributions.filter(
164 contributor__in=represented_users, comment_visibility=Contribution.COURSE_COMMENTS).exists():
165 return True
166
167 return False
168
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/evap/results/views.py b/evap/results/views.py
--- a/evap/results/views.py
+++ b/evap/results/views.py
@@ -6,7 +6,8 @@
from evap.evaluation.models import Semester, Degree, Contribution
from evap.evaluation.auth import internal_required
-from evap.results.tools import calculate_results, calculate_average_grades_and_deviation, TextResult, RatingResult, HeadingResult, COMMENT_STATES_REQUIRED_FOR_VISIBILITY
+from evap.results.tools import calculate_results, calculate_average_grades_and_deviation, TextResult, RatingResult, \
+ HeadingResult, COMMENT_STATES_REQUIRED_FOR_VISIBILITY, YesNoResult
@internal_required
@@ -64,9 +65,15 @@
else:
public_view = request.GET.get('public_view') == 'true' # if parameter is not given, show own view.
+ # If grades are not published, there is no public view
+ if not course.can_publish_grades:
+ public_view = False
+
represented_users = list(request.user.represented_users.all())
represented_users.append(request.user)
+ show_grades = request.user.is_reviewer or course.can_publish_grades
+
# filter text answers
for section in sections:
results = []
@@ -106,11 +113,13 @@
contributor_sections.setdefault(section.contributor,
{'total_votes': 0, 'sections': []})['sections'].append(section)
- # Sum up all Sections for this contributor.
- # If section is not a RatingResult:
- # Add 1 as we assume it is a TextResult or something similar that should be displayed.
- contributor_sections[section.contributor]['total_votes'] +=\
- sum([s.total_count if isinstance(s, RatingResult) else 1 for s in section.results])
+ for result in section.results:
+ if isinstance(result, TextResult):
+ contributor_sections[section.contributor]['total_votes'] += 1
+ elif isinstance(result, RatingResult) or isinstance(result, YesNoResult):
+ # Only count rating results if we show the grades.
+ if show_grades:
+ contributor_sections[section.contributor]['total_votes'] += result.total_count
# Show a warning if course is still in evaluation (for reviewer preview).
evaluation_warning = course.state != 'published'
@@ -120,8 +129,6 @@
# Users who can open the results page see a warning message in this case.
sufficient_votes_warning = not course.can_publish_grades
- show_grades = request.user.is_reviewer or course.can_publish_grades
-
course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)
template_data = dict(
|
{"golden_diff": "diff --git a/evap/results/views.py b/evap/results/views.py\n--- a/evap/results/views.py\n+++ b/evap/results/views.py\n@@ -6,7 +6,8 @@\n \n from evap.evaluation.models import Semester, Degree, Contribution\n from evap.evaluation.auth import internal_required\n-from evap.results.tools import calculate_results, calculate_average_grades_and_deviation, TextResult, RatingResult, HeadingResult, COMMENT_STATES_REQUIRED_FOR_VISIBILITY\n+from evap.results.tools import calculate_results, calculate_average_grades_and_deviation, TextResult, RatingResult, \\\n+ HeadingResult, COMMENT_STATES_REQUIRED_FOR_VISIBILITY, YesNoResult\n \n \n @internal_required\n@@ -64,9 +65,15 @@\n else:\n public_view = request.GET.get('public_view') == 'true' # if parameter is not given, show own view.\n \n+ # If grades are not published, there is no public view\n+ if not course.can_publish_grades:\n+ public_view = False\n+\n represented_users = list(request.user.represented_users.all())\n represented_users.append(request.user)\n \n+ show_grades = request.user.is_reviewer or course.can_publish_grades\n+\n # filter text answers\n for section in sections:\n results = []\n@@ -106,11 +113,13 @@\n contributor_sections.setdefault(section.contributor,\n {'total_votes': 0, 'sections': []})['sections'].append(section)\n \n- # Sum up all Sections for this contributor.\n- # If section is not a RatingResult:\n- # Add 1 as we assume it is a TextResult or something similar that should be displayed.\n- contributor_sections[section.contributor]['total_votes'] +=\\\n- sum([s.total_count if isinstance(s, RatingResult) else 1 for s in section.results])\n+ for result in section.results:\n+ if isinstance(result, TextResult):\n+ contributor_sections[section.contributor]['total_votes'] += 1\n+ elif isinstance(result, RatingResult) or isinstance(result, YesNoResult):\n+ # Only count rating results if we show the grades.\n+ if show_grades:\n+ contributor_sections[section.contributor]['total_votes'] += result.total_count\n \n # Show a warning if course is still in evaluation (for reviewer preview).\n evaluation_warning = course.state != 'published'\n@@ -120,8 +129,6 @@\n # Users who can open the results page see a warning message in this case.\n sufficient_votes_warning = not course.can_publish_grades\n \n- show_grades = request.user.is_reviewer or course.can_publish_grades\n-\n course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)\n \n template_data = dict(\n", "issue": "Fix results view for courses where results can't be published\nThe results page for a course where the results can't be published because it didn't get enough votes is not displayed correctly:\r\n- Contributors can't see any non-text answers (that's correct) - but not all contributor cards are collapsed accordingly. If at least one vote exists, the respective contributor card is open, the warning text is not shown and no useful information is shown inside the card (see screenshot 1). The card should be closed in this case.\r\n- Staff users initially see the \"public\" view and can change the view to `Myself`. 
On results pages of courses that didn't get enough votes, the `Public` button is missing (that was by design, because a public view does not exist for these courses) - now it's confusing because the initial view is something that's not accessible via the buttons.\r\n\r\nScreenshot 1 (Contributor):\r\n\r\n\r\nScreenshot 2 (Staff):\r\n\r\n\n", "before_files": [{"content": "from collections import OrderedDict, namedtuple\n\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import get_object_or_404, render\nfrom django.contrib.auth.decorators import login_required\n\nfrom evap.evaluation.models import Semester, Degree, Contribution\nfrom evap.evaluation.auth import internal_required\nfrom evap.results.tools import calculate_results, calculate_average_grades_and_deviation, TextResult, RatingResult, HeadingResult, COMMENT_STATES_REQUIRED_FOR_VISIBILITY\n\n\n@internal_required\ndef index(request):\n semesters = Semester.get_all_with_published_courses()\n\n return render(request, \"results_index.html\", dict(semesters=semesters))\n\n\n@internal_required\ndef semester_detail(request, semester_id):\n semester = get_object_or_404(Semester, id=semester_id)\n if request.user.is_reviewer:\n courses = list(semester.course_set.filter(state__in=[\"in_evaluation\", \"evaluated\", \"reviewed\", \"published\"]).prefetch_related(\"degrees\"))\n else:\n courses = list(semester.course_set.filter(state=\"published\").prefetch_related(\"degrees\"))\n\n courses = [course for course in courses if course.can_user_see_course(request.user)]\n\n # Annotate each course object with its grades.\n for course in courses:\n course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)\n\n CourseTuple = namedtuple('CourseTuple', ('courses', 'single_results'))\n\n courses_by_degree = OrderedDict()\n for degree in Degree.objects.all():\n courses_by_degree[degree] = CourseTuple([], [])\n for course in courses:\n if course.is_single_result:\n for degree in course.degrees.all():\n section = calculate_results(course)[0]\n result = section.results[0]\n courses_by_degree[degree].single_results.append((course, result))\n else:\n for degree in course.degrees.all():\n courses_by_degree[degree].courses.append(course)\n\n template_data = dict(semester=semester, courses_by_degree=courses_by_degree)\n return render(request, \"results_semester_detail.html\", template_data)\n\n\n@login_required\ndef course_detail(request, semester_id, course_id):\n semester = get_object_or_404(Semester, id=semester_id)\n course = get_object_or_404(semester.course_set, id=course_id, semester=semester)\n\n if not course.can_user_see_results(request.user):\n raise PermissionDenied\n\n sections = calculate_results(course)\n\n if request.user.is_reviewer:\n public_view = request.GET.get('public_view') != 'false' # if parameter is not given, show public view.\n else:\n public_view = request.GET.get('public_view') == 'true' # if parameter is not given, show own view.\n\n represented_users = list(request.user.represented_users.all())\n represented_users.append(request.user)\n\n # filter text answers\n for section in sections:\n results = []\n for result in section.results:\n if isinstance(result, TextResult):\n answers = [answer for answer in result.answers if user_can_see_text_answer(request.user, represented_users, answer, public_view)]\n if answers:\n results.append(TextResult(question=result.question, answers=answers))\n else:\n results.append(result)\n section.results[:] = results\n\n # filter empty headings\n 
for section in sections:\n filtered_results = []\n for index in range(len(section.results)):\n result = section.results[index]\n # filter out if there are no more questions or the next question is also a heading question\n if isinstance(result, HeadingResult):\n if index == len(section.results) - 1 or isinstance(section.results[index + 1], HeadingResult):\n continue\n filtered_results.append(result)\n section.results[:] = filtered_results\n\n # remove empty sections\n sections = [section for section in sections if section.results]\n\n # group by contributor\n course_sections = []\n contributor_sections = OrderedDict()\n for section in sections:\n if not section.results:\n continue\n if section.contributor is None:\n course_sections.append(section)\n else:\n contributor_sections.setdefault(section.contributor,\n {'total_votes': 0, 'sections': []})['sections'].append(section)\n\n # Sum up all Sections for this contributor.\n # If section is not a RatingResult:\n # Add 1 as we assume it is a TextResult or something similar that should be displayed.\n contributor_sections[section.contributor]['total_votes'] +=\\\n sum([s.total_count if isinstance(s, RatingResult) else 1 for s in section.results])\n\n # Show a warning if course is still in evaluation (for reviewer preview).\n evaluation_warning = course.state != 'published'\n\n # Results for a course might not be visible because there are not enough answers\n # but it can still be \"published\" e.g. to show the comment results to contributors.\n # Users who can open the results page see a warning message in this case.\n sufficient_votes_warning = not course.can_publish_grades\n\n show_grades = request.user.is_reviewer or course.can_publish_grades\n\n course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)\n\n template_data = dict(\n course=course,\n course_sections=course_sections,\n contributor_sections=contributor_sections,\n evaluation_warning=evaluation_warning,\n sufficient_votes_warning=sufficient_votes_warning,\n show_grades=show_grades,\n reviewer=request.user.is_reviewer,\n contributor=course.is_user_contributor_or_delegate(request.user),\n can_download_grades=request.user.can_download_grades,\n public_view=public_view)\n return render(request, \"results_course_detail.html\", template_data)\n\n\ndef user_can_see_text_answer(user, represented_users, text_answer, public_view=False):\n if public_view:\n return False\n if text_answer.state not in COMMENT_STATES_REQUIRED_FOR_VISIBILITY:\n return False\n if user.is_reviewer:\n return True\n\n contributor = text_answer.contribution.contributor\n\n if text_answer.is_private:\n return contributor == user\n\n if text_answer.is_published:\n if text_answer.contribution.responsible:\n return contributor == user or user in contributor.delegates.all()\n\n if contributor in represented_users:\n return True\n if text_answer.contribution.course.contributions.filter(\n contributor__in=represented_users, comment_visibility=Contribution.ALL_COMMENTS).exists():\n return True\n if text_answer.contribution.is_general and text_answer.contribution.course.contributions.filter(\n contributor__in=represented_users, comment_visibility=Contribution.COURSE_COMMENTS).exists():\n return True\n\n return False\n", "path": "evap/results/views.py"}], "after_files": [{"content": "from collections import OrderedDict, namedtuple\n\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import get_object_or_404, render\nfrom django.contrib.auth.decorators import 
login_required\n\nfrom evap.evaluation.models import Semester, Degree, Contribution\nfrom evap.evaluation.auth import internal_required\nfrom evap.results.tools import calculate_results, calculate_average_grades_and_deviation, TextResult, RatingResult, \\\n HeadingResult, COMMENT_STATES_REQUIRED_FOR_VISIBILITY, YesNoResult\n\n\n@internal_required\ndef index(request):\n semesters = Semester.get_all_with_published_courses()\n\n return render(request, \"results_index.html\", dict(semesters=semesters))\n\n\n@internal_required\ndef semester_detail(request, semester_id):\n semester = get_object_or_404(Semester, id=semester_id)\n if request.user.is_reviewer:\n courses = list(semester.course_set.filter(state__in=[\"in_evaluation\", \"evaluated\", \"reviewed\", \"published\"]).prefetch_related(\"degrees\"))\n else:\n courses = list(semester.course_set.filter(state=\"published\").prefetch_related(\"degrees\"))\n\n courses = [course for course in courses if course.can_user_see_course(request.user)]\n\n # Annotate each course object with its grades.\n for course in courses:\n course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)\n\n CourseTuple = namedtuple('CourseTuple', ('courses', 'single_results'))\n\n courses_by_degree = OrderedDict()\n for degree in Degree.objects.all():\n courses_by_degree[degree] = CourseTuple([], [])\n for course in courses:\n if course.is_single_result:\n for degree in course.degrees.all():\n section = calculate_results(course)[0]\n result = section.results[0]\n courses_by_degree[degree].single_results.append((course, result))\n else:\n for degree in course.degrees.all():\n courses_by_degree[degree].courses.append(course)\n\n template_data = dict(semester=semester, courses_by_degree=courses_by_degree)\n return render(request, \"results_semester_detail.html\", template_data)\n\n\n@login_required\ndef course_detail(request, semester_id, course_id):\n semester = get_object_or_404(Semester, id=semester_id)\n course = get_object_or_404(semester.course_set, id=course_id, semester=semester)\n\n if not course.can_user_see_results(request.user):\n raise PermissionDenied\n\n sections = calculate_results(course)\n\n if request.user.is_reviewer:\n public_view = request.GET.get('public_view') != 'false' # if parameter is not given, show public view.\n else:\n public_view = request.GET.get('public_view') == 'true' # if parameter is not given, show own view.\n\n # If grades are not published, there is no public view\n if not course.can_publish_grades:\n public_view = False\n\n represented_users = list(request.user.represented_users.all())\n represented_users.append(request.user)\n\n show_grades = request.user.is_reviewer or course.can_publish_grades\n\n # filter text answers\n for section in sections:\n results = []\n for result in section.results:\n if isinstance(result, TextResult):\n answers = [answer for answer in result.answers if user_can_see_text_answer(request.user, represented_users, answer, public_view)]\n if answers:\n results.append(TextResult(question=result.question, answers=answers))\n else:\n results.append(result)\n section.results[:] = results\n\n # filter empty headings\n for section in sections:\n filtered_results = []\n for index in range(len(section.results)):\n result = section.results[index]\n # filter out if there are no more questions or the next question is also a heading question\n if isinstance(result, HeadingResult):\n if index == len(section.results) - 1 or isinstance(section.results[index + 1], HeadingResult):\n continue\n 
filtered_results.append(result)\n section.results[:] = filtered_results\n\n # remove empty sections\n sections = [section for section in sections if section.results]\n\n # group by contributor\n course_sections = []\n contributor_sections = OrderedDict()\n for section in sections:\n if not section.results:\n continue\n if section.contributor is None:\n course_sections.append(section)\n else:\n contributor_sections.setdefault(section.contributor,\n {'total_votes': 0, 'sections': []})['sections'].append(section)\n\n for result in section.results:\n if isinstance(result, TextResult):\n contributor_sections[section.contributor]['total_votes'] += 1\n elif isinstance(result, RatingResult) or isinstance(result, YesNoResult):\n # Only count rating results if we show the grades.\n if show_grades:\n contributor_sections[section.contributor]['total_votes'] += result.total_count\n\n # Show a warning if course is still in evaluation (for reviewer preview).\n evaluation_warning = course.state != 'published'\n\n # Results for a course might not be visible because there are not enough answers\n # but it can still be \"published\" e.g. to show the comment results to contributors.\n # Users who can open the results page see a warning message in this case.\n sufficient_votes_warning = not course.can_publish_grades\n\n course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)\n\n template_data = dict(\n course=course,\n course_sections=course_sections,\n contributor_sections=contributor_sections,\n evaluation_warning=evaluation_warning,\n sufficient_votes_warning=sufficient_votes_warning,\n show_grades=show_grades,\n reviewer=request.user.is_reviewer,\n contributor=course.is_user_contributor_or_delegate(request.user),\n can_download_grades=request.user.can_download_grades,\n public_view=public_view)\n return render(request, \"results_course_detail.html\", template_data)\n\n\ndef user_can_see_text_answer(user, represented_users, text_answer, public_view=False):\n if public_view:\n return False\n if text_answer.state not in COMMENT_STATES_REQUIRED_FOR_VISIBILITY:\n return False\n if user.is_reviewer:\n return True\n\n contributor = text_answer.contribution.contributor\n\n if text_answer.is_private:\n return contributor == user\n\n if text_answer.is_published:\n if text_answer.contribution.responsible:\n return contributor == user or user in contributor.delegates.all()\n\n if contributor in represented_users:\n return True\n if text_answer.contribution.course.contributions.filter(\n contributor__in=represented_users, comment_visibility=Contribution.ALL_COMMENTS).exists():\n return True\n if text_answer.contribution.is_general and text_answer.contribution.course.contributions.filter(\n contributor__in=represented_users, comment_visibility=Contribution.COURSE_COMMENTS).exists():\n return True\n\n return False\n", "path": "evap/results/views.py"}]}
| 2,402 | 605 |
gh_patches_debug_1190
|
rasdani/github-patches
|
git_diff
|
boto__botocore-1117
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support Python 3.6
Python 3.6 got released, and some distros (like Fedora) are switching to it.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 import botocore
3 import sys
4
5 from setuptools import setup, find_packages
6
7
8 requires = ['jmespath>=0.7.1,<1.0.0',
9 'python-dateutil>=2.1,<3.0.0',
10 'docutils>=0.10']
11
12
13 if sys.version_info[:2] == (2, 6):
14 # For python2.6 we have a few other dependencies.
15 # First we need an ordered dictionary so we use the
16 # 2.6 backport.
17 requires.append('ordereddict==1.1')
18 # Then we need simplejson. This is because we need
19 # a json version that allows us to specify we want to
20 # use an ordereddict instead of a normal dict for the
21 # JSON objects. The 2.7 json module has this. For 2.6
22 # we need simplejson.
23 requires.append('simplejson==3.3.0')
24
25
26 setup(
27 name='botocore',
28 version=botocore.__version__,
29 description='Low-level, data-driven core of boto 3.',
30 long_description=open('README.rst').read(),
31 author='Amazon Web Services',
32 url='https://github.com/boto/botocore',
33 scripts=[],
34 packages=find_packages(exclude=['tests*']),
35 package_data={'botocore': ['data/*.json', 'data/*/*.json'],
36 'botocore.vendored.requests': ['*.pem']},
37 include_package_data=True,
38 install_requires=requires,
39 extras_require={
40 ':python_version=="2.6"': [
41 'ordereddict==1.1',
42 'simplejson==3.3.0',
43 ]
44 },
45 license="Apache License 2.0",
46 classifiers=(
47 'Development Status :: 5 - Production/Stable',
48 'Intended Audience :: Developers',
49 'Intended Audience :: System Administrators',
50 'Natural Language :: English',
51 'License :: OSI Approved :: Apache Software License',
52 'Programming Language :: Python',
53 'Programming Language :: Python :: 2',
54 'Programming Language :: Python :: 2.6',
55 'Programming Language :: Python :: 2.7',
56 'Programming Language :: Python :: 3',
57 'Programming Language :: Python :: 3.3',
58 'Programming Language :: Python :: 3.4',
59 'Programming Language :: Python :: 3.5',
60 ),
61 )
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -57,5 +57,6 @@
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
+ 'Programming Language :: Python :: 3.6',
),
)
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -57,5 +57,6 @@\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n+ 'Programming Language :: Python :: 3.6',\n ),\n )\n", "issue": "Support Python 3.6\nPython 3.6 got released, and some distro (like Fedora) are swithcing to it.\n", "before_files": [{"content": "#!/usr/bin/env python\nimport botocore\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nrequires = ['jmespath>=0.7.1,<1.0.0',\n 'python-dateutil>=2.1,<3.0.0',\n 'docutils>=0.10']\n\n\nif sys.version_info[:2] == (2, 6):\n # For python2.6 we have a few other dependencies.\n # First we need an ordered dictionary so we use the\n # 2.6 backport.\n requires.append('ordereddict==1.1')\n # Then we need simplejson. This is because we need\n # a json version that allows us to specify we want to\n # use an ordereddict instead of a normal dict for the\n # JSON objects. The 2.7 json module has this. For 2.6\n # we need simplejson.\n requires.append('simplejson==3.3.0')\n\n\nsetup(\n name='botocore',\n version=botocore.__version__,\n description='Low-level, data-driven core of boto 3.',\n long_description=open('README.rst').read(),\n author='Amazon Web Services',\n url='https://github.com/boto/botocore',\n scripts=[],\n packages=find_packages(exclude=['tests*']),\n package_data={'botocore': ['data/*.json', 'data/*/*.json'],\n 'botocore.vendored.requests': ['*.pem']},\n include_package_data=True,\n install_requires=requires,\n extras_require={\n ':python_version==\"2.6\"': [\n 'ordereddict==1.1',\n 'simplejson==3.3.0',\n ]\n },\n license=\"Apache License 2.0\",\n classifiers=(\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n ),\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport botocore\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nrequires = ['jmespath>=0.7.1,<1.0.0',\n 'python-dateutil>=2.1,<3.0.0',\n 'docutils>=0.10']\n\n\nif sys.version_info[:2] == (2, 6):\n # For python2.6 we have a few other dependencies.\n # First we need an ordered dictionary so we use the\n # 2.6 backport.\n requires.append('ordereddict==1.1')\n # Then we need simplejson. This is because we need\n # a json version that allows us to specify we want to\n # use an ordereddict instead of a normal dict for the\n # JSON objects. The 2.7 json module has this. 
For 2.6\n # we need simplejson.\n requires.append('simplejson==3.3.0')\n\n\nsetup(\n name='botocore',\n version=botocore.__version__,\n description='Low-level, data-driven core of boto 3.',\n long_description=open('README.rst').read(),\n author='Amazon Web Services',\n url='https://github.com/boto/botocore',\n scripts=[],\n packages=find_packages(exclude=['tests*']),\n package_data={'botocore': ['data/*.json', 'data/*/*.json'],\n 'botocore.vendored.requests': ['*.pem']},\n include_package_data=True,\n install_requires=requires,\n extras_require={\n ':python_version==\"2.6\"': [\n 'ordereddict==1.1',\n 'simplejson==3.3.0',\n ]\n },\n license=\"Apache License 2.0\",\n classifiers=(\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ),\n)\n", "path": "setup.py"}]}
| 937 | 84 |
gh_patches_debug_8073
|
rasdani/github-patches
|
git_diff
|
ivy-llc__ivy-26775
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ifft2
ifft2
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py`
Content:
```
1 import ivy
2 from ivy.functional.frontends.numpy.func_wrapper import to_ivy_arrays_and_back
3 from ivy.func_wrapper import with_unsupported_dtypes
4
5
6 # --- Helpers --- #
7 # --------------- #
8
9
10 def _swap_direction(norm):
11 try:
12 return _SWAP_DIRECTION_MAP[norm]
13 except KeyError:
14 raise ValueError(
15 f'Invalid norm value {norm}; should be "backward", "ortho" or "forward".'
16 ) from None
17
18
19 # --- Main --- #
20 # ------------ #
21
22
23 @to_ivy_arrays_and_back
24 def fft(a, n=None, axis=-1, norm=None):
25 return ivy.fft(ivy.astype(a, ivy.complex128), axis, norm=norm, n=n)
26
27
28 @with_unsupported_dtypes({"1.26.0 and below": ("int",)}, "numpy")
29 @to_ivy_arrays_and_back
30 def fftfreq(n, d=1.0):
31 if not isinstance(
32 n, (int, type(ivy.int8), type(ivy.int16), type(ivy.int32), type(ivy.int64))
33 ):
34 raise ValueError("n should be an integer")
35
36 N = (n - 1) // 2 + 1
37 val = 1.0 / (n * d)
38 results = ivy.empty(tuple([n]), dtype=int)
39
40 p1 = ivy.arange(0, N, dtype=int)
41 results[:N] = p1
42 p2 = ivy.arange(-(n // 2), 0, dtype=int)
43 results[N:] = p2
44
45 return results * val
46
47
48 @to_ivy_arrays_and_back
49 @with_unsupported_dtypes({"1.26.0 and below": ("float16",)}, "numpy")
50 def fftshift(x, axes=None):
51 x = ivy.asarray(x)
52
53 if axes is None:
54 axes = tuple(range(x.ndim))
55 shift = [(dim // 2) for dim in x.shape]
56 elif isinstance(
57 axes,
58 (int, type(ivy.uint8), type(ivy.uint16), type(ivy.uint32), type(ivy.uint64)),
59 ):
60 shift = x.shape[axes] // 2
61 else:
62 shift = [(x.shape[ax] // 2) for ax in axes]
63
64 roll = ivy.roll(x, shift, axis=axes)
65
66 return roll
67
68
69 @to_ivy_arrays_and_back
70 def ifft(a, n=None, axis=-1, norm=None):
71 a = ivy.array(a, dtype=ivy.complex128)
72 if norm is None:
73 norm = "backward"
74 return ivy.ifft(a, axis, norm=norm, n=n)
75
76
77 @with_unsupported_dtypes({"1.24.3 and below": ("float16",)}, "numpy")
78 @to_ivy_arrays_and_back
79 def ifftn(a, s=None, axes=None, norm=None):
80 a = ivy.asarray(a, dtype=ivy.complex128)
81 a = ivy.ifftn(a, s=s, axes=axes, norm=norm)
82 return a
83
84
85 @to_ivy_arrays_and_back
86 @with_unsupported_dtypes({"1.26.0 and below": ("float16",)}, "numpy")
87 def ifftshift(x, axes=None):
88 x = ivy.asarray(x)
89
90 if axes is None:
91 axes = tuple(range(x.ndim))
92 shift = [-(dim // 2) for dim in x.shape]
93 elif isinstance(
94 axes,
95 (int, type(ivy.uint8), type(ivy.uint16), type(ivy.uint32), type(ivy.uint64)),
96 ):
97 shift = -(x.shape[axes] // 2)
98 else:
99 shift = [-(x.shape[ax] // 2) for ax in axes]
100
101 roll = ivy.roll(x, shift, axis=axes)
102
103 return roll
104
105
106 @with_unsupported_dtypes({"1.26.0 and below": ("float16",)}, "numpy")
107 @to_ivy_arrays_and_back
108 def ihfft(a, n=None, axis=-1, norm=None):
109 if n is None:
110 n = a.shape[axis]
111 norm = _swap_direction(norm)
112 output = ivy.conj(rfft(a, n, axis, norm=norm).ivy_array)
113 return output
114
115
116 @with_unsupported_dtypes({"1.26.0 and below": ("float16",)}, "numpy")
117 @to_ivy_arrays_and_back
118 def rfft(a, n=None, axis=-1, norm=None):
119 if norm is None:
120 norm = "backward"
121 a = ivy.array(a, dtype=ivy.float64)
122 return ivy.dft(a, axis=axis, inverse=False, onesided=True, dft_length=n, norm=norm)
123
124
125 @to_ivy_arrays_and_back
126 def rfftfreq(n, d=1.0):
127 if not isinstance(
128 n, (int, type(ivy.int8), type(ivy.int16), type(ivy.int32), type(ivy.int64))
129 ):
130 raise ValueError("n should be an integer")
131
132 val = 1.0 / (n * d)
133 N = n // 2 + 1
134 results = ivy.arange(0, N, dtype=int)
135 return results * val
136
137
138 @with_unsupported_dtypes({"1.24.3 and below": ("float16",)}, "numpy")
139 @to_ivy_arrays_and_back
140 def rfftn(a, s=None, axes=None, norm=None):
141 a = ivy.asarray(a, dtype=ivy.complex128)
142 return ivy.rfftn(a, s=s, axes=axes, norm=norm)
143
144
145 _SWAP_DIRECTION_MAP = {
146 None: "forward",
147 "backward": "forward",
148 "ortho": "ortho",
149 "forward": "backward",
150 }
151
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py b/ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py
--- a/ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py
+++ b/ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py
@@ -74,6 +74,14 @@
return ivy.ifft(a, axis, norm=norm, n=n)
+@with_unsupported_dtypes({"1.24.3 and below": ("float16",)}, "numpy")
+@to_ivy_arrays_and_back
+def ifft2(a, s=None, axes=(-2, -1), norm=None):
+ a = ivy.asarray(a, dtype=ivy.complex128)
+ a = ivy.ifftn(a, s=s, axes=axes, norm=norm)
+ return a
+
+
@with_unsupported_dtypes({"1.24.3 and below": ("float16",)}, "numpy")
@to_ivy_arrays_and_back
def ifftn(a, s=None, axes=None, norm=None):
|
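A minimal usage sketch of the frontend function added in the diff above — the module path follows the file listed in the prompt, and selecting the NumPy backend is an assumption of this example, not something stated in the record:

```python
import numpy as np
import ivy
from ivy.functional.frontends.numpy.fft import discrete_fourier_transform as dft

ivy.set_backend("numpy")  # assumption: the NumPy backend is installed and selected

a = np.random.rand(4, 4) + 1j * np.random.rand(4, 4)
res = dft.ifft2(a)     # new frontend function; delegates to ivy.ifftn over axes (-2, -1)
ref = np.fft.ifft2(a)  # plain NumPy result, for comparison
```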
{"golden_diff": "diff --git a/ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py b/ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py\n--- a/ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py\n+++ b/ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py\n@@ -74,6 +74,14 @@\n return ivy.ifft(a, axis, norm=norm, n=n)\n \n \n+@with_unsupported_dtypes({\"1.24.3 and below\": (\"float16\",)}, \"numpy\")\n+@to_ivy_arrays_and_back\n+def ifft2(a, s=None, axes=(-2, -1), norm=None):\n+ a = ivy.asarray(a, dtype=ivy.complex128)\n+ a = ivy.ifftn(a, s=s, axes=axes, norm=norm)\n+ return a\n+\n+\n @with_unsupported_dtypes({\"1.24.3 and below\": (\"float16\",)}, \"numpy\")\n @to_ivy_arrays_and_back\n def ifftn(a, s=None, axes=None, norm=None):\n", "issue": "ifft2\n\nifft2\n\n", "before_files": [{"content": "import ivy\nfrom ivy.functional.frontends.numpy.func_wrapper import to_ivy_arrays_and_back\nfrom ivy.func_wrapper import with_unsupported_dtypes\n\n\n# --- Helpers --- #\n# --------------- #\n\n\ndef _swap_direction(norm):\n try:\n return _SWAP_DIRECTION_MAP[norm]\n except KeyError:\n raise ValueError(\n f'Invalid norm value {norm}; should be \"backward\", \"ortho\" or \"forward\".'\n ) from None\n\n\n# --- Main --- #\n# ------------ #\n\n\n@to_ivy_arrays_and_back\ndef fft(a, n=None, axis=-1, norm=None):\n return ivy.fft(ivy.astype(a, ivy.complex128), axis, norm=norm, n=n)\n\n\n@with_unsupported_dtypes({\"1.26.0 and below\": (\"int\",)}, \"numpy\")\n@to_ivy_arrays_and_back\ndef fftfreq(n, d=1.0):\n if not isinstance(\n n, (int, type(ivy.int8), type(ivy.int16), type(ivy.int32), type(ivy.int64))\n ):\n raise ValueError(\"n should be an integer\")\n\n N = (n - 1) // 2 + 1\n val = 1.0 / (n * d)\n results = ivy.empty(tuple([n]), dtype=int)\n\n p1 = ivy.arange(0, N, dtype=int)\n results[:N] = p1\n p2 = ivy.arange(-(n // 2), 0, dtype=int)\n results[N:] = p2\n\n return results * val\n\n\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes({\"1.26.0 and below\": (\"float16\",)}, \"numpy\")\ndef fftshift(x, axes=None):\n x = ivy.asarray(x)\n\n if axes is None:\n axes = tuple(range(x.ndim))\n shift = [(dim // 2) for dim in x.shape]\n elif isinstance(\n axes,\n (int, type(ivy.uint8), type(ivy.uint16), type(ivy.uint32), type(ivy.uint64)),\n ):\n shift = x.shape[axes] // 2\n else:\n shift = [(x.shape[ax] // 2) for ax in axes]\n\n roll = ivy.roll(x, shift, axis=axes)\n\n return roll\n\n\n@to_ivy_arrays_and_back\ndef ifft(a, n=None, axis=-1, norm=None):\n a = ivy.array(a, dtype=ivy.complex128)\n if norm is None:\n norm = \"backward\"\n return ivy.ifft(a, axis, norm=norm, n=n)\n\n\n@with_unsupported_dtypes({\"1.24.3 and below\": (\"float16\",)}, \"numpy\")\n@to_ivy_arrays_and_back\ndef ifftn(a, s=None, axes=None, norm=None):\n a = ivy.asarray(a, dtype=ivy.complex128)\n a = ivy.ifftn(a, s=s, axes=axes, norm=norm)\n return a\n\n\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes({\"1.26.0 and below\": (\"float16\",)}, \"numpy\")\ndef ifftshift(x, axes=None):\n x = ivy.asarray(x)\n\n if axes is None:\n axes = tuple(range(x.ndim))\n shift = [-(dim // 2) for dim in x.shape]\n elif isinstance(\n axes,\n (int, type(ivy.uint8), type(ivy.uint16), type(ivy.uint32), type(ivy.uint64)),\n ):\n shift = -(x.shape[axes] // 2)\n else:\n shift = [-(x.shape[ax] // 2) for ax in axes]\n\n roll = ivy.roll(x, shift, axis=axes)\n\n return roll\n\n\n@with_unsupported_dtypes({\"1.26.0 and below\": (\"float16\",)}, \"numpy\")\n@to_ivy_arrays_and_back\ndef ihfft(a, n=None, axis=-1, norm=None):\n if 
n is None:\n n = a.shape[axis]\n norm = _swap_direction(norm)\n output = ivy.conj(rfft(a, n, axis, norm=norm).ivy_array)\n return output\n\n\n@with_unsupported_dtypes({\"1.26.0 and below\": (\"float16\",)}, \"numpy\")\n@to_ivy_arrays_and_back\ndef rfft(a, n=None, axis=-1, norm=None):\n if norm is None:\n norm = \"backward\"\n a = ivy.array(a, dtype=ivy.float64)\n return ivy.dft(a, axis=axis, inverse=False, onesided=True, dft_length=n, norm=norm)\n\n\n@to_ivy_arrays_and_back\ndef rfftfreq(n, d=1.0):\n if not isinstance(\n n, (int, type(ivy.int8), type(ivy.int16), type(ivy.int32), type(ivy.int64))\n ):\n raise ValueError(\"n should be an integer\")\n\n val = 1.0 / (n * d)\n N = n // 2 + 1\n results = ivy.arange(0, N, dtype=int)\n return results * val\n\n\n@with_unsupported_dtypes({\"1.24.3 and below\": (\"float16\",)}, \"numpy\")\n@to_ivy_arrays_and_back\ndef rfftn(a, s=None, axes=None, norm=None):\n a = ivy.asarray(a, dtype=ivy.complex128)\n return ivy.rfftn(a, s=s, axes=axes, norm=norm)\n\n\n_SWAP_DIRECTION_MAP = {\n None: \"forward\",\n \"backward\": \"forward\",\n \"ortho\": \"ortho\",\n \"forward\": \"backward\",\n}\n", "path": "ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py"}], "after_files": [{"content": "import ivy\nfrom ivy.functional.frontends.numpy.func_wrapper import to_ivy_arrays_and_back\nfrom ivy.func_wrapper import with_unsupported_dtypes\n\n\n# --- Helpers --- #\n# --------------- #\n\n\ndef _swap_direction(norm):\n try:\n return _SWAP_DIRECTION_MAP[norm]\n except KeyError:\n raise ValueError(\n f'Invalid norm value {norm}; should be \"backward\", \"ortho\" or \"forward\".'\n ) from None\n\n\n# --- Main --- #\n# ------------ #\n\n\n@to_ivy_arrays_and_back\ndef fft(a, n=None, axis=-1, norm=None):\n return ivy.fft(ivy.astype(a, ivy.complex128), axis, norm=norm, n=n)\n\n\n@with_unsupported_dtypes({\"1.26.0 and below\": (\"int\",)}, \"numpy\")\n@to_ivy_arrays_and_back\ndef fftfreq(n, d=1.0):\n if not isinstance(\n n, (int, type(ivy.int8), type(ivy.int16), type(ivy.int32), type(ivy.int64))\n ):\n raise ValueError(\"n should be an integer\")\n\n N = (n - 1) // 2 + 1\n val = 1.0 / (n * d)\n results = ivy.empty(tuple([n]), dtype=int)\n\n p1 = ivy.arange(0, N, dtype=int)\n results[:N] = p1\n p2 = ivy.arange(-(n // 2), 0, dtype=int)\n results[N:] = p2\n\n return results * val\n\n\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes({\"1.26.0 and below\": (\"float16\",)}, \"numpy\")\ndef fftshift(x, axes=None):\n x = ivy.asarray(x)\n\n if axes is None:\n axes = tuple(range(x.ndim))\n shift = [(dim // 2) for dim in x.shape]\n elif isinstance(\n axes,\n (int, type(ivy.uint8), type(ivy.uint16), type(ivy.uint32), type(ivy.uint64)),\n ):\n shift = x.shape[axes] // 2\n else:\n shift = [(x.shape[ax] // 2) for ax in axes]\n\n roll = ivy.roll(x, shift, axis=axes)\n\n return roll\n\n\n@to_ivy_arrays_and_back\ndef ifft(a, n=None, axis=-1, norm=None):\n a = ivy.array(a, dtype=ivy.complex128)\n if norm is None:\n norm = \"backward\"\n return ivy.ifft(a, axis, norm=norm, n=n)\n\n\n@with_unsupported_dtypes({\"1.24.3 and below\": (\"float16\",)}, \"numpy\")\n@to_ivy_arrays_and_back\ndef ifft2(a, s=None, axes=(-2, -1), norm=None):\n a = ivy.asarray(a, dtype=ivy.complex128)\n a = ivy.ifftn(a, s=s, axes=axes, norm=norm)\n return a\n\n\n@with_unsupported_dtypes({\"1.24.3 and below\": (\"float16\",)}, \"numpy\")\n@to_ivy_arrays_and_back\ndef ifftn(a, s=None, axes=None, norm=None):\n a = ivy.asarray(a, dtype=ivy.complex128)\n a = ivy.ifftn(a, s=s, axes=axes, norm=norm)\n return 
a\n\n\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes({\"1.26.0 and below\": (\"float16\",)}, \"numpy\")\ndef ifftshift(x, axes=None):\n x = ivy.asarray(x)\n\n if axes is None:\n axes = tuple(range(x.ndim))\n shift = [-(dim // 2) for dim in x.shape]\n elif isinstance(\n axes,\n (int, type(ivy.uint8), type(ivy.uint16), type(ivy.uint32), type(ivy.uint64)),\n ):\n shift = -(x.shape[axes] // 2)\n else:\n shift = [-(x.shape[ax] // 2) for ax in axes]\n\n roll = ivy.roll(x, shift, axis=axes)\n\n return roll\n\n\n@with_unsupported_dtypes({\"1.26.0 and below\": (\"float16\",)}, \"numpy\")\n@to_ivy_arrays_and_back\ndef ihfft(a, n=None, axis=-1, norm=None):\n if n is None:\n n = a.shape[axis]\n norm = _swap_direction(norm)\n output = ivy.conj(rfft(a, n, axis, norm=norm).ivy_array)\n return output\n\n\n@with_unsupported_dtypes({\"1.26.0 and below\": (\"float16\",)}, \"numpy\")\n@to_ivy_arrays_and_back\ndef rfft(a, n=None, axis=-1, norm=None):\n if norm is None:\n norm = \"backward\"\n a = ivy.array(a, dtype=ivy.float64)\n return ivy.dft(a, axis=axis, inverse=False, onesided=True, dft_length=n, norm=norm)\n\n\n@to_ivy_arrays_and_back\ndef rfftfreq(n, d=1.0):\n if not isinstance(\n n, (int, type(ivy.int8), type(ivy.int16), type(ivy.int32), type(ivy.int64))\n ):\n raise ValueError(\"n should be an integer\")\n\n val = 1.0 / (n * d)\n N = n // 2 + 1\n results = ivy.arange(0, N, dtype=int)\n return results * val\n\n\n@with_unsupported_dtypes({\"1.24.3 and below\": (\"float16\",)}, \"numpy\")\n@to_ivy_arrays_and_back\ndef rfftn(a, s=None, axes=None, norm=None):\n a = ivy.asarray(a, dtype=ivy.complex128)\n return ivy.rfftn(a, s=s, axes=axes, norm=norm)\n\n\n_SWAP_DIRECTION_MAP = {\n None: \"forward\",\n \"backward\": \"forward\",\n \"ortho\": \"ortho\",\n \"forward\": \"backward\",\n}\n", "path": "ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py"}]}
| 1,938 | 257 |
gh_patches_debug_16715
|
rasdani/github-patches
|
git_diff
|
OpenEnergyPlatform__oeplatform-1196
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update API documentation on readthedocs?
The documentation on readthedocs does not provide any indication of whether it is up to date or when it was last updated; the copyright states 2016, which may lead users to think it is outdated.

Thus I think it would be useful to add such an indicator as the main OEP page links prominently and directly to the API documentation on readthedocs: https://oep-data-interface.readthedocs.io/en/latest/api/how_to.html
As a follow-up I wonder:
* Does this API documentation - and possibly also the rest on readthedocs - need an update? How does its API part relate to the API tutorials? Do we replicate content and effort?
cc: @stap-m @wingechr
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/source/conf.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # OpenEnergyPlatform documentation build configuration file, created by
4 # sphinx-quickstart on Fri Aug 12 20:13:24 2016.
5 #
6 # This file is execfile()d with the current directory set to its
7 # containing dir.
8 #
9 # Note that not all possible configuration values are present in this
10 # autogenerated file.
11 #
12 # All configuration values have a default; values that are commented out
13 # serve to show the default.
14
15 import os
16 import sys
17 from unittest.mock import MagicMock
18
19 import django
20
21 # try:
22 # from oeplatform.oeplatform.securitysettings import SECRET_KEY
23 # except:
24 # import logging
25 # logging.error("No securitysettings found. Setting defaults")
26 # DEBUG = True
27 # SECRET_KEY = '0'
28
29
30 # If extensions (or modules to document with autodoc) are in another directory,
31 # add these directories to sys.path here. If the directory is relative to the
32 # documentation root, use os.path.abspath to make it absolute, like shown here.
33 # sys.path.insert(0, os.path.abspath('.'))
34
35 # -- General configuration ------------------------------------------------
36
37 sys.path.insert(0, os.path.abspath(os.path.join("..", "..")))
38 os.environ["DJANGO_SETTINGS_MODULE"] = "oeplatform.settings"
39
40 django.setup()
41
42 # If your documentation needs a minimal Sphinx version, state it here.
43 # needs_sphinx = '1.0'
44
45 # Add any Sphinx extension module names here, as strings. They can be
46 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
47 # ones.
48 extensions = [
49 "sphinx.ext.autodoc",
50 "sphinx.ext.doctest",
51 "sphinx.ext.intersphinx",
52 "sphinx.ext.todo",
53 "sphinx.ext.coverage",
54 "sphinx.ext.ifconfig",
55 "sphinx.ext.viewcode",
56 "sphinx.ext.napoleon",
57 ]
58
59 # Add any paths that contain templates here, relative to this directory.
60 templates_path = ["_templates"]
61
62 # The suffix(es) of source filenames.
63 # You can specify multiple suffix as a list of string:
64 # source_suffix = ['.rst', '.md']
65 source_suffix = ".rst"
66
67 # The encoding of source files.
68 # source_encoding = 'utf-8-sig'
69
70 # The master toctree document.
71 master_doc = "index"
72
73 # General information about the project.
74 project = "OpenEnergyPlatform"
75 copyright = "2016, open_eGo"
76 author = "open_eGo"
77
78 # The version info for the project you're documenting, acts as replacement for
79 # |version| and |release|, also used in various other places throughout the
80 # built documents.
81 #
82 # The short X.Y version.
83 version = "0.0.3"
84 # The full version, including alpha/beta/rc tags.
85 release = "0.0.3"
86
87 # The language for content autogenerated by Sphinx. Refer to documentation
88 # for a list of supported languages.
89 #
90 # This is also used if you do content translation via gettext catalogs.
91 # Usually you set "language" from the command line for these cases.
92 language = None
93
94 # There are two options for replacing |today|: either, you set today to some
95 # non-false value, then it is used:
96 # today = ''
97 # Else, today_fmt is used as the format for a strftime call.
98 # today_fmt = '%B %d, %Y'
99
100 # List of patterns, relative to source directory, that match files and
101 # directories to ignore when looking for source files.
102 exclude_patterns = []
103
104 # The reST default role (used for this markup: `text`) to use for all
105 # documents.
106 # default_role = None
107
108 # If true, '()' will be appended to :func: etc. cross-reference text.
109 # add_function_parentheses = True
110
111 # If true, the current module name will be prepended to all description
112 # unit titles (such as .. function::).
113 # add_module_names = True
114
115 # If true, sectionauthor and moduleauthor directives will be shown in the
116 # output. They are ignored by default.
117 # show_authors = False
118
119 # The name of the Pygments (syntax highlighting) style to use.
120 pygments_style = "sphinx"
121
122 # A list of ignored prefixes for module index sorting.
123 # modindex_common_prefix = []
124
125 # If true, keep warnings as "system message" paragraphs in the built documents.
126 # keep_warnings = False
127
128 # If true, `todo` and `todoList` produce output, else they produce nothing.
129 todo_include_todos = True
130
131
132 # -- Options for HTML output ----------------------------------------------
133
134 # The theme to use for HTML and HTML Help pages. See the documentation for
135 # a list of builtin themes.
136 html_theme = "alabaster"
137
138 # Theme options are theme-specific and customize the look and feel of a theme
139 # further. For a list of options available for each theme, see the
140 # documentation.
141 # html_theme_options = {}
142
143 # Add any paths that contain custom themes here, relative to this directory.
144 # html_theme_path = []
145
146 # The name for this set of Sphinx documents. If None, it defaults to
147 # "<project> v<release> documentation".
148 # html_title = None
149
150 # A shorter title for the navigation bar. Default is the same as html_title.
151 # html_short_title = None
152
153 # The name of an image file (relative to this directory) to place at the top
154 # of the sidebar.
155 # html_logo = None
156
157 # The name of an image file (within the static path) to use as favicon of the
158 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
159 # pixels large.
160 # html_favicon = None
161
162 # Add any paths that contain custom static files (such as style sheets) here,
163 # relative to this directory. They are copied after the builtin static files,
164 # so a file named "default.css" will overwrite the builtin "default.css".
165 html_static_path = ["_static"]
166
167 # Add any extra paths that contain custom files (such as robots.txt or
168 # .htaccess) here, relative to this directory. These files are copied
169 # directly to the root of the documentation.
170 # html_extra_path = []
171
172 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
173 # using the given strftime format.
174 # html_last_updated_fmt = '%b %d, %Y'
175
176 # If true, SmartyPants will be used to convert quotes and dashes to
177 # typographically correct entities.
178 # html_use_smartypants = True
179
180 # Custom sidebar templates, maps document names to template names.
181 # html_sidebars = {}
182
183 # Additional templates that should be rendered to pages, maps page names to
184 # template names.
185 # html_additional_pages = {}
186
187 # If false, no module index is generated.
188 # html_domain_indices = True
189
190 # If false, no index is generated.
191 # html_use_index = True
192
193 # If true, the index is split into individual pages for each letter.
194 # html_split_index = False
195
196 # If true, links to the reST sources are added to the pages.
197 # html_show_sourcelink = True
198
199 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
200 # html_show_sphinx = True
201
202 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
203 # html_show_copyright = True
204
205 # If true, an OpenSearch description file will be output, and all pages will
206 # contain a <link> tag referring to it. The value of this option must be the
207 # base URL from which the finished HTML is served.
208 # html_use_opensearch = ''
209
210 # This is the file name suffix for HTML files (e.g. ".xhtml").
211 # html_file_suffix = None
212
213 # Language to be used for generating the HTML full-text search index.
214 # Sphinx supports the following languages:
215 # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
216 # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
217 # html_search_language = 'en'
218
219 # A dictionary with options for the search language support, empty by default.
220 # Now only 'ja' uses this config value
221 # html_search_options = {'type': 'default'}
222
223 # The name of a javascript file (relative to the configuration directory) that
224 # implements a search results scorer. If empty, the default will be used.
225 # html_search_scorer = 'scorer.js'
226
227 # Output file base name for HTML help builder.
228 htmlhelp_basename = "OpenEnergyPlatformdoc"
229
230 # -- Options for LaTeX output ---------------------------------------------
231
232 latex_elements = {
233 # # The paper size ('letterpaper' or 'a4paper').
234 # 'papersize': 'letterpaper',
235 # # The font size ('10pt', '11pt' or '12pt').
236 # 'pointsize': '10pt',
237 # # Additional stuff for the LaTeX preamble.
238 # 'preamble': '',
239 # # Latex figure (float) alignment
240 # 'figure_align': 'htbp',
241 }
242
243 # Grouping the document tree into LaTeX files. List of tuples
244 # (source start file, target name, title,
245 # author, documentclass [howto, manual, or own class]).
246 latex_documents = [
247 (
248 master_doc,
249 "OpenEnergyPlatform.tex",
250 "OpenEnergyPlatform Documentation",
251 "open\\_eGo",
252 "manual",
253 )
254 ]
255
256 # The name of an image file (relative to this directory) to place at the top of
257 # the title page.
258 # latex_logo = None
259
260 # For "manual" documents, if this is true, then toplevel headings are parts,
261 # not chapters.
262 # latex_use_parts = False
263
264 # If true, show page references after internal links.
265 # latex_show_pagerefs = False
266
267 # If true, show URL addresses after external links.
268 # latex_show_urls = False
269
270 # Documents to append as an appendix to all manuals.
271 # latex_appendices = []
272
273 # If false, no module index is generated.
274 # latex_domain_indices = True
275
276
277 # -- Options for manual page output ---------------------------------------
278
279 # One entry per manual page. List of tuples
280 # (source start file, name, description, authors, manual section).
281 man_pages = [
282 (master_doc, "openenergyplatform", "OpenEnergyPlatform Documentation", [author], 1)
283 ]
284
285 # If true, show URL addresses after external links.
286 # man_show_urls = False
287
288
289 # -- Options for Texinfo output -------------------------------------------
290
291 # Grouping the document tree into Texinfo files. List of tuples
292 # (source start file, target name, title, author,
293 # dir menu entry, description, category)
294 texinfo_documents = [
295 (
296 master_doc,
297 "OpenEnergyPlatform",
298 "OpenEnergyPlatform Documentation",
299 author,
300 "OpenEnergyPlatform",
301 "One line description of project.",
302 "Miscellaneous",
303 )
304 ]
305
306 # Documents to append as an appendix to all manuals.
307 # texinfo_appendices = []
308
309 # If false, no module index is generated.
310 # texinfo_domain_indices = True
311
312 # How to display URL addresses: 'footnote', 'no', or 'inline'.
313 # texinfo_show_urls = 'footnote'
314
315 # If true, do not generate a @detailmenu in the "Top" node's menu.
316 # texinfo_no_detailmenu = False
317
318
319 # Example configuration for intersphinx: refer to the Python standard library.
320 intersphinx_mapping = {"https://docs.python.org/": None}
321
322
323 class Mock(MagicMock):
324 @classmethod
325 def __getattr__(cls, name):
326 if name == "_mock_methods":
327 return name._mock_methods
328 else:
329 return Mock()
330
331
332 MOCK_MODULES = ["shapely"]
333 sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
334
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/source/conf.py b/docs/source/conf.py
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -72,17 +72,17 @@
# General information about the project.
project = "OpenEnergyPlatform"
-copyright = "2016, open_eGo"
-author = "open_eGo"
+copyright = "2023, SIROP"
+author = "Open Energy Family"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
-version = "0.0.3"
+version = "0.0.4"
# The full version, including alpha/beta/rc tags.
-release = "0.0.3"
+release = "0.0.4"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
|
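Beyond the metadata bump shown in the diff above, one way to surface a "last updated" indicator on the rendered pages — using an option that already appears, commented out, in the conf.py listed earlier — is a one-line sketch like this (the date format string is only an example):

```python
# docs/source/conf.py — insert a "Last updated on:" timestamp at every page bottom
html_last_updated_fmt = "%b %d, %Y"
```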
{"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -72,17 +72,17 @@\n \n # General information about the project.\n project = \"OpenEnergyPlatform\"\n-copyright = \"2016, open_eGo\"\n-author = \"open_eGo\"\n+copyright = \"2023, SIROP\"\n+author = \"Open Energy Family\"\n \n # The version info for the project you're documenting, acts as replacement for\n # |version| and |release|, also used in various other places throughout the\n # built documents.\n #\n # The short X.Y version.\n-version = \"0.0.3\"\n+version = \"0.0.4\"\n # The full version, including alpha/beta/rc tags.\n-release = \"0.0.3\"\n+release = \"0.0.4\"\n \n # The language for content autogenerated by Sphinx. Refer to documentation\n # for a list of supported languages.\n", "issue": "Update API documentation on readthedocs?\nThe documentation on readthedocs does not provide an indication on whether it is up to date and when it was last updated, the copyright states 2016 which may lead users to think it is outdated.\r\n \r\n\r\nThus I think it would be useful to add such an indicator as the main OEP page links prominently and directly to the API documentation on readthedocs: https://oep-data-interface.readthedocs.io/en/latest/api/how_to.html\r\n\r\nAs a follow-up I wonder: \r\n* Does this API documentation - and possibly also the rest on readthedocs - need an update? How does its API part relate to the API tutorials? Do we replicate content and effort? \r\n\r\ncc: @stap-m @wingechr \n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# OpenEnergyPlatform documentation build configuration file, created by\n# sphinx-quickstart on Fri Aug 12 20:13:24 2016.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport os\nimport sys\nfrom unittest.mock import MagicMock\n\nimport django\n\n# try:\n# from oeplatform.oeplatform.securitysettings import SECRET_KEY\n# except:\n# import logging\n# logging.error(\"No securitysettings found. Setting defaults\")\n# DEBUG = True\n# SECRET_KEY = '0'\n\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n# sys.path.insert(0, os.path.abspath('.'))\n\n# -- General configuration ------------------------------------------------\n\nsys.path.insert(0, os.path.abspath(os.path.join(\"..\", \"..\")))\nos.environ[\"DJANGO_SETTINGS_MODULE\"] = \"oeplatform.settings\"\n\ndjango.setup()\n\n# If your documentation needs a minimal Sphinx version, state it here.\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.ifconfig\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.napoleon\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n# source_suffix = ['.rst', '.md']\nsource_suffix = \".rst\"\n\n# The encoding of source files.\n# source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"OpenEnergyPlatform\"\ncopyright = \"2016, open_eGo\"\nauthor = \"open_eGo\"\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = \"0.0.3\"\n# The full version, including alpha/beta/rc tags.\nrelease = \"0.0.3\"\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n# today = ''\n# Else, today_fmt is used as the format for a strftime call.\n# today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = []\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n# default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n# add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n# add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n# show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# A list of ignored prefixes for module index sorting.\n# modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n# keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = \"alabaster\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n# html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n# html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n# html_title = None\n\n# A shorter title for the navigation bar. 
Default is the same as html_title.\n# html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n# html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n# html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n# html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n# html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n# html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n# html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n# html_additional_pages = {}\n\n# If false, no module index is generated.\n# html_domain_indices = True\n\n# If false, no index is generated.\n# html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n# html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n# html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n# html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n# html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n# html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n# html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'\n# html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# Now only 'ja' uses this config value\n# html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. If empty, the default will be used.\n# html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"OpenEnergyPlatformdoc\"\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # # The paper size ('letterpaper' or 'a4paper').\n # 'papersize': 'letterpaper',\n # # The font size ('10pt', '11pt' or '12pt').\n # 'pointsize': '10pt',\n # # Additional stuff for the LaTeX preamble.\n # 'preamble': '',\n # # Latex figure (float) alignment\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (\n master_doc,\n \"OpenEnergyPlatform.tex\",\n \"OpenEnergyPlatform Documentation\",\n \"open\\\\_eGo\",\n \"manual\",\n )\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n# latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n# latex_use_parts = False\n\n# If true, show page references after internal links.\n# latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n# latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n# latex_appendices = []\n\n# If false, no module index is generated.\n# latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, \"openenergyplatform\", \"OpenEnergyPlatform Documentation\", [author], 1)\n]\n\n# If true, show URL addresses after external links.\n# man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"OpenEnergyPlatform\",\n \"OpenEnergyPlatform Documentation\",\n author,\n \"OpenEnergyPlatform\",\n \"One line description of project.\",\n \"Miscellaneous\",\n )\n]\n\n# Documents to append as an appendix to all manuals.\n# texinfo_appendices = []\n\n# If false, no module index is generated.\n# texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n# texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n# texinfo_no_detailmenu = False\n\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\"https://docs.python.org/\": None}\n\n\nclass Mock(MagicMock):\n @classmethod\n def __getattr__(cls, name):\n if name == \"_mock_methods\":\n return name._mock_methods\n else:\n return Mock()\n\n\nMOCK_MODULES = [\"shapely\"]\nsys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)\n", "path": "docs/source/conf.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# OpenEnergyPlatform documentation build configuration file, created by\n# sphinx-quickstart on Fri Aug 12 20:13:24 2016.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport os\nimport sys\nfrom unittest.mock import MagicMock\n\nimport django\n\n# try:\n# from oeplatform.oeplatform.securitysettings import SECRET_KEY\n# except:\n# import logging\n# logging.error(\"No securitysettings found. Setting defaults\")\n# DEBUG = True\n# SECRET_KEY = '0'\n\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. 
If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n# sys.path.insert(0, os.path.abspath('.'))\n\n# -- General configuration ------------------------------------------------\n\nsys.path.insert(0, os.path.abspath(os.path.join(\"..\", \"..\")))\nos.environ[\"DJANGO_SETTINGS_MODULE\"] = \"oeplatform.settings\"\n\ndjango.setup()\n\n# If your documentation needs a minimal Sphinx version, state it here.\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.ifconfig\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.napoleon\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n# source_suffix = ['.rst', '.md']\nsource_suffix = \".rst\"\n\n# The encoding of source files.\n# source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"OpenEnergyPlatform\"\ncopyright = \"2023, SIROP\"\nauthor = \"Open Energy Family\"\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = \"0.0.4\"\n# The full version, including alpha/beta/rc tags.\nrelease = \"0.0.4\"\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n# today = ''\n# Else, today_fmt is used as the format for a strftime call.\n# today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = []\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n# default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n# add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n# add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n# show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# A list of ignored prefixes for module index sorting.\n# modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n# keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\nhtml_theme = \"alabaster\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n# html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n# html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n# html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n# html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n# html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n# html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n# html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n# html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n# html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n# html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n# html_additional_pages = {}\n\n# If false, no module index is generated.\n# html_domain_indices = True\n\n# If false, no index is generated.\n# html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n# html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n# html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n# html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n# html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n# html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n# html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'\n# html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# Now only 'ja' uses this config value\n# html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. 
If empty, the default will be used.\n# html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"OpenEnergyPlatformdoc\"\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # # The paper size ('letterpaper' or 'a4paper').\n # 'papersize': 'letterpaper',\n # # The font size ('10pt', '11pt' or '12pt').\n # 'pointsize': '10pt',\n # # Additional stuff for the LaTeX preamble.\n # 'preamble': '',\n # # Latex figure (float) alignment\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (\n master_doc,\n \"OpenEnergyPlatform.tex\",\n \"OpenEnergyPlatform Documentation\",\n \"open\\\\_eGo\",\n \"manual\",\n )\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n# latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n# latex_use_parts = False\n\n# If true, show page references after internal links.\n# latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n# latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n# latex_appendices = []\n\n# If false, no module index is generated.\n# latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, \"openenergyplatform\", \"OpenEnergyPlatform Documentation\", [author], 1)\n]\n\n# If true, show URL addresses after external links.\n# man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"OpenEnergyPlatform\",\n \"OpenEnergyPlatform Documentation\",\n author,\n \"OpenEnergyPlatform\",\n \"One line description of project.\",\n \"Miscellaneous\",\n )\n]\n\n# Documents to append as an appendix to all manuals.\n# texinfo_appendices = []\n\n# If false, no module index is generated.\n# texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n# texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n# texinfo_no_detailmenu = False\n\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\"https://docs.python.org/\": None}\n\n\nclass Mock(MagicMock):\n @classmethod\n def __getattr__(cls, name):\n if name == \"_mock_methods\":\n return name._mock_methods\n else:\n return Mock()\n\n\nMOCK_MODULES = [\"shapely\"]\nsys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)\n", "path": "docs/source/conf.py"}]}
| 3,979 | 216 |
gh_patches_debug_21336
|
rasdani/github-patches
|
git_diff
|
aws-cloudformation__cfn-lint-2222
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add correct size checking to IAM ManagedPolicy resource types
*cfn-lint version: (cfn-lint 0.58.1)*
*Description of issue.*
The PolicyDocument specified in an IAM Managed Policy has a maximum length of 6,144 characters (after converted to JSON and minified).
https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html
When creating an AWS::IAM::ManagedPolicy through CloudFormation and using conditionals to differentiate between three environments, cfn-lint throws the error **E3502 PolicyDocument JSON text cannot be longer than 6144 characters**.
That happens because the policy as written in CloudFormation, with conditional branches for all three environments, exceeds the 6,144-character limit; but when the stack is created only one of the conditional paths is used, so the resulting policy is not actually over 6,144 characters.
This is the sample code in CloudFormation:
```
ResourcePolicy:
Type: AWS::IAM::ManagedPolicy
Properties:
ManagedPolicyName: !Sub
- ResourcePolicy
PolicyDocument:
Version: '2012-10-17'
Statement:
- Sid: LambdaExecutionLogs
Effect: Allow
Action:
- logs:CreateLogGroup
- logs:CreateLogStream
- logs:PutLogEvents
Resource:
!Split
- ","
- !Join
- ","
- - !If
- IsDev
- !Join
- ","
-
- !Sub
arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdadev1-${EnvNameCamelCase}
- !Sub
arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdadev1-:*
- !Sub
arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdadev2
- !Sub
arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdadev2:*
- !Ref AWS::NoValue
- !If
- IsUat
- !Join
- ","
-
- !Sub
arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdauat1
- !Sub
arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdauat1:*
- !Sub
arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdauat2
- !Sub
arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdauat2:*
- !Sub
arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdauat3
- !Sub
arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdauat3:*
- !Ref AWS::NoValue
- !If
- IsProd
- !Join
- ","
-
- !Sub
arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdaprod
- !Sub
arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdaprod:*
- !Ref AWS::NoValue
- Sid: ........
. . .
```
So the missing feature is to check the size of the policy that will actually be created, not of the source CloudFormation code; to do that, the linter needs to resolve the conditionals and evaluate the final resource definition before measuring its length.
--- END ISSUE ---
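(Editorial note, not part of the original issue: the 6,144-character limit applies to the minified JSON form of the policy, which is the same measurement the rule below performs with `json.dumps(..., separators=(',', ':'))`. A minimal, self-contained sketch of that check, using a hypothetical already-resolved policy document, could look like this.)

```python
import json

# Hypothetical, already-resolved policy document (illustration only).
policy_document = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Sid": "LambdaExecutionLogs",
            "Effect": "Allow",
            "Action": ["logs:CreateLogGroup", "logs:CreateLogStream", "logs:PutLogEvents"],
            "Resource": ["arn:aws:logs:us-east-1:123456789012:log-group:/aws/lambda/example"],
        }
    ],
}

# Minify (no spaces) before measuring, matching json.dumps(..., separators=(',', ':'))
# as used by the rule below.
minified = json.dumps(policy_document, separators=(",", ":"))
print(len(minified), "characters; the IAM managed-policy limit is 6144")
assert len(minified) <= 6144
```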
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/rules/resources/properties/JsonSize.py`
Content:
```
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 import datetime
6 import json
7 import re
8 import six
9 import cfnlint.helpers
10 from cfnlint.rules import CloudFormationLintRule
11 from cfnlint.rules import RuleMatch
12 from cfnlint.helpers import RESOURCE_SPECS
13
14
15 class JsonSize(CloudFormationLintRule):
16 """Check if JSON Object Size is within the specified length"""
17 id = 'E3502'
18 shortdesc = 'Check if a JSON Object is within size limits'
19 description = 'Validate properties that are JSON values so that their length is within the limits'
20 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cloudformation-limits.html'
21 tags = ['resources', 'limits', 'json']
22
23 def initialize(self, cfn):
24 """Initialize the rule"""
25 for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes'):
26 self.resource_property_types.append(resource_type_spec)
27 for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes'):
28 self.resource_sub_property_types.append(property_type_spec)
29
30 def _serialize_date(self, obj):
31 if isinstance(obj, datetime.date):
32 return obj.isoformat()
33 raise TypeError('Object of type {} is not JSON serializable'.format(obj.__class__.__name__))
34
35 def check_value(self, value, path, prop, cfn, specs):
36 """Check Role.AssumeRolePolicyDocument is within limits"""
37 matches = []
38
39 def remove_functions(obj):
40 """ Replaces intrinsic functions with string """
41 if isinstance(obj, dict):
42 new_obj = {}
43 if len(obj) == 1:
44 for k, v in obj.items():
45 if k in cfnlint.helpers.FUNCTIONS:
46 if k == 'Fn::Sub':
47 if isinstance(v, six.string_types):
48 return re.sub(r'\${.*}', '', v)
49 if isinstance(v, list):
50 return re.sub(r'\${.*}', '', v[0])
51 else:
52 new_obj[k] = remove_functions(v)
53 return new_obj
54 else:
55 for k, v in obj.items():
56 new_obj[k] = remove_functions(v)
57 return new_obj
58 elif isinstance(obj, list):
59 new_list = []
60 for v in obj:
61 new_list.append(remove_functions(v))
62 return new_list
63
64 return obj
65
66 scenarios = cfn.get_object_without_nested_conditions(value, path)
67 json_max_size = specs.get('JsonMax')
68 for scenario in scenarios:
69 j = remove_functions(scenario['Object'][prop])
70 if isinstance(j, six.string_types):
71 try:
72 j = json.loads(j)
73 except: #pylint: disable=bare-except
74 continue
75 if len(json.dumps(j, separators=(',', ':'), default=self._serialize_date)) > json_max_size:
76 if scenario['Scenario']:
77 message = '{0} JSON text cannot be longer than {1} characters when {2}'
78 scenario_text = ' and '.join(['when condition "%s" is %s' % (
79 k, v) for (k, v) in scenario['Scenario'].items()])
80 matches.append(
81 RuleMatch(path + [prop], message.format(prop, json_max_size, scenario_text)))
82 else:
83 message = '{0} JSON text cannot be longer than {1} characters'
84 matches.append(
85 RuleMatch(
86 path + [prop],
87 message.format(prop, json_max_size),
88 )
89 )
90
91 return matches
92
93 def check(self, cfn, properties, specs, path):
94 """Check itself"""
95 matches = []
96 for p_value, p_path in properties.items_safe(path[:]):
97 for prop in p_value:
98 if prop in specs:
99 value = specs.get(prop).get('Value', {})
100 if value:
101 value_type = value.get('ValueType', '')
102 primitive_type = specs.get(prop).get('PrimitiveType')
103 if primitive_type == 'Json':
104 matches.extend(
105 self.check_value(
106 p_value, p_path, prop, cfn,
107 RESOURCE_SPECS.get(cfn.regions[0]).get(
108 'ValueTypes').get(value_type, {})
109 )
110 )
111 return matches
112
113 def match_resource_sub_properties(self, properties, property_type, path, cfn):
114 """Match for sub properties"""
115 matches = list()
116
117 specs = RESOURCE_SPECS.get(cfn.regions[0]).get(
118 'PropertyTypes').get(property_type, {}).get('Properties', {})
119 matches.extend(self.check(cfn, properties, specs, path))
120
121 return matches
122
123 def match_resource_properties(self, properties, resource_type, path, cfn):
124 """Check CloudFormation Properties"""
125 matches = list()
126
127 specs = RESOURCE_SPECS.get(cfn.regions[0]).get(
128 'ResourceTypes').get(resource_type, {}).get('Properties', {})
129 matches.extend(self.check(cfn, properties, specs, path))
130
131 return matches
132
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/cfnlint/rules/resources/properties/JsonSize.py b/src/cfnlint/rules/resources/properties/JsonSize.py
--- a/src/cfnlint/rules/resources/properties/JsonSize.py
+++ b/src/cfnlint/rules/resources/properties/JsonSize.py
@@ -36,6 +36,7 @@
"""Check Role.AssumeRolePolicyDocument is within limits"""
matches = []
+ #pylint: disable=too-many-return-statements
def remove_functions(obj):
""" Replaces intrinsic functions with string """
if isinstance(obj, dict):
@@ -48,6 +49,8 @@
return re.sub(r'\${.*}', '', v)
if isinstance(v, list):
return re.sub(r'\${.*}', '', v[0])
+ else:
+ return ''
else:
new_obj[k] = remove_functions(v)
return new_obj
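(Editorial note on the diff above, not part of the dataset record: before measuring length, the rule replaces intrinsic-function nodes so that unresolved functions do not distort the count. A heavily simplified, standalone sketch of that idea follows; it is not the cfn-lint implementation, and the names and matching rules are assumptions made for illustration.)

```python
# Simplified illustration of stripping single-key intrinsic-function nodes
# (e.g. {"Fn::If": ...}, {"Ref": ...}) before measuring a policy's JSON size.
# NOT the cfn-lint code: the real rule checks cfnlint.helpers.FUNCTIONS and
# treats Fn::Sub specially.
def strip_intrinsics(node):
    if isinstance(node, dict):
        if len(node) == 1 and next(iter(node)).startswith(("Fn::", "Ref")):
            return ""  # placeholder for an unresolved intrinsic function
        return {k: strip_intrinsics(v) for k, v in node.items()}
    if isinstance(node, list):
        return [strip_intrinsics(v) for v in node]
    return node

sample = {"Resource": {"Fn::If": ["IsDev", "arn:aws:logs:...:dev", {"Ref": "AWS::NoValue"}]}}
print(strip_intrinsics(sample))  # {'Resource': ''}
```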
|
{"golden_diff": "diff --git a/src/cfnlint/rules/resources/properties/JsonSize.py b/src/cfnlint/rules/resources/properties/JsonSize.py\n--- a/src/cfnlint/rules/resources/properties/JsonSize.py\n+++ b/src/cfnlint/rules/resources/properties/JsonSize.py\n@@ -36,6 +36,7 @@\n \"\"\"Check Role.AssumeRolePolicyDocument is within limits\"\"\"\n matches = []\n \n+ #pylint: disable=too-many-return-statements\n def remove_functions(obj):\n \"\"\" Replaces intrinsic functions with string \"\"\"\n if isinstance(obj, dict):\n@@ -48,6 +49,8 @@\n return re.sub(r'\\${.*}', '', v)\n if isinstance(v, list):\n return re.sub(r'\\${.*}', '', v[0])\n+ else:\n+ return ''\n else:\n new_obj[k] = remove_functions(v)\n return new_obj\n", "issue": "Add correct size checking to IAM ManagedPolicy resource types\n*cfn-lint version: (cfn-lint 0.58.1)*\r\n\r\n*Description of issue.*\r\n\r\nThe PolicyDocument specified in an IAM Managed Policy has a maximum length of 6,144 characters (after converted to JSON and minified).\r\nhttps://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html\r\n\r\nWhen creating through CloudFormation an AWS::IAM::ManagedPolicy and using conditionals to leverage a 3 level environment creation differentiation, is throwing an error **E3502 PolicyDocument JSON text cannot be longer than 6144 characters**.\r\n\r\nThat happens because the policy defined in CloudFormation with the 3 level conditionals exceeds the 6144 characters limit, but when it is created it only uses 1 of the paths in the conditionals so the size is not really over 6144 characters.\r\n\r\nThis is the sample code in CloudFormation:\r\n```\r\n ResourcePolicy:\r\n Type: AWS::IAM::ManagedPolicy\r\n Properties:\r\n ManagedPolicyName: !Sub\r\n - ResourcePolicy\r\n PolicyDocument:\r\n Version: '2012-10-17'\r\n Statement:\r\n - Sid: LambdaExecutionLogs\r\n Effect: Allow\r\n Action:\r\n - logs:CreateLogGroup\r\n - logs:CreateLogStream\r\n - logs:PutLogEvents\r\n Resource:\r\n !Split\r\n - \",\"\r\n - !Join\r\n - \",\"\r\n - - !If\r\n - IsDev\r\n - !Join\r\n - \",\"\r\n - \r\n - !Sub\r\n arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdadev1-${EnvNameCamelCase}\r\n - !Sub\r\n arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdadev1-:*\r\n - !Sub\r\n arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdadev2\r\n - !Sub\r\n arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdadev2:*\r\n - !Ref AWS::NoValue\r\n - !If\r\n - IsUat\r\n - !Join\r\n - \",\"\r\n - \r\n - !Sub\r\n arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdauat1\r\n - !Sub\r\n arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdauat1:*\r\n - !Sub\r\n arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdauat2\r\n - !Sub\r\n arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdauat2:*\r\n - !Sub\r\n arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdauat3\r\n - !Sub\r\n arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdauat3:*\r\n - !Ref AWS::NoValue\r\n - !If\r\n - IsProd\r\n - !Join\r\n - \",\"\r\n - \r\n - !Sub\r\n arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdaprod\r\n - !Sub\r\n arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdaprod:*\r\n - !Ref AWS::NoValue\r\n - Sid: ........\r\n . . 
.\r\n```\r\n\r\nSo the missing feature is to be able to check the size of the policy which is going to be finally created, not the source CloudFormation code, so in that regard it needs to evaluate the parameter and resolve the conditionals to end up with the final resource code to be created.\r\n\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport datetime\nimport json\nimport re\nimport six\nimport cfnlint.helpers\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\nfrom cfnlint.helpers import RESOURCE_SPECS\n\n\nclass JsonSize(CloudFormationLintRule):\n \"\"\"Check if JSON Object Size is within the specified length\"\"\"\n id = 'E3502'\n shortdesc = 'Check if a JSON Object is within size limits'\n description = 'Validate properties that are JSON values so that their length is within the limits'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cloudformation-limits.html'\n tags = ['resources', 'limits', 'json']\n\n def initialize(self, cfn):\n \"\"\"Initialize the rule\"\"\"\n for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes'):\n self.resource_property_types.append(resource_type_spec)\n for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes'):\n self.resource_sub_property_types.append(property_type_spec)\n\n def _serialize_date(self, obj):\n if isinstance(obj, datetime.date):\n return obj.isoformat()\n raise TypeError('Object of type {} is not JSON serializable'.format(obj.__class__.__name__))\n\n def check_value(self, value, path, prop, cfn, specs):\n \"\"\"Check Role.AssumeRolePolicyDocument is within limits\"\"\"\n matches = []\n\n def remove_functions(obj):\n \"\"\" Replaces intrinsic functions with string \"\"\"\n if isinstance(obj, dict):\n new_obj = {}\n if len(obj) == 1:\n for k, v in obj.items():\n if k in cfnlint.helpers.FUNCTIONS:\n if k == 'Fn::Sub':\n if isinstance(v, six.string_types):\n return re.sub(r'\\${.*}', '', v)\n if isinstance(v, list):\n return re.sub(r'\\${.*}', '', v[0])\n else:\n new_obj[k] = remove_functions(v)\n return new_obj\n else:\n for k, v in obj.items():\n new_obj[k] = remove_functions(v)\n return new_obj\n elif isinstance(obj, list):\n new_list = []\n for v in obj:\n new_list.append(remove_functions(v))\n return new_list\n\n return obj\n\n scenarios = cfn.get_object_without_nested_conditions(value, path)\n json_max_size = specs.get('JsonMax')\n for scenario in scenarios:\n j = remove_functions(scenario['Object'][prop])\n if isinstance(j, six.string_types):\n try:\n j = json.loads(j)\n except: #pylint: disable=bare-except\n continue\n if len(json.dumps(j, separators=(',', ':'), default=self._serialize_date)) > json_max_size:\n if scenario['Scenario']:\n message = '{0} JSON text cannot be longer than {1} characters when {2}'\n scenario_text = ' and '.join(['when condition \"%s\" is %s' % (\n k, v) for (k, v) in scenario['Scenario'].items()])\n matches.append(\n RuleMatch(path + [prop], message.format(prop, json_max_size, scenario_text)))\n else:\n message = '{0} JSON text cannot be longer than {1} characters'\n matches.append(\n RuleMatch(\n path + [prop],\n message.format(prop, json_max_size),\n )\n )\n\n return matches\n\n def check(self, cfn, properties, specs, path):\n \"\"\"Check itself\"\"\"\n matches = []\n for p_value, p_path in properties.items_safe(path[:]):\n for prop in p_value:\n if prop in specs:\n value = 
specs.get(prop).get('Value', {})\n if value:\n value_type = value.get('ValueType', '')\n primitive_type = specs.get(prop).get('PrimitiveType')\n if primitive_type == 'Json':\n matches.extend(\n self.check_value(\n p_value, p_path, prop, cfn,\n RESOURCE_SPECS.get(cfn.regions[0]).get(\n 'ValueTypes').get(value_type, {})\n )\n )\n return matches\n\n def match_resource_sub_properties(self, properties, property_type, path, cfn):\n \"\"\"Match for sub properties\"\"\"\n matches = list()\n\n specs = RESOURCE_SPECS.get(cfn.regions[0]).get(\n 'PropertyTypes').get(property_type, {}).get('Properties', {})\n matches.extend(self.check(cfn, properties, specs, path))\n\n return matches\n\n def match_resource_properties(self, properties, resource_type, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = list()\n\n specs = RESOURCE_SPECS.get(cfn.regions[0]).get(\n 'ResourceTypes').get(resource_type, {}).get('Properties', {})\n matches.extend(self.check(cfn, properties, specs, path))\n\n return matches\n", "path": "src/cfnlint/rules/resources/properties/JsonSize.py"}], "after_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport datetime\nimport json\nimport re\nimport six\nimport cfnlint.helpers\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\nfrom cfnlint.helpers import RESOURCE_SPECS\n\n\nclass JsonSize(CloudFormationLintRule):\n \"\"\"Check if JSON Object Size is within the specified length\"\"\"\n id = 'E3502'\n shortdesc = 'Check if a JSON Object is within size limits'\n description = 'Validate properties that are JSON values so that their length is within the limits'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cloudformation-limits.html'\n tags = ['resources', 'limits', 'json']\n\n def initialize(self, cfn):\n \"\"\"Initialize the rule\"\"\"\n for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes'):\n self.resource_property_types.append(resource_type_spec)\n for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes'):\n self.resource_sub_property_types.append(property_type_spec)\n\n def _serialize_date(self, obj):\n if isinstance(obj, datetime.date):\n return obj.isoformat()\n raise TypeError('Object of type {} is not JSON serializable'.format(obj.__class__.__name__))\n\n def check_value(self, value, path, prop, cfn, specs):\n \"\"\"Check Role.AssumeRolePolicyDocument is within limits\"\"\"\n matches = []\n\n #pylint: disable=too-many-return-statements\n def remove_functions(obj):\n \"\"\" Replaces intrinsic functions with string \"\"\"\n if isinstance(obj, dict):\n new_obj = {}\n if len(obj) == 1:\n for k, v in obj.items():\n if k in cfnlint.helpers.FUNCTIONS:\n if k == 'Fn::Sub':\n if isinstance(v, six.string_types):\n return re.sub(r'\\${.*}', '', v)\n if isinstance(v, list):\n return re.sub(r'\\${.*}', '', v[0])\n else:\n return ''\n else:\n new_obj[k] = remove_functions(v)\n return new_obj\n else:\n for k, v in obj.items():\n new_obj[k] = remove_functions(v)\n return new_obj\n elif isinstance(obj, list):\n new_list = []\n for v in obj:\n new_list.append(remove_functions(v))\n return new_list\n\n return obj\n\n scenarios = cfn.get_object_without_nested_conditions(value, path)\n json_max_size = specs.get('JsonMax')\n for scenario in scenarios:\n j = remove_functions(scenario['Object'][prop])\n if isinstance(j, six.string_types):\n try:\n j = json.loads(j)\n except: 
#pylint: disable=bare-except\n continue\n if len(json.dumps(j, separators=(',', ':'), default=self._serialize_date)) > json_max_size:\n if scenario['Scenario']:\n message = '{0} JSON text cannot be longer than {1} characters when {2}'\n scenario_text = ' and '.join(['when condition \"%s\" is %s' % (\n k, v) for (k, v) in scenario['Scenario'].items()])\n matches.append(\n RuleMatch(path + [prop], message.format(prop, json_max_size, scenario_text)))\n else:\n message = '{0} JSON text cannot be longer than {1} characters'\n matches.append(\n RuleMatch(\n path + [prop],\n message.format(prop, json_max_size),\n )\n )\n\n return matches\n\n def check(self, cfn, properties, specs, path):\n \"\"\"Check itself\"\"\"\n matches = []\n for p_value, p_path in properties.items_safe(path[:]):\n for prop in p_value:\n if prop in specs:\n value = specs.get(prop).get('Value', {})\n if value:\n value_type = value.get('ValueType', '')\n primitive_type = specs.get(prop).get('PrimitiveType')\n if primitive_type == 'Json':\n matches.extend(\n self.check_value(\n p_value, p_path, prop, cfn,\n RESOURCE_SPECS.get(cfn.regions[0]).get(\n 'ValueTypes').get(value_type, {})\n )\n )\n return matches\n\n def match_resource_sub_properties(self, properties, property_type, path, cfn):\n \"\"\"Match for sub properties\"\"\"\n matches = list()\n\n specs = RESOURCE_SPECS.get(cfn.regions[0]).get(\n 'PropertyTypes').get(property_type, {}).get('Properties', {})\n matches.extend(self.check(cfn, properties, specs, path))\n\n return matches\n\n def match_resource_properties(self, properties, resource_type, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = list()\n\n specs = RESOURCE_SPECS.get(cfn.regions[0]).get(\n 'ResourceTypes').get(resource_type, {}).get('Properties', {})\n matches.extend(self.check(cfn, properties, specs, path))\n\n return matches\n", "path": "src/cfnlint/rules/resources/properties/JsonSize.py"}]}
| 2,536 | 197 |
gh_patches_debug_15289
|
rasdani/github-patches
|
git_diff
|
scrapy__scrapy-5482
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
scrapy parse -h throws error
### Description
Running `scrapy parse -h` from inside a project throws an error.
### Steps to Reproduce
1. `scrapy startproject example`
2. `cd example`
3. `scrapy parse -h`
**Expected behavior:** It should show the help message output for the `parse` command
**Actual behavior:** Throws error and displays traceback information
**Reproduces how often:** Every time I tried it.
### Versions
v2.6.1
### Additional context
I identified the issue and will submit a PR shortly.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/commands/parse.py`
Content:
```
1 import json
2 import logging
3 from typing import Dict
4
5 from itemadapter import is_item, ItemAdapter
6 from w3lib.url import is_url
7
8 from scrapy.commands import BaseRunSpiderCommand
9 from scrapy.http import Request
10 from scrapy.utils import display
11 from scrapy.utils.spider import iterate_spider_output, spidercls_for_request
12 from scrapy.exceptions import UsageError
13
14
15 logger = logging.getLogger(__name__)
16
17
18 class Command(BaseRunSpiderCommand):
19 requires_project = True
20
21 spider = None
22 items: Dict[int, list] = {}
23 requests: Dict[int, list] = {}
24
25 first_response = None
26
27 def syntax(self):
28 return "[options] <url>"
29
30 def short_desc(self):
31 return "Parse URL (using its spider) and print the results"
32
33 def add_options(self, parser):
34 BaseRunSpiderCommand.add_options(self, parser)
35 parser.add_argument("--spider", dest="spider", default=None,
36 help="use this spider without looking for one")
37 parser.add_argument("--pipelines", action="store_true",
38 help="process items through pipelines")
39 parser.add_argument("--nolinks", dest="nolinks", action="store_true",
40 help="don't show links to follow (extracted requests)")
41 parser.add_argument("--noitems", dest="noitems", action="store_true",
42 help="don't show scraped items")
43 parser.add_argument("--nocolour", dest="nocolour", action="store_true",
44 help="avoid using pygments to colorize the output")
45 parser.add_argument("-r", "--rules", dest="rules", action="store_true",
46 help="use CrawlSpider rules to discover the callback")
47 parser.add_argument("-c", "--callback", dest="callback",
48 help="use this callback for parsing, instead looking for a callback")
49 parser.add_argument("-m", "--meta", dest="meta",
50 help="inject extra meta into the Request, it must be a valid raw json string")
51 parser.add_argument("--cbkwargs", dest="cbkwargs",
52 help="inject extra callback kwargs into the Request, it must be a valid raw json string")
53 parser.add_argument("-d", "--depth", dest="depth", type=int, default=1,
54 help="maximum depth for parsing requests [default: %default]")
55 parser.add_argument("-v", "--verbose", dest="verbose", action="store_true",
56 help="print each depth level one by one")
57
58 @property
59 def max_level(self):
60 max_items, max_requests = 0, 0
61 if self.items:
62 max_items = max(self.items)
63 if self.requests:
64 max_requests = max(self.requests)
65 return max(max_items, max_requests)
66
67 def add_items(self, lvl, new_items):
68 old_items = self.items.get(lvl, [])
69 self.items[lvl] = old_items + new_items
70
71 def add_requests(self, lvl, new_reqs):
72 old_reqs = self.requests.get(lvl, [])
73 self.requests[lvl] = old_reqs + new_reqs
74
75 def print_items(self, lvl=None, colour=True):
76 if lvl is None:
77 items = [item for lst in self.items.values() for item in lst]
78 else:
79 items = self.items.get(lvl, [])
80
81 print("# Scraped Items ", "-" * 60)
82 display.pprint([ItemAdapter(x).asdict() for x in items], colorize=colour)
83
84 def print_requests(self, lvl=None, colour=True):
85 if lvl is None:
86 if self.requests:
87 requests = self.requests[max(self.requests)]
88 else:
89 requests = []
90 else:
91 requests = self.requests.get(lvl, [])
92
93 print("# Requests ", "-" * 65)
94 display.pprint(requests, colorize=colour)
95
96 def print_results(self, opts):
97 colour = not opts.nocolour
98
99 if opts.verbose:
100 for level in range(1, self.max_level + 1):
101 print(f'\n>>> DEPTH LEVEL: {level} <<<')
102 if not opts.noitems:
103 self.print_items(level, colour)
104 if not opts.nolinks:
105 self.print_requests(level, colour)
106 else:
107 print(f'\n>>> STATUS DEPTH LEVEL {self.max_level} <<<')
108 if not opts.noitems:
109 self.print_items(colour=colour)
110 if not opts.nolinks:
111 self.print_requests(colour=colour)
112
113 def run_callback(self, response, callback, cb_kwargs=None):
114 cb_kwargs = cb_kwargs or {}
115 items, requests = [], []
116
117 for x in iterate_spider_output(callback(response, **cb_kwargs)):
118 if is_item(x):
119 items.append(x)
120 elif isinstance(x, Request):
121 requests.append(x)
122 return items, requests
123
124 def get_callback_from_rules(self, spider, response):
125 if getattr(spider, 'rules', None):
126 for rule in spider.rules:
127 if rule.link_extractor.matches(response.url):
128 return rule.callback or "parse"
129 else:
130 logger.error('No CrawlSpider rules found in spider %(spider)r, '
131 'please specify a callback to use for parsing',
132 {'spider': spider.name})
133
134 def set_spidercls(self, url, opts):
135 spider_loader = self.crawler_process.spider_loader
136 if opts.spider:
137 try:
138 self.spidercls = spider_loader.load(opts.spider)
139 except KeyError:
140 logger.error('Unable to find spider: %(spider)s',
141 {'spider': opts.spider})
142 else:
143 self.spidercls = spidercls_for_request(spider_loader, Request(url))
144 if not self.spidercls:
145 logger.error('Unable to find spider for: %(url)s', {'url': url})
146
147 def _start_requests(spider):
148 yield self.prepare_request(spider, Request(url), opts)
149 self.spidercls.start_requests = _start_requests
150
151 def start_parsing(self, url, opts):
152 self.crawler_process.crawl(self.spidercls, **opts.spargs)
153 self.pcrawler = list(self.crawler_process.crawlers)[0]
154 self.crawler_process.start()
155
156 if not self.first_response:
157 logger.error('No response downloaded for: %(url)s',
158 {'url': url})
159
160 def prepare_request(self, spider, request, opts):
161 def callback(response, **cb_kwargs):
162 # memorize first request
163 if not self.first_response:
164 self.first_response = response
165
166 # determine real callback
167 cb = response.meta['_callback']
168 if not cb:
169 if opts.callback:
170 cb = opts.callback
171 elif opts.rules and self.first_response == response:
172 cb = self.get_callback_from_rules(spider, response)
173
174 if not cb:
175 logger.error('Cannot find a rule that matches %(url)r in spider: %(spider)s',
176 {'url': response.url, 'spider': spider.name})
177 return
178 else:
179 cb = 'parse'
180
181 if not callable(cb):
182 cb_method = getattr(spider, cb, None)
183 if callable(cb_method):
184 cb = cb_method
185 else:
186 logger.error('Cannot find callback %(callback)r in spider: %(spider)s',
187 {'callback': cb, 'spider': spider.name})
188 return
189
190 # parse items and requests
191 depth = response.meta['_depth']
192
193 items, requests = self.run_callback(response, cb, cb_kwargs)
194 if opts.pipelines:
195 itemproc = self.pcrawler.engine.scraper.itemproc
196 for item in items:
197 itemproc.process_item(item, spider)
198 self.add_items(depth, items)
199 self.add_requests(depth, requests)
200
201 scraped_data = items if opts.output else []
202 if depth < opts.depth:
203 for req in requests:
204 req.meta['_depth'] = depth + 1
205 req.meta['_callback'] = req.callback
206 req.callback = callback
207 scraped_data += requests
208
209 return scraped_data
210
211 # update request meta if any extra meta was passed through the --meta/-m opts.
212 if opts.meta:
213 request.meta.update(opts.meta)
214
215 # update cb_kwargs if any extra values were was passed through the --cbkwargs option.
216 if opts.cbkwargs:
217 request.cb_kwargs.update(opts.cbkwargs)
218
219 request.meta['_depth'] = 1
220 request.meta['_callback'] = request.callback
221 request.callback = callback
222 return request
223
224 def process_options(self, args, opts):
225 BaseRunSpiderCommand.process_options(self, args, opts)
226
227 self.process_request_meta(opts)
228 self.process_request_cb_kwargs(opts)
229
230 def process_request_meta(self, opts):
231 if opts.meta:
232 try:
233 opts.meta = json.loads(opts.meta)
234 except ValueError:
235 raise UsageError("Invalid -m/--meta value, pass a valid json string to -m or --meta. "
236 "Example: --meta='{\"foo\" : \"bar\"}'", print_help=False)
237
238 def process_request_cb_kwargs(self, opts):
239 if opts.cbkwargs:
240 try:
241 opts.cbkwargs = json.loads(opts.cbkwargs)
242 except ValueError:
243 raise UsageError("Invalid --cbkwargs value, pass a valid json string to --cbkwargs. "
244 "Example: --cbkwargs='{\"foo\" : \"bar\"}'", print_help=False)
245
246 def run(self, args, opts):
247 # parse arguments
248 if not len(args) == 1 or not is_url(args[0]):
249 raise UsageError()
250 else:
251 url = args[0]
252
253 # prepare spidercls
254 self.set_spidercls(url, opts)
255
256 if self.spidercls and opts.depth > 0:
257 self.start_parsing(url, opts)
258 self.print_results(opts)
259
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scrapy/commands/parse.py b/scrapy/commands/parse.py
--- a/scrapy/commands/parse.py
+++ b/scrapy/commands/parse.py
@@ -51,7 +51,7 @@
parser.add_argument("--cbkwargs", dest="cbkwargs",
help="inject extra callback kwargs into the Request, it must be a valid raw json string")
parser.add_argument("-d", "--depth", dest="depth", type=int, default=1,
- help="maximum depth for parsing requests [default: %default]")
+ help="maximum depth for parsing requests [default: %(default)s]")
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true",
help="print each depth level one by one")
|
{"golden_diff": "diff --git a/scrapy/commands/parse.py b/scrapy/commands/parse.py\n--- a/scrapy/commands/parse.py\n+++ b/scrapy/commands/parse.py\n@@ -51,7 +51,7 @@\n parser.add_argument(\"--cbkwargs\", dest=\"cbkwargs\",\n help=\"inject extra callback kwargs into the Request, it must be a valid raw json string\")\n parser.add_argument(\"-d\", \"--depth\", dest=\"depth\", type=int, default=1,\n- help=\"maximum depth for parsing requests [default: %default]\")\n+ help=\"maximum depth for parsing requests [default: %(default)s]\")\n parser.add_argument(\"-v\", \"--verbose\", dest=\"verbose\", action=\"store_true\",\n help=\"print each depth level one by one\")\n", "issue": "scrapy parse -h throws error\n### Description\r\nrunning `scrapy parse -h` from inside of a project throws an error.\r\n\r\n### Steps to Reproduce\r\n1. `scrapy startproject example`\r\n2. `cd example`\r\n3. `scrapy parse -h`\r\n\r\n**Expected behavior:** It should show the help message output for the `parse` command\r\n**Actual behavior:** Throws error and displays traceback information\r\n**Reproduces how often:** Every time I tried it.\r\n\r\n### Versions\r\nv2.6.1\r\n\r\n### Additional context\r\n\r\nI identified the issue and will submit a PR shortly. \n", "before_files": [{"content": "import json\nimport logging\nfrom typing import Dict\n\nfrom itemadapter import is_item, ItemAdapter\nfrom w3lib.url import is_url\n\nfrom scrapy.commands import BaseRunSpiderCommand\nfrom scrapy.http import Request\nfrom scrapy.utils import display\nfrom scrapy.utils.spider import iterate_spider_output, spidercls_for_request\nfrom scrapy.exceptions import UsageError\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Command(BaseRunSpiderCommand):\n requires_project = True\n\n spider = None\n items: Dict[int, list] = {}\n requests: Dict[int, list] = {}\n\n first_response = None\n\n def syntax(self):\n return \"[options] <url>\"\n\n def short_desc(self):\n return \"Parse URL (using its spider) and print the results\"\n\n def add_options(self, parser):\n BaseRunSpiderCommand.add_options(self, parser)\n parser.add_argument(\"--spider\", dest=\"spider\", default=None,\n help=\"use this spider without looking for one\")\n parser.add_argument(\"--pipelines\", action=\"store_true\",\n help=\"process items through pipelines\")\n parser.add_argument(\"--nolinks\", dest=\"nolinks\", action=\"store_true\",\n help=\"don't show links to follow (extracted requests)\")\n parser.add_argument(\"--noitems\", dest=\"noitems\", action=\"store_true\",\n help=\"don't show scraped items\")\n parser.add_argument(\"--nocolour\", dest=\"nocolour\", action=\"store_true\",\n help=\"avoid using pygments to colorize the output\")\n parser.add_argument(\"-r\", \"--rules\", dest=\"rules\", action=\"store_true\",\n help=\"use CrawlSpider rules to discover the callback\")\n parser.add_argument(\"-c\", \"--callback\", dest=\"callback\",\n help=\"use this callback for parsing, instead looking for a callback\")\n parser.add_argument(\"-m\", \"--meta\", dest=\"meta\",\n help=\"inject extra meta into the Request, it must be a valid raw json string\")\n parser.add_argument(\"--cbkwargs\", dest=\"cbkwargs\",\n help=\"inject extra callback kwargs into the Request, it must be a valid raw json string\")\n parser.add_argument(\"-d\", \"--depth\", dest=\"depth\", type=int, default=1,\n help=\"maximum depth for parsing requests [default: %default]\")\n parser.add_argument(\"-v\", \"--verbose\", dest=\"verbose\", action=\"store_true\",\n help=\"print each depth level one by 
one\")\n\n @property\n def max_level(self):\n max_items, max_requests = 0, 0\n if self.items:\n max_items = max(self.items)\n if self.requests:\n max_requests = max(self.requests)\n return max(max_items, max_requests)\n\n def add_items(self, lvl, new_items):\n old_items = self.items.get(lvl, [])\n self.items[lvl] = old_items + new_items\n\n def add_requests(self, lvl, new_reqs):\n old_reqs = self.requests.get(lvl, [])\n self.requests[lvl] = old_reqs + new_reqs\n\n def print_items(self, lvl=None, colour=True):\n if lvl is None:\n items = [item for lst in self.items.values() for item in lst]\n else:\n items = self.items.get(lvl, [])\n\n print(\"# Scraped Items \", \"-\" * 60)\n display.pprint([ItemAdapter(x).asdict() for x in items], colorize=colour)\n\n def print_requests(self, lvl=None, colour=True):\n if lvl is None:\n if self.requests:\n requests = self.requests[max(self.requests)]\n else:\n requests = []\n else:\n requests = self.requests.get(lvl, [])\n\n print(\"# Requests \", \"-\" * 65)\n display.pprint(requests, colorize=colour)\n\n def print_results(self, opts):\n colour = not opts.nocolour\n\n if opts.verbose:\n for level in range(1, self.max_level + 1):\n print(f'\\n>>> DEPTH LEVEL: {level} <<<')\n if not opts.noitems:\n self.print_items(level, colour)\n if not opts.nolinks:\n self.print_requests(level, colour)\n else:\n print(f'\\n>>> STATUS DEPTH LEVEL {self.max_level} <<<')\n if not opts.noitems:\n self.print_items(colour=colour)\n if not opts.nolinks:\n self.print_requests(colour=colour)\n\n def run_callback(self, response, callback, cb_kwargs=None):\n cb_kwargs = cb_kwargs or {}\n items, requests = [], []\n\n for x in iterate_spider_output(callback(response, **cb_kwargs)):\n if is_item(x):\n items.append(x)\n elif isinstance(x, Request):\n requests.append(x)\n return items, requests\n\n def get_callback_from_rules(self, spider, response):\n if getattr(spider, 'rules', None):\n for rule in spider.rules:\n if rule.link_extractor.matches(response.url):\n return rule.callback or \"parse\"\n else:\n logger.error('No CrawlSpider rules found in spider %(spider)r, '\n 'please specify a callback to use for parsing',\n {'spider': spider.name})\n\n def set_spidercls(self, url, opts):\n spider_loader = self.crawler_process.spider_loader\n if opts.spider:\n try:\n self.spidercls = spider_loader.load(opts.spider)\n except KeyError:\n logger.error('Unable to find spider: %(spider)s',\n {'spider': opts.spider})\n else:\n self.spidercls = spidercls_for_request(spider_loader, Request(url))\n if not self.spidercls:\n logger.error('Unable to find spider for: %(url)s', {'url': url})\n\n def _start_requests(spider):\n yield self.prepare_request(spider, Request(url), opts)\n self.spidercls.start_requests = _start_requests\n\n def start_parsing(self, url, opts):\n self.crawler_process.crawl(self.spidercls, **opts.spargs)\n self.pcrawler = list(self.crawler_process.crawlers)[0]\n self.crawler_process.start()\n\n if not self.first_response:\n logger.error('No response downloaded for: %(url)s',\n {'url': url})\n\n def prepare_request(self, spider, request, opts):\n def callback(response, **cb_kwargs):\n # memorize first request\n if not self.first_response:\n self.first_response = response\n\n # determine real callback\n cb = response.meta['_callback']\n if not cb:\n if opts.callback:\n cb = opts.callback\n elif opts.rules and self.first_response == response:\n cb = self.get_callback_from_rules(spider, response)\n\n if not cb:\n logger.error('Cannot find a rule that matches %(url)r in spider: 
%(spider)s',\n {'url': response.url, 'spider': spider.name})\n return\n else:\n cb = 'parse'\n\n if not callable(cb):\n cb_method = getattr(spider, cb, None)\n if callable(cb_method):\n cb = cb_method\n else:\n logger.error('Cannot find callback %(callback)r in spider: %(spider)s',\n {'callback': cb, 'spider': spider.name})\n return\n\n # parse items and requests\n depth = response.meta['_depth']\n\n items, requests = self.run_callback(response, cb, cb_kwargs)\n if opts.pipelines:\n itemproc = self.pcrawler.engine.scraper.itemproc\n for item in items:\n itemproc.process_item(item, spider)\n self.add_items(depth, items)\n self.add_requests(depth, requests)\n\n scraped_data = items if opts.output else []\n if depth < opts.depth:\n for req in requests:\n req.meta['_depth'] = depth + 1\n req.meta['_callback'] = req.callback\n req.callback = callback\n scraped_data += requests\n\n return scraped_data\n\n # update request meta if any extra meta was passed through the --meta/-m opts.\n if opts.meta:\n request.meta.update(opts.meta)\n\n # update cb_kwargs if any extra values were was passed through the --cbkwargs option.\n if opts.cbkwargs:\n request.cb_kwargs.update(opts.cbkwargs)\n\n request.meta['_depth'] = 1\n request.meta['_callback'] = request.callback\n request.callback = callback\n return request\n\n def process_options(self, args, opts):\n BaseRunSpiderCommand.process_options(self, args, opts)\n\n self.process_request_meta(opts)\n self.process_request_cb_kwargs(opts)\n\n def process_request_meta(self, opts):\n if opts.meta:\n try:\n opts.meta = json.loads(opts.meta)\n except ValueError:\n raise UsageError(\"Invalid -m/--meta value, pass a valid json string to -m or --meta. \"\n \"Example: --meta='{\\\"foo\\\" : \\\"bar\\\"}'\", print_help=False)\n\n def process_request_cb_kwargs(self, opts):\n if opts.cbkwargs:\n try:\n opts.cbkwargs = json.loads(opts.cbkwargs)\n except ValueError:\n raise UsageError(\"Invalid --cbkwargs value, pass a valid json string to --cbkwargs. 
\"\n \"Example: --cbkwargs='{\\\"foo\\\" : \\\"bar\\\"}'\", print_help=False)\n\n def run(self, args, opts):\n # parse arguments\n if not len(args) == 1 or not is_url(args[0]):\n raise UsageError()\n else:\n url = args[0]\n\n # prepare spidercls\n self.set_spidercls(url, opts)\n\n if self.spidercls and opts.depth > 0:\n self.start_parsing(url, opts)\n self.print_results(opts)\n", "path": "scrapy/commands/parse.py"}], "after_files": [{"content": "import json\nimport logging\nfrom typing import Dict\n\nfrom itemadapter import is_item, ItemAdapter\nfrom w3lib.url import is_url\n\nfrom scrapy.commands import BaseRunSpiderCommand\nfrom scrapy.http import Request\nfrom scrapy.utils import display\nfrom scrapy.utils.spider import iterate_spider_output, spidercls_for_request\nfrom scrapy.exceptions import UsageError\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Command(BaseRunSpiderCommand):\n requires_project = True\n\n spider = None\n items: Dict[int, list] = {}\n requests: Dict[int, list] = {}\n\n first_response = None\n\n def syntax(self):\n return \"[options] <url>\"\n\n def short_desc(self):\n return \"Parse URL (using its spider) and print the results\"\n\n def add_options(self, parser):\n BaseRunSpiderCommand.add_options(self, parser)\n parser.add_argument(\"--spider\", dest=\"spider\", default=None,\n help=\"use this spider without looking for one\")\n parser.add_argument(\"--pipelines\", action=\"store_true\",\n help=\"process items through pipelines\")\n parser.add_argument(\"--nolinks\", dest=\"nolinks\", action=\"store_true\",\n help=\"don't show links to follow (extracted requests)\")\n parser.add_argument(\"--noitems\", dest=\"noitems\", action=\"store_true\",\n help=\"don't show scraped items\")\n parser.add_argument(\"--nocolour\", dest=\"nocolour\", action=\"store_true\",\n help=\"avoid using pygments to colorize the output\")\n parser.add_argument(\"-r\", \"--rules\", dest=\"rules\", action=\"store_true\",\n help=\"use CrawlSpider rules to discover the callback\")\n parser.add_argument(\"-c\", \"--callback\", dest=\"callback\",\n help=\"use this callback for parsing, instead looking for a callback\")\n parser.add_argument(\"-m\", \"--meta\", dest=\"meta\",\n help=\"inject extra meta into the Request, it must be a valid raw json string\")\n parser.add_argument(\"--cbkwargs\", dest=\"cbkwargs\",\n help=\"inject extra callback kwargs into the Request, it must be a valid raw json string\")\n parser.add_argument(\"-d\", \"--depth\", dest=\"depth\", type=int, default=1,\n help=\"maximum depth for parsing requests [default: %(default)s]\")\n parser.add_argument(\"-v\", \"--verbose\", dest=\"verbose\", action=\"store_true\",\n help=\"print each depth level one by one\")\n\n @property\n def max_level(self):\n max_items, max_requests = 0, 0\n if self.items:\n max_items = max(self.items)\n if self.requests:\n max_requests = max(self.requests)\n return max(max_items, max_requests)\n\n def add_items(self, lvl, new_items):\n old_items = self.items.get(lvl, [])\n self.items[lvl] = old_items + new_items\n\n def add_requests(self, lvl, new_reqs):\n old_reqs = self.requests.get(lvl, [])\n self.requests[lvl] = old_reqs + new_reqs\n\n def print_items(self, lvl=None, colour=True):\n if lvl is None:\n items = [item for lst in self.items.values() for item in lst]\n else:\n items = self.items.get(lvl, [])\n\n print(\"# Scraped Items \", \"-\" * 60)\n display.pprint([ItemAdapter(x).asdict() for x in items], colorize=colour)\n\n def print_requests(self, lvl=None, colour=True):\n if lvl is None:\n 
if self.requests:\n requests = self.requests[max(self.requests)]\n else:\n requests = []\n else:\n requests = self.requests.get(lvl, [])\n\n print(\"# Requests \", \"-\" * 65)\n display.pprint(requests, colorize=colour)\n\n def print_results(self, opts):\n colour = not opts.nocolour\n\n if opts.verbose:\n for level in range(1, self.max_level + 1):\n print(f'\\n>>> DEPTH LEVEL: {level} <<<')\n if not opts.noitems:\n self.print_items(level, colour)\n if not opts.nolinks:\n self.print_requests(level, colour)\n else:\n print(f'\\n>>> STATUS DEPTH LEVEL {self.max_level} <<<')\n if not opts.noitems:\n self.print_items(colour=colour)\n if not opts.nolinks:\n self.print_requests(colour=colour)\n\n def run_callback(self, response, callback, cb_kwargs=None):\n cb_kwargs = cb_kwargs or {}\n items, requests = [], []\n\n for x in iterate_spider_output(callback(response, **cb_kwargs)):\n if is_item(x):\n items.append(x)\n elif isinstance(x, Request):\n requests.append(x)\n return items, requests\n\n def get_callback_from_rules(self, spider, response):\n if getattr(spider, 'rules', None):\n for rule in spider.rules:\n if rule.link_extractor.matches(response.url):\n return rule.callback or \"parse\"\n else:\n logger.error('No CrawlSpider rules found in spider %(spider)r, '\n 'please specify a callback to use for parsing',\n {'spider': spider.name})\n\n def set_spidercls(self, url, opts):\n spider_loader = self.crawler_process.spider_loader\n if opts.spider:\n try:\n self.spidercls = spider_loader.load(opts.spider)\n except KeyError:\n logger.error('Unable to find spider: %(spider)s',\n {'spider': opts.spider})\n else:\n self.spidercls = spidercls_for_request(spider_loader, Request(url))\n if not self.spidercls:\n logger.error('Unable to find spider for: %(url)s', {'url': url})\n\n def _start_requests(spider):\n yield self.prepare_request(spider, Request(url), opts)\n self.spidercls.start_requests = _start_requests\n\n def start_parsing(self, url, opts):\n self.crawler_process.crawl(self.spidercls, **opts.spargs)\n self.pcrawler = list(self.crawler_process.crawlers)[0]\n self.crawler_process.start()\n\n if not self.first_response:\n logger.error('No response downloaded for: %(url)s',\n {'url': url})\n\n def prepare_request(self, spider, request, opts):\n def callback(response, **cb_kwargs):\n # memorize first request\n if not self.first_response:\n self.first_response = response\n\n # determine real callback\n cb = response.meta['_callback']\n if not cb:\n if opts.callback:\n cb = opts.callback\n elif opts.rules and self.first_response == response:\n cb = self.get_callback_from_rules(spider, response)\n\n if not cb:\n logger.error('Cannot find a rule that matches %(url)r in spider: %(spider)s',\n {'url': response.url, 'spider': spider.name})\n return\n else:\n cb = 'parse'\n\n if not callable(cb):\n cb_method = getattr(spider, cb, None)\n if callable(cb_method):\n cb = cb_method\n else:\n logger.error('Cannot find callback %(callback)r in spider: %(spider)s',\n {'callback': cb, 'spider': spider.name})\n return\n\n # parse items and requests\n depth = response.meta['_depth']\n\n items, requests = self.run_callback(response, cb, cb_kwargs)\n if opts.pipelines:\n itemproc = self.pcrawler.engine.scraper.itemproc\n for item in items:\n itemproc.process_item(item, spider)\n self.add_items(depth, items)\n self.add_requests(depth, requests)\n\n scraped_data = items if opts.output else []\n if depth < opts.depth:\n for req in requests:\n req.meta['_depth'] = depth + 1\n req.meta['_callback'] = req.callback\n 
req.callback = callback\n scraped_data += requests\n\n return scraped_data\n\n # update request meta if any extra meta was passed through the --meta/-m opts.\n if opts.meta:\n request.meta.update(opts.meta)\n\n # update cb_kwargs if any extra values were was passed through the --cbkwargs option.\n if opts.cbkwargs:\n request.cb_kwargs.update(opts.cbkwargs)\n\n request.meta['_depth'] = 1\n request.meta['_callback'] = request.callback\n request.callback = callback\n return request\n\n def process_options(self, args, opts):\n BaseRunSpiderCommand.process_options(self, args, opts)\n\n self.process_request_meta(opts)\n self.process_request_cb_kwargs(opts)\n\n def process_request_meta(self, opts):\n if opts.meta:\n try:\n opts.meta = json.loads(opts.meta)\n except ValueError:\n raise UsageError(\"Invalid -m/--meta value, pass a valid json string to -m or --meta. \"\n \"Example: --meta='{\\\"foo\\\" : \\\"bar\\\"}'\", print_help=False)\n\n def process_request_cb_kwargs(self, opts):\n if opts.cbkwargs:\n try:\n opts.cbkwargs = json.loads(opts.cbkwargs)\n except ValueError:\n raise UsageError(\"Invalid --cbkwargs value, pass a valid json string to --cbkwargs. \"\n \"Example: --cbkwargs='{\\\"foo\\\" : \\\"bar\\\"}'\", print_help=False)\n\n def run(self, args, opts):\n # parse arguments\n if not len(args) == 1 or not is_url(args[0]):\n raise UsageError()\n else:\n url = args[0]\n\n # prepare spidercls\n self.set_spidercls(url, opts)\n\n if self.spidercls and opts.depth > 0:\n self.start_parsing(url, opts)\n self.print_results(opts)\n", "path": "scrapy/commands/parse.py"}]}
| 3,166 | 167 |
gh_patches_debug_37121
|
rasdani/github-patches
|
git_diff
|
DataBiosphere__toil-902
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Build takes too long
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright (C) 2015 UCSC Computational Genomics Lab
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import sys
16 from version import version
17 from setuptools import find_packages, setup
18
19 botoVersionRequired = 'boto==2.38.0'
20
21 kwargs = dict(
22 name='toil',
23 version=version,
24 description='Pipeline management software for clusters.',
25 author='Benedict Paten',
26 author_email='[email protected]',
27 url="https://github.com/BD2KGenomics/toil",
28 install_requires=[
29 'bd2k-python-lib==1.13.dev14'],
30 tests_require=[
31 'mock==1.0.1',
32 'pytest==2.8.3'],
33 test_suite='toil',
34 extras_require={
35 'mesos': [
36 'psutil==3.0.1'],
37 'aws': [
38 botoVersionRequired,
39 'cgcloud-lib==1.4a1.dev195' ],
40 'azure': [
41 'azure==1.0.3'],
42 'encryption': [
43 'pynacl==0.3.0'],
44 'google': [
45 'gcs_oauth2_boto_plugin==1.9',
46 botoVersionRequired],
47 'cwl': [
48 'cwltool==1.0.20160425140546']},
49 package_dir={'': 'src'},
50 packages=find_packages('src', exclude=['*.test']),
51 entry_points={
52 'console_scripts': [
53 'toil = toil.utils.toilMain:main',
54 '_toil_worker = toil.worker:main',
55 'cwltoil = toil.cwl.cwltoil:main [cwl]',
56 'cwl-runner = toil.cwl.cwltoil:main [cwl]',
57 '_toil_mesos_executor = toil.batchSystems.mesos.executor:main [mesos]']})
58
59 from setuptools.command.test import test as TestCommand
60
61
62 class PyTest(TestCommand):
63 user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
64
65 def initialize_options(self):
66 TestCommand.initialize_options(self)
67 self.pytest_args = []
68
69 def finalize_options(self):
70 TestCommand.finalize_options(self)
71 self.test_args = []
72 self.test_suite = True
73
74 def run_tests(self):
75 import pytest
76 # Sanitize command line arguments to avoid confusing Toil code attempting to parse them
77 sys.argv[1:] = []
78 errno = pytest.main(self.pytest_args)
79 sys.exit(errno)
80
81
82 kwargs['cmdclass'] = {'test': PyTest}
83
84 setup(**kwargs)
85
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -12,13 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import sys
from version import version
from setuptools import find_packages, setup
-botoVersionRequired = 'boto==2.38.0'
+botoRequirement = 'boto==2.38.0'
-kwargs = dict(
+setup(
name='toil',
version=version,
description='Pipeline management software for clusters.',
@@ -27,15 +26,11 @@
url="https://github.com/BD2KGenomics/toil",
install_requires=[
'bd2k-python-lib==1.13.dev14'],
- tests_require=[
- 'mock==1.0.1',
- 'pytest==2.8.3'],
- test_suite='toil',
extras_require={
'mesos': [
'psutil==3.0.1'],
'aws': [
- botoVersionRequired,
+ botoRequirement,
'cgcloud-lib==1.4a1.dev195' ],
'azure': [
'azure==1.0.3'],
@@ -43,7 +38,7 @@
'pynacl==0.3.0'],
'google': [
'gcs_oauth2_boto_plugin==1.9',
- botoVersionRequired],
+ botoRequirement],
'cwl': [
'cwltool==1.0.20160425140546']},
package_dir={'': 'src'},
@@ -55,30 +50,3 @@
'cwltoil = toil.cwl.cwltoil:main [cwl]',
'cwl-runner = toil.cwl.cwltoil:main [cwl]',
'_toil_mesos_executor = toil.batchSystems.mesos.executor:main [mesos]']})
-
-from setuptools.command.test import test as TestCommand
-
-
-class PyTest(TestCommand):
- user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
-
- def initialize_options(self):
- TestCommand.initialize_options(self)
- self.pytest_args = []
-
- def finalize_options(self):
- TestCommand.finalize_options(self)
- self.test_args = []
- self.test_suite = True
-
- def run_tests(self):
- import pytest
- # Sanitize command line arguments to avoid confusing Toil code attempting to parse them
- sys.argv[1:] = []
- errno = pytest.main(self.pytest_args)
- sys.exit(errno)
-
-
-kwargs['cmdclass'] = {'test': PyTest}
-
-setup(**kwargs)
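(Editorial note, not part of the dataset record: with the `PyTest` command class and `tests_require` removed, the suite is no longer run via `python setup.py test`. The equivalent of the deleted `run_tests` body can still be invoked directly; a minimal sketch, assuming pytest is installed separately and the sources live under `src/`:)

```python
import sys
import pytest

# Run the same pytest entry point the removed PyTest.run_tests used,
# pointing it at the package sources (the "src" path is an assumption).
sys.exit(pytest.main(["src"]))
```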
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -12,13 +12,12 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-import sys\n from version import version\n from setuptools import find_packages, setup\n \n-botoVersionRequired = 'boto==2.38.0'\n+botoRequirement = 'boto==2.38.0'\n \n-kwargs = dict(\n+setup(\n name='toil',\n version=version,\n description='Pipeline management software for clusters.',\n@@ -27,15 +26,11 @@\n url=\"https://github.com/BD2KGenomics/toil\",\n install_requires=[\n 'bd2k-python-lib==1.13.dev14'],\n- tests_require=[\n- 'mock==1.0.1',\n- 'pytest==2.8.3'],\n- test_suite='toil',\n extras_require={\n 'mesos': [\n 'psutil==3.0.1'],\n 'aws': [\n- botoVersionRequired,\n+ botoRequirement,\n 'cgcloud-lib==1.4a1.dev195' ],\n 'azure': [\n 'azure==1.0.3'],\n@@ -43,7 +38,7 @@\n 'pynacl==0.3.0'],\n 'google': [\n 'gcs_oauth2_boto_plugin==1.9',\n- botoVersionRequired],\n+ botoRequirement],\n 'cwl': [\n 'cwltool==1.0.20160425140546']},\n package_dir={'': 'src'},\n@@ -55,30 +50,3 @@\n 'cwltoil = toil.cwl.cwltoil:main [cwl]',\n 'cwl-runner = toil.cwl.cwltoil:main [cwl]',\n '_toil_mesos_executor = toil.batchSystems.mesos.executor:main [mesos]']})\n-\n-from setuptools.command.test import test as TestCommand\n-\n-\n-class PyTest(TestCommand):\n- user_options = [('pytest-args=', 'a', \"Arguments to pass to py.test\")]\n-\n- def initialize_options(self):\n- TestCommand.initialize_options(self)\n- self.pytest_args = []\n-\n- def finalize_options(self):\n- TestCommand.finalize_options(self)\n- self.test_args = []\n- self.test_suite = True\n-\n- def run_tests(self):\n- import pytest\n- # Sanitize command line arguments to avoid confusing Toil code attempting to parse them\n- sys.argv[1:] = []\n- errno = pytest.main(self.pytest_args)\n- sys.exit(errno)\n-\n-\n-kwargs['cmdclass'] = {'test': PyTest}\n-\n-setup(**kwargs)\n", "issue": "Build takes too long\n\n", "before_files": [{"content": "# Copyright (C) 2015 UCSC Computational Genomics Lab\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nfrom version import version\nfrom setuptools import find_packages, setup\n\nbotoVersionRequired = 'boto==2.38.0'\n\nkwargs = dict(\n name='toil',\n version=version,\n description='Pipeline management software for clusters.',\n author='Benedict Paten',\n author_email='[email protected]',\n url=\"https://github.com/BD2KGenomics/toil\",\n install_requires=[\n 'bd2k-python-lib==1.13.dev14'],\n tests_require=[\n 'mock==1.0.1',\n 'pytest==2.8.3'],\n test_suite='toil',\n extras_require={\n 'mesos': [\n 'psutil==3.0.1'],\n 'aws': [\n botoVersionRequired,\n 'cgcloud-lib==1.4a1.dev195' ],\n 'azure': [\n 'azure==1.0.3'],\n 'encryption': [\n 'pynacl==0.3.0'],\n 'google': [\n 'gcs_oauth2_boto_plugin==1.9',\n botoVersionRequired],\n 'cwl': [\n 'cwltool==1.0.20160425140546']},\n package_dir={'': 'src'},\n packages=find_packages('src', exclude=['*.test']),\n entry_points={\n 'console_scripts': [\n 'toil = 
toil.utils.toilMain:main',\n '_toil_worker = toil.worker:main',\n 'cwltoil = toil.cwl.cwltoil:main [cwl]',\n 'cwl-runner = toil.cwl.cwltoil:main [cwl]',\n '_toil_mesos_executor = toil.batchSystems.mesos.executor:main [mesos]']})\n\nfrom setuptools.command.test import test as TestCommand\n\n\nclass PyTest(TestCommand):\n user_options = [('pytest-args=', 'a', \"Arguments to pass to py.test\")]\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.pytest_args = []\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n def run_tests(self):\n import pytest\n # Sanitize command line arguments to avoid confusing Toil code attempting to parse them\n sys.argv[1:] = []\n errno = pytest.main(self.pytest_args)\n sys.exit(errno)\n\n\nkwargs['cmdclass'] = {'test': PyTest}\n\nsetup(**kwargs)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright (C) 2015 UCSC Computational Genomics Lab\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom version import version\nfrom setuptools import find_packages, setup\n\nbotoRequirement = 'boto==2.38.0'\n\nsetup(\n name='toil',\n version=version,\n description='Pipeline management software for clusters.',\n author='Benedict Paten',\n author_email='[email protected]',\n url=\"https://github.com/BD2KGenomics/toil\",\n install_requires=[\n 'bd2k-python-lib==1.13.dev14'],\n extras_require={\n 'mesos': [\n 'psutil==3.0.1'],\n 'aws': [\n botoRequirement,\n 'cgcloud-lib==1.4a1.dev195' ],\n 'azure': [\n 'azure==1.0.3'],\n 'encryption': [\n 'pynacl==0.3.0'],\n 'google': [\n 'gcs_oauth2_boto_plugin==1.9',\n botoRequirement],\n 'cwl': [\n 'cwltool==1.0.20160425140546']},\n package_dir={'': 'src'},\n packages=find_packages('src', exclude=['*.test']),\n entry_points={\n 'console_scripts': [\n 'toil = toil.utils.toilMain:main',\n '_toil_worker = toil.worker:main',\n 'cwltoil = toil.cwl.cwltoil:main [cwl]',\n 'cwl-runner = toil.cwl.cwltoil:main [cwl]',\n '_toil_mesos_executor = toil.batchSystems.mesos.executor:main [mesos]']})\n", "path": "setup.py"}]}
| 1,122 | 628 |
gh_patches_debug_20793
|
rasdani/github-patches
|
git_diff
|
google__TensorNetwork-944
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Flaky Test when seed not used
The test `test_max_truncation_error` in `tensornetwork/backends/symmetric/decompositions_test.py` [link](https://github.com/google/TensorNetwork/blob/edf3f345640c64595b4ad1002bebf4a008988b3e/tensornetwork/backends/symmetric/decompositions_test.py#L90) almost always fails when the seed [here](https://github.com/google/TensorNetwork/blob/edf3f345640c64595b4ad1002bebf4a008988b3e/tensornetwork/backends/symmetric/decompositions_test.py#L91) is removed. This happens only for `[3-float64]` and `[4-complex128]` parameters. The test failed 500/500 times for me for both these parameters.
From the error message it seems `svals[mask][::-1]` is always empty, whereas `S2.data` most often contains a single element (except when using the seed). Is this expected? Or is this indicative of a bug?
Please let me know if you need more details. I will be happy to raise a PR if you have any suggestions on how to fix it.
Test failure message:
```
> np.testing.assert_allclose(S2.data, svals[mask][::-1])
E AssertionError:
E Not equal to tolerance rtol=1e-07, atol=0
E
E (shapes (1,), (0,) mismatch)
E x: array([1.])
E y: array([], dtype=float64)
tensornetwork/backends/symmetric/decompositions_test.py:112: AssertionError
```
Environment:
```
Anaconda python 3.6
tensorflow==2.6.0
numpy==1.19.5
scipy==1.5.4
```
--- END ISSUE ---
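A minimal sketch of the truncation criterion at the heart of this report may help localize the problem. It mirrors the mask built in `decompositions.py` below; the singular values and tolerance are made-up illustrative numbers, not values taken from the test:

```python
import numpy as np

# Hypothetical singular values of one charge block, sorted ascending,
# and a hypothetical truncation tolerance.
svals = np.array([0.03, 0.05])
max_truncation_error = 0.1

# Same criterion as in svd(): keep a value once the cumulative norm of the
# values up to and including it exceeds the tolerance.
kept_mask = np.sqrt(np.cumsum(np.square(svals))) > max_truncation_error

print(svals[kept_mask])  # prints an empty array: every singular value is discarded
```

When that happens the `svd` implementation warns and forces one singular value back in (`inds = np.asarray([maxind])`), which would explain why `S2.data` ends up with a single entry while the reference `svals[mask][::-1]` computed in the test stays empty.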
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tensornetwork/backends/symmetric/decompositions.py`
Content:
```
1 # Copyright 2019 The TensorNetwork Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Tensor Decomposition Implementations."""
15 import numpy as np
16 import warnings
17 from typing import Optional, Any, Tuple
18 from tensornetwork.block_sparse.blocksparse_utils import (
19 _find_transposed_diagonal_sparse_blocks)
20 from tensornetwork.block_sparse.utils import get_real_dtype
21 from tensornetwork.block_sparse.sizetypes import SIZE_T
22 from tensornetwork.block_sparse.blocksparsetensor import (BlockSparseTensor,
23 ChargeArray)
24 Tensor = Any
25
26
27 def svd(
28 bt,
29 tensor: BlockSparseTensor,
30 pivot_axis: int,
31 max_singular_values: Optional[int] = None,
32 max_truncation_error: Optional[float] = None,
33 relative: Optional[bool] = False) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
34 """
35 Computes the singular value decomposition (SVD) of a tensor.
36 See tensornetwork.backends.tensorflow.decompositions for details.
37 """
38
39 left_dims = tensor.shape[:pivot_axis]
40 right_dims = tensor.shape[pivot_axis:]
41
42 matrix = bt.reshape(tensor, [np.prod(left_dims), np.prod(right_dims)])
43
44 flat_charges = matrix._charges
45 flat_flows = matrix._flows
46 flat_order = matrix.flat_order
47 tr_partition = len(matrix._order[0])
48 blocks, charges, shapes = _find_transposed_diagonal_sparse_blocks(
49 flat_charges, flat_flows, tr_partition, flat_order)
50
51 u_blocks = []
52 singvals = []
53 v_blocks = []
54 for n, b in enumerate(blocks):
55 out = np.linalg.svd(
56 np.reshape(matrix.data[b], shapes[:, n]),
57 full_matrices=False,
58 compute_uv=True)
59 u_blocks.append(out[0])
60 singvals.append(out[1])
61 v_blocks.append(out[2])
62
63 orig_num_singvals = np.int64(np.sum([len(s) for s in singvals]))
64 orig_block_size = [len(s) for s in singvals]
65 discarded_singvals = np.zeros(0, dtype=get_real_dtype(tensor.dtype))
66 if (max_singular_values
67 is not None) and (max_singular_values >= orig_num_singvals):
68 max_singular_values = None
69
70 if (max_truncation_error is not None) or (max_singular_values is not None):
71 max_D = np.max([len(s) for s in singvals]) if len(singvals) > 0 else 0
72
73 #extend singvals of all blocks into a matrix by padding each block with 0
74 if len(singvals) > 0:
75 extended_singvals = np.stack([
76 np.append(s, np.zeros(max_D - len(s), dtype=s.dtype))
77 for s in singvals
78 ],
79 axis=1)
80 else:
81 extended_singvals = np.empty((0, 0), dtype=get_real_dtype(tensor.dtype))
82
83 extended_flat_singvals = np.ravel(extended_singvals)
84 #sort singular values
85 inds = np.argsort(extended_flat_singvals, kind='stable')
86 discarded_inds = np.zeros(0, dtype=SIZE_T)
87 if inds.shape[0] > 0:
88 maxind = inds[-1]
89 else:
90 maxind = 0
91 if max_truncation_error is not None:
92 if relative and (len(singvals) > 0):
93 max_truncation_error = max_truncation_error * np.max(
94 [s[0] for s in singvals])
95
96 kept_inds_mask = np.sqrt(
97 np.cumsum(np.square(
98 extended_flat_singvals[inds]))) > max_truncation_error
99 trunc_inds_mask = np.logical_not(kept_inds_mask)
100 discarded_inds = inds[trunc_inds_mask]
101 inds = inds[kept_inds_mask]
102 if max_singular_values is not None:
103 #if the original number of non-zero singular values
104 #is smaller than `max_singular_values` we need to reset
105 #`max_singular_values` (we were filling in 0.0 into singular
106 #value blocks to facilitate trunction steps, thus we could end up
107 #with more singular values than originally there).
108 if max_singular_values > orig_num_singvals:
109 max_singular_values = orig_num_singvals
110 if max_singular_values < len(inds):
111 discarded_inds = np.append(discarded_inds,
112 inds[:(-1) * max_singular_values])
113 inds = inds[(-1) * max_singular_values::]
114
115 if len(inds) == 0:
116 #special case of truncation to 0 dimension;
117 warnings.warn("svd_decomposition truncated to 0 dimensions. "
118 "Adjusting to `max_singular_values = 1`")
119 inds = np.asarray([maxind])
120
121 if extended_singvals.shape[1] > 0:
122 #pylint: disable=no-member
123 keep = np.divmod(inds, extended_singvals.shape[1])
124 disc = np.divmod(discarded_inds, extended_singvals.shape[1])
125 else:
126 keep = (np.zeros(1, dtype=SIZE_T), np.zeros(1, dtype=SIZE_T))
127 disc = (np.zeros(0, dtype=SIZE_T), np.zeros(0, dtype=SIZE_T))
128 newsingvals = [
129 extended_singvals[keep[0][keep[1] == n], keep[1][keep[1] == n]][::-1]
130 for n in range(extended_singvals.shape[1])
131 ]
132 discsingvals = [
133 extended_singvals[disc[0][disc[1] == n], disc[1][disc[1] == n]][::-1]
134 for n in range(extended_singvals.shape[1])
135 ]
136 new_block_size = [len(s) for s in newsingvals]
137 discsingvals = [
138 d[:(orig_block_size[n] - new_block_size[n])]
139 for n, d in enumerate(discsingvals)
140 ]
141 singvals = newsingvals
142 discarded_singvals = discsingvals
143 if len(singvals) > 0:
144 left_singval_charge_labels = np.concatenate([
145 np.full(singvals[n].shape[0], fill_value=n, dtype=np.int16)
146 for n in range(len(singvals))
147 ])
148 all_singvals = np.concatenate(singvals)
149 #define the new charges on the two central bonds
150 left_charge_labels = np.concatenate([
151 np.full(len(singvals[n]), fill_value=n, dtype=np.int16)
152 for n in range(len(u_blocks))
153 ])
154 right_charge_labels = np.concatenate([
155 np.full(len(singvals[n]), fill_value=n, dtype=np.int16)
156 for n in range(len(v_blocks))
157 ])
158 all_ublocks = np.concatenate([
159 np.ravel(np.transpose(u_blocks[n][:, 0:len(singvals[n])]))
160 for n in range(len(u_blocks))
161 ])
162 all_vblocks = np.concatenate([
163 np.ravel(v_blocks[n][0:len(singvals[n]), :])
164 for n in range(len(v_blocks))
165 ])
166 else:
167 left_singval_charge_labels = np.empty(0, dtype=np.int16)
168 all_singvals = np.empty(0, dtype=get_real_dtype(tensor.dtype))
169 left_charge_labels = np.empty(0, dtype=np.int16)
170 right_charge_labels = np.empty(0, dtype=np.int16)
171 all_ublocks = np.empty(0, dtype=get_real_dtype(tensor.dtype))
172 all_vblocks = np.empty(0, dtype=get_real_dtype(tensor.dtype))
173
174 if len(discarded_singvals) > 0:
175 tmp_labels = [
176 np.full(discarded_singvals[n].shape[0], fill_value=n, dtype=np.int16)
177 for n in range(len(discarded_singvals))
178 ]
179 left_discarded_singval_charge_labels = np.concatenate(tmp_labels)
180 all_discarded_singvals = np.concatenate(discarded_singvals)
181
182 else:
183 left_discarded_singval_charge_labels = np.empty(0, dtype=np.int16)
184 all_discarded_singvals = np.empty(0, dtype=get_real_dtype(tensor.dtype))
185
186
187 left_singval_charge = charges[left_singval_charge_labels]
188 S = ChargeArray(all_singvals, [left_singval_charge], [False])
189
190 left_discarded_singval_charge = charges[left_discarded_singval_charge_labels]
191 Sdisc = ChargeArray(all_discarded_singvals, [left_discarded_singval_charge],
192 [False])
193
194 new_left_charge = charges[left_charge_labels]
195 new_right_charge = charges[right_charge_labels]
196
197 #get the indices of the new tensors U,S and V
198 charges_u = [new_left_charge] + [matrix._charges[o] for o in matrix._order[0]]
199 order_u = [[0]] + [list(np.arange(1, len(matrix._order[0]) + 1))]
200 flows_u = [True] + [matrix._flows[o] for o in matrix._order[0]]
201 charges_v = [new_right_charge
202 ] + [matrix._charges[o] for o in matrix._order[1]]
203 flows_v = [False] + [matrix._flows[o] for o in matrix._order[1]]
204 order_v = [[0]] + [list(np.arange(1, len(matrix._order[1]) + 1))]
205
206 #We fill in data into the transposed U
207 U = BlockSparseTensor(
208 all_ublocks,
209 charges=charges_u,
210 flows=flows_u,
211 order=order_u,
212 check_consistency=False).transpose((1, 0))
213
214 V = BlockSparseTensor(
215 all_vblocks,
216 charges=charges_v,
217 flows=flows_v,
218 order=order_v,
219 check_consistency=False)
220 left_shape = left_dims + (S.shape[0],)
221 right_shape = (S.shape[0],) + right_dims
222 return U.reshape(left_shape), S, V.reshape(right_shape), Sdisc
223
224
225 def qr(bt, tensor: BlockSparseTensor, pivot_axis: int) -> Tuple[Tensor, Tensor]:
226 """Computes the QR decomposition of a tensor.
227
228 See tensornetwork.backends.tensorflow.decompositions for details.
229 """
230 left_dims = tensor.shape[:pivot_axis]
231 right_dims = tensor.shape[pivot_axis:]
232 tensor = bt.reshape(tensor, [np.prod(left_dims), np.prod(right_dims)])
233 q, r = bt.qr(tensor)
234 center_dim = q.shape[1]
235 q = bt.reshape(q, list(left_dims) + [center_dim])
236 r = bt.reshape(r, [center_dim] + list(right_dims))
237 return q, r
238
239
240 def rq(bt, tensor: BlockSparseTensor, pivot_axis: int) -> Tuple[Tensor, Tensor]:
241 """Computes the RQ (reversed QR) decomposition of a tensor.
242
243 See tensornetwork.backends.tensorflow.decompositions for details.
244 """
245 left_dims = tensor.shape[:pivot_axis]
246 right_dims = tensor.shape[pivot_axis:]
247 tensor = bt.reshape(tensor, [np.prod(left_dims), np.prod(right_dims)])
248 q, r = bt.qr(bt.conj(bt.transpose(tensor, (1, 0))))
249 r, q = bt.conj(bt.transpose(r, (1, 0))), bt.conj(bt.transpose(
250 q, (1, 0))) #M=r*q at this point
251 center_dim = r.shape[1]
252 r = bt.reshape(r, list(left_dims) + [center_dim])
253 q = bt.reshape(q, [center_dim] + list(right_dims))
254 return r, q
255
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/tensornetwork/backends/symmetric/decompositions.py b/tensornetwork/backends/symmetric/decompositions.py
--- a/tensornetwork/backends/symmetric/decompositions.py
+++ b/tensornetwork/backends/symmetric/decompositions.py
@@ -84,10 +84,6 @@
#sort singular values
inds = np.argsort(extended_flat_singvals, kind='stable')
discarded_inds = np.zeros(0, dtype=SIZE_T)
- if inds.shape[0] > 0:
- maxind = inds[-1]
- else:
- maxind = 0
if max_truncation_error is not None:
if relative and (len(singvals) > 0):
max_truncation_error = max_truncation_error * np.max(
@@ -114,9 +110,7 @@
if len(inds) == 0:
#special case of truncation to 0 dimension;
- warnings.warn("svd_decomposition truncated to 0 dimensions. "
- "Adjusting to `max_singular_values = 1`")
- inds = np.asarray([maxind])
+ warnings.warn("svd_decomposition truncated to 0 dimensions.")
if extended_singvals.shape[1] > 0:
#pylint: disable=no-member
|
{"golden_diff": "diff --git a/tensornetwork/backends/symmetric/decompositions.py b/tensornetwork/backends/symmetric/decompositions.py\n--- a/tensornetwork/backends/symmetric/decompositions.py\n+++ b/tensornetwork/backends/symmetric/decompositions.py\n@@ -84,10 +84,6 @@\n #sort singular values\n inds = np.argsort(extended_flat_singvals, kind='stable')\n discarded_inds = np.zeros(0, dtype=SIZE_T)\n- if inds.shape[0] > 0:\n- maxind = inds[-1]\n- else:\n- maxind = 0\n if max_truncation_error is not None:\n if relative and (len(singvals) > 0):\n max_truncation_error = max_truncation_error * np.max(\n@@ -114,9 +110,7 @@\n \n if len(inds) == 0:\n #special case of truncation to 0 dimension;\n- warnings.warn(\"svd_decomposition truncated to 0 dimensions. \"\n- \"Adjusting to `max_singular_values = 1`\")\n- inds = np.asarray([maxind])\n+ warnings.warn(\"svd_decomposition truncated to 0 dimensions.\")\n \n if extended_singvals.shape[1] > 0:\n #pylint: disable=no-member\n", "issue": "Flaky Test when seed not used\nThe test `test_max_truncation_error` in `tensornetwork/backends/symmetric/decompositions_test.py` [link](https://github.com/google/TensorNetwork/blob/edf3f345640c64595b4ad1002bebf4a008988b3e/tensornetwork/backends/symmetric/decompositions_test.py#L90) almost always fails when the seed [here](https://github.com/google/TensorNetwork/blob/edf3f345640c64595b4ad1002bebf4a008988b3e/tensornetwork/backends/symmetric/decompositions_test.py#L91) is removed. This happens only for `[3-float64]` and `[4-complex128]` parameters. The test failed 500/500 times for me for both these parameters.\r\n\r\nFrom the error message it seems `svals[mask][::-1]` is always empty whereas `S2.data` most often has an element of size 1(except when using the seed). Is this expected? Or is this indicative of a bug?\r\n\r\nPlease let me know if you need more details. 
I will be happy to raise a PR if you have any suggestions on how to fix it.\r\n\r\nTest failure message:\r\n```\r\n> np.testing.assert_allclose(S2.data, svals[mask][::-1])\r\nE AssertionError:\r\nE Not equal to tolerance rtol=1e-07, atol=0\r\nE\r\nE (shapes (1,), (0,) mismatch)\r\nE x: array([1.])\r\nE y: array([], dtype=float64)\r\n\r\ntensornetwork/backends/symmetric/decompositions_test.py:112: AssertionError\r\n```\r\n\r\nEnvironment:\r\n```\r\nAnaconda python 3.6\r\ntensorflow==2.6.0\r\nnumpy==1.19.5\r\nscipy==1.5.4\r\n```\r\n\n", "before_files": [{"content": "# Copyright 2019 The TensorNetwork Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tensor Decomposition Implementations.\"\"\"\nimport numpy as np\nimport warnings\nfrom typing import Optional, Any, Tuple\nfrom tensornetwork.block_sparse.blocksparse_utils import (\n _find_transposed_diagonal_sparse_blocks)\nfrom tensornetwork.block_sparse.utils import get_real_dtype\nfrom tensornetwork.block_sparse.sizetypes import SIZE_T\nfrom tensornetwork.block_sparse.blocksparsetensor import (BlockSparseTensor,\n ChargeArray)\nTensor = Any\n\n\ndef svd(\n bt,\n tensor: BlockSparseTensor,\n pivot_axis: int,\n max_singular_values: Optional[int] = None,\n max_truncation_error: Optional[float] = None,\n relative: Optional[bool] = False) -> Tuple[Tensor, Tensor, Tensor, Tensor]:\n \"\"\"\n Computes the singular value decomposition (SVD) of a tensor.\n See tensornetwork.backends.tensorflow.decompositions for details.\n \"\"\"\n\n left_dims = tensor.shape[:pivot_axis]\n right_dims = tensor.shape[pivot_axis:]\n\n matrix = bt.reshape(tensor, [np.prod(left_dims), np.prod(right_dims)])\n\n flat_charges = matrix._charges\n flat_flows = matrix._flows\n flat_order = matrix.flat_order\n tr_partition = len(matrix._order[0])\n blocks, charges, shapes = _find_transposed_diagonal_sparse_blocks(\n flat_charges, flat_flows, tr_partition, flat_order)\n\n u_blocks = []\n singvals = []\n v_blocks = []\n for n, b in enumerate(blocks):\n out = np.linalg.svd(\n np.reshape(matrix.data[b], shapes[:, n]),\n full_matrices=False,\n compute_uv=True)\n u_blocks.append(out[0])\n singvals.append(out[1])\n v_blocks.append(out[2])\n\n orig_num_singvals = np.int64(np.sum([len(s) for s in singvals]))\n orig_block_size = [len(s) for s in singvals]\n discarded_singvals = np.zeros(0, dtype=get_real_dtype(tensor.dtype))\n if (max_singular_values\n is not None) and (max_singular_values >= orig_num_singvals):\n max_singular_values = None\n\n if (max_truncation_error is not None) or (max_singular_values is not None):\n max_D = np.max([len(s) for s in singvals]) if len(singvals) > 0 else 0\n\n #extend singvals of all blocks into a matrix by padding each block with 0\n if len(singvals) > 0:\n extended_singvals = np.stack([\n np.append(s, np.zeros(max_D - len(s), dtype=s.dtype))\n for s in singvals\n ],\n axis=1)\n else:\n extended_singvals = np.empty((0, 0), dtype=get_real_dtype(tensor.dtype))\n\n extended_flat_singvals = np.ravel(extended_singvals)\n #sort singular values\n 
inds = np.argsort(extended_flat_singvals, kind='stable')\n discarded_inds = np.zeros(0, dtype=SIZE_T)\n if inds.shape[0] > 0:\n maxind = inds[-1]\n else:\n maxind = 0\n if max_truncation_error is not None:\n if relative and (len(singvals) > 0):\n max_truncation_error = max_truncation_error * np.max(\n [s[0] for s in singvals])\n\n kept_inds_mask = np.sqrt(\n np.cumsum(np.square(\n extended_flat_singvals[inds]))) > max_truncation_error\n trunc_inds_mask = np.logical_not(kept_inds_mask)\n discarded_inds = inds[trunc_inds_mask]\n inds = inds[kept_inds_mask]\n if max_singular_values is not None:\n #if the original number of non-zero singular values\n #is smaller than `max_singular_values` we need to reset\n #`max_singular_values` (we were filling in 0.0 into singular\n #value blocks to facilitate trunction steps, thus we could end up\n #with more singular values than originally there).\n if max_singular_values > orig_num_singvals:\n max_singular_values = orig_num_singvals\n if max_singular_values < len(inds):\n discarded_inds = np.append(discarded_inds,\n inds[:(-1) * max_singular_values])\n inds = inds[(-1) * max_singular_values::]\n\n if len(inds) == 0:\n #special case of truncation to 0 dimension;\n warnings.warn(\"svd_decomposition truncated to 0 dimensions. \"\n \"Adjusting to `max_singular_values = 1`\")\n inds = np.asarray([maxind])\n\n if extended_singvals.shape[1] > 0:\n #pylint: disable=no-member\n keep = np.divmod(inds, extended_singvals.shape[1])\n disc = np.divmod(discarded_inds, extended_singvals.shape[1])\n else:\n keep = (np.zeros(1, dtype=SIZE_T), np.zeros(1, dtype=SIZE_T))\n disc = (np.zeros(0, dtype=SIZE_T), np.zeros(0, dtype=SIZE_T))\n newsingvals = [\n extended_singvals[keep[0][keep[1] == n], keep[1][keep[1] == n]][::-1]\n for n in range(extended_singvals.shape[1])\n ]\n discsingvals = [\n extended_singvals[disc[0][disc[1] == n], disc[1][disc[1] == n]][::-1]\n for n in range(extended_singvals.shape[1])\n ]\n new_block_size = [len(s) for s in newsingvals]\n discsingvals = [\n d[:(orig_block_size[n] - new_block_size[n])]\n for n, d in enumerate(discsingvals)\n ]\n singvals = newsingvals\n discarded_singvals = discsingvals\n if len(singvals) > 0:\n left_singval_charge_labels = np.concatenate([\n np.full(singvals[n].shape[0], fill_value=n, dtype=np.int16)\n for n in range(len(singvals))\n ])\n all_singvals = np.concatenate(singvals)\n #define the new charges on the two central bonds\n left_charge_labels = np.concatenate([\n np.full(len(singvals[n]), fill_value=n, dtype=np.int16)\n for n in range(len(u_blocks))\n ])\n right_charge_labels = np.concatenate([\n np.full(len(singvals[n]), fill_value=n, dtype=np.int16)\n for n in range(len(v_blocks))\n ])\n all_ublocks = np.concatenate([\n np.ravel(np.transpose(u_blocks[n][:, 0:len(singvals[n])]))\n for n in range(len(u_blocks))\n ])\n all_vblocks = np.concatenate([\n np.ravel(v_blocks[n][0:len(singvals[n]), :])\n for n in range(len(v_blocks))\n ])\n else:\n left_singval_charge_labels = np.empty(0, dtype=np.int16)\n all_singvals = np.empty(0, dtype=get_real_dtype(tensor.dtype))\n left_charge_labels = np.empty(0, dtype=np.int16)\n right_charge_labels = np.empty(0, dtype=np.int16)\n all_ublocks = np.empty(0, dtype=get_real_dtype(tensor.dtype))\n all_vblocks = np.empty(0, dtype=get_real_dtype(tensor.dtype))\n\n if len(discarded_singvals) > 0:\n tmp_labels = [\n np.full(discarded_singvals[n].shape[0], fill_value=n, dtype=np.int16)\n for n in range(len(discarded_singvals))\n ]\n left_discarded_singval_charge_labels = 
np.concatenate(tmp_labels)\n all_discarded_singvals = np.concatenate(discarded_singvals)\n\n else:\n left_discarded_singval_charge_labels = np.empty(0, dtype=np.int16)\n all_discarded_singvals = np.empty(0, dtype=get_real_dtype(tensor.dtype))\n\n\n left_singval_charge = charges[left_singval_charge_labels]\n S = ChargeArray(all_singvals, [left_singval_charge], [False])\n\n left_discarded_singval_charge = charges[left_discarded_singval_charge_labels]\n Sdisc = ChargeArray(all_discarded_singvals, [left_discarded_singval_charge],\n [False])\n\n new_left_charge = charges[left_charge_labels]\n new_right_charge = charges[right_charge_labels]\n\n #get the indices of the new tensors U,S and V\n charges_u = [new_left_charge] + [matrix._charges[o] for o in matrix._order[0]]\n order_u = [[0]] + [list(np.arange(1, len(matrix._order[0]) + 1))]\n flows_u = [True] + [matrix._flows[o] for o in matrix._order[0]]\n charges_v = [new_right_charge\n ] + [matrix._charges[o] for o in matrix._order[1]]\n flows_v = [False] + [matrix._flows[o] for o in matrix._order[1]]\n order_v = [[0]] + [list(np.arange(1, len(matrix._order[1]) + 1))]\n\n #We fill in data into the transposed U\n U = BlockSparseTensor(\n all_ublocks,\n charges=charges_u,\n flows=flows_u,\n order=order_u,\n check_consistency=False).transpose((1, 0))\n\n V = BlockSparseTensor(\n all_vblocks,\n charges=charges_v,\n flows=flows_v,\n order=order_v,\n check_consistency=False)\n left_shape = left_dims + (S.shape[0],)\n right_shape = (S.shape[0],) + right_dims\n return U.reshape(left_shape), S, V.reshape(right_shape), Sdisc\n\n\ndef qr(bt, tensor: BlockSparseTensor, pivot_axis: int) -> Tuple[Tensor, Tensor]:\n \"\"\"Computes the QR decomposition of a tensor.\n\n See tensornetwork.backends.tensorflow.decompositions for details.\n \"\"\"\n left_dims = tensor.shape[:pivot_axis]\n right_dims = tensor.shape[pivot_axis:]\n tensor = bt.reshape(tensor, [np.prod(left_dims), np.prod(right_dims)])\n q, r = bt.qr(tensor)\n center_dim = q.shape[1]\n q = bt.reshape(q, list(left_dims) + [center_dim])\n r = bt.reshape(r, [center_dim] + list(right_dims))\n return q, r\n\n\ndef rq(bt, tensor: BlockSparseTensor, pivot_axis: int) -> Tuple[Tensor, Tensor]:\n \"\"\"Computes the RQ (reversed QR) decomposition of a tensor.\n\n See tensornetwork.backends.tensorflow.decompositions for details.\n \"\"\"\n left_dims = tensor.shape[:pivot_axis]\n right_dims = tensor.shape[pivot_axis:]\n tensor = bt.reshape(tensor, [np.prod(left_dims), np.prod(right_dims)])\n q, r = bt.qr(bt.conj(bt.transpose(tensor, (1, 0))))\n r, q = bt.conj(bt.transpose(r, (1, 0))), bt.conj(bt.transpose(\n q, (1, 0))) #M=r*q at this point\n center_dim = r.shape[1]\n r = bt.reshape(r, list(left_dims) + [center_dim])\n q = bt.reshape(q, [center_dim] + list(right_dims))\n return r, q\n", "path": "tensornetwork/backends/symmetric/decompositions.py"}], "after_files": [{"content": "# Copyright 2019 The TensorNetwork Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tensor Decomposition 
Implementations.\"\"\"\nimport numpy as np\nimport warnings\nfrom typing import Optional, Any, Tuple\nfrom tensornetwork.block_sparse.blocksparse_utils import (\n _find_transposed_diagonal_sparse_blocks)\nfrom tensornetwork.block_sparse.utils import get_real_dtype\nfrom tensornetwork.block_sparse.sizetypes import SIZE_T\nfrom tensornetwork.block_sparse.blocksparsetensor import (BlockSparseTensor,\n ChargeArray)\nTensor = Any\n\n\ndef svd(\n bt,\n tensor: BlockSparseTensor,\n pivot_axis: int,\n max_singular_values: Optional[int] = None,\n max_truncation_error: Optional[float] = None,\n relative: Optional[bool] = False) -> Tuple[Tensor, Tensor, Tensor, Tensor]:\n \"\"\"\n Computes the singular value decomposition (SVD) of a tensor.\n See tensornetwork.backends.tensorflow.decompositions for details.\n \"\"\"\n\n left_dims = tensor.shape[:pivot_axis]\n right_dims = tensor.shape[pivot_axis:]\n\n matrix = bt.reshape(tensor, [np.prod(left_dims), np.prod(right_dims)])\n\n flat_charges = matrix._charges\n flat_flows = matrix._flows\n flat_order = matrix.flat_order\n tr_partition = len(matrix._order[0])\n blocks, charges, shapes = _find_transposed_diagonal_sparse_blocks(\n flat_charges, flat_flows, tr_partition, flat_order)\n\n u_blocks = []\n singvals = []\n v_blocks = []\n for n, b in enumerate(blocks):\n out = np.linalg.svd(\n np.reshape(matrix.data[b], shapes[:, n]),\n full_matrices=False,\n compute_uv=True)\n u_blocks.append(out[0])\n singvals.append(out[1])\n v_blocks.append(out[2])\n\n orig_num_singvals = np.int64(np.sum([len(s) for s in singvals]))\n orig_block_size = [len(s) for s in singvals]\n discarded_singvals = np.zeros(0, dtype=get_real_dtype(tensor.dtype))\n if (max_singular_values\n is not None) and (max_singular_values >= orig_num_singvals):\n max_singular_values = None\n\n if (max_truncation_error is not None) or (max_singular_values is not None):\n max_D = np.max([len(s) for s in singvals]) if len(singvals) > 0 else 0\n\n #extend singvals of all blocks into a matrix by padding each block with 0\n if len(singvals) > 0:\n extended_singvals = np.stack([\n np.append(s, np.zeros(max_D - len(s), dtype=s.dtype))\n for s in singvals\n ],\n axis=1)\n else:\n extended_singvals = np.empty((0, 0), dtype=get_real_dtype(tensor.dtype))\n\n extended_flat_singvals = np.ravel(extended_singvals)\n #sort singular values\n inds = np.argsort(extended_flat_singvals, kind='stable')\n discarded_inds = np.zeros(0, dtype=SIZE_T)\n if max_truncation_error is not None:\n if relative and (len(singvals) > 0):\n max_truncation_error = max_truncation_error * np.max(\n [s[0] for s in singvals])\n\n kept_inds_mask = np.sqrt(\n np.cumsum(np.square(\n extended_flat_singvals[inds]))) > max_truncation_error\n trunc_inds_mask = np.logical_not(kept_inds_mask)\n discarded_inds = inds[trunc_inds_mask]\n inds = inds[kept_inds_mask]\n if max_singular_values is not None:\n #if the original number of non-zero singular values\n #is smaller than `max_singular_values` we need to reset\n #`max_singular_values` (we were filling in 0.0 into singular\n #value blocks to facilitate trunction steps, thus we could end up\n #with more singular values than originally there).\n if max_singular_values > orig_num_singvals:\n max_singular_values = orig_num_singvals\n if max_singular_values < len(inds):\n discarded_inds = np.append(discarded_inds,\n inds[:(-1) * max_singular_values])\n inds = inds[(-1) * max_singular_values::]\n\n if len(inds) == 0:\n #special case of truncation to 0 dimension;\n warnings.warn(\"svd_decomposition truncated to 
0 dimensions.\")\n\n if extended_singvals.shape[1] > 0:\n #pylint: disable=no-member\n keep = np.divmod(inds, extended_singvals.shape[1])\n disc = np.divmod(discarded_inds, extended_singvals.shape[1])\n else:\n keep = (np.zeros(1, dtype=SIZE_T), np.zeros(1, dtype=SIZE_T))\n disc = (np.zeros(0, dtype=SIZE_T), np.zeros(0, dtype=SIZE_T))\n newsingvals = [\n extended_singvals[keep[0][keep[1] == n], keep[1][keep[1] == n]][::-1]\n for n in range(extended_singvals.shape[1])\n ]\n discsingvals = [\n extended_singvals[disc[0][disc[1] == n], disc[1][disc[1] == n]][::-1]\n for n in range(extended_singvals.shape[1])\n ]\n new_block_size = [len(s) for s in newsingvals]\n discsingvals = [\n d[:(orig_block_size[n] - new_block_size[n])]\n for n, d in enumerate(discsingvals)\n ]\n singvals = newsingvals\n discarded_singvals = discsingvals\n if len(singvals) > 0:\n left_singval_charge_labels = np.concatenate([\n np.full(singvals[n].shape[0], fill_value=n, dtype=np.int16)\n for n in range(len(singvals))\n ])\n all_singvals = np.concatenate(singvals)\n #define the new charges on the two central bonds\n left_charge_labels = np.concatenate([\n np.full(len(singvals[n]), fill_value=n, dtype=np.int16)\n for n in range(len(u_blocks))\n ])\n right_charge_labels = np.concatenate([\n np.full(len(singvals[n]), fill_value=n, dtype=np.int16)\n for n in range(len(v_blocks))\n ])\n all_ublocks = np.concatenate([\n np.ravel(np.transpose(u_blocks[n][:, 0:len(singvals[n])]))\n for n in range(len(u_blocks))\n ])\n all_vblocks = np.concatenate([\n np.ravel(v_blocks[n][0:len(singvals[n]), :])\n for n in range(len(v_blocks))\n ])\n else:\n left_singval_charge_labels = np.empty(0, dtype=np.int16)\n all_singvals = np.empty(0, dtype=get_real_dtype(tensor.dtype))\n left_charge_labels = np.empty(0, dtype=np.int16)\n right_charge_labels = np.empty(0, dtype=np.int16)\n all_ublocks = np.empty(0, dtype=get_real_dtype(tensor.dtype))\n all_vblocks = np.empty(0, dtype=get_real_dtype(tensor.dtype))\n\n if len(discarded_singvals) > 0:\n tmp_labels = [\n np.full(discarded_singvals[n].shape[0], fill_value=n, dtype=np.int16)\n for n in range(len(discarded_singvals))\n ]\n left_discarded_singval_charge_labels = np.concatenate(tmp_labels)\n all_discarded_singvals = np.concatenate(discarded_singvals)\n\n else:\n left_discarded_singval_charge_labels = np.empty(0, dtype=np.int16)\n all_discarded_singvals = np.empty(0, dtype=get_real_dtype(tensor.dtype))\n\n\n left_singval_charge = charges[left_singval_charge_labels]\n S = ChargeArray(all_singvals, [left_singval_charge], [False])\n\n left_discarded_singval_charge = charges[left_discarded_singval_charge_labels]\n Sdisc = ChargeArray(all_discarded_singvals, [left_discarded_singval_charge],\n [False])\n\n new_left_charge = charges[left_charge_labels]\n new_right_charge = charges[right_charge_labels]\n\n #get the indices of the new tensors U,S and V\n charges_u = [new_left_charge] + [matrix._charges[o] for o in matrix._order[0]]\n order_u = [[0]] + [list(np.arange(1, len(matrix._order[0]) + 1))]\n flows_u = [True] + [matrix._flows[o] for o in matrix._order[0]]\n charges_v = [new_right_charge\n ] + [matrix._charges[o] for o in matrix._order[1]]\n flows_v = [False] + [matrix._flows[o] for o in matrix._order[1]]\n order_v = [[0]] + [list(np.arange(1, len(matrix._order[1]) + 1))]\n\n #We fill in data into the transposed U\n U = BlockSparseTensor(\n all_ublocks,\n charges=charges_u,\n flows=flows_u,\n order=order_u,\n check_consistency=False).transpose((1, 0))\n\n V = BlockSparseTensor(\n all_vblocks,\n 
charges=charges_v,\n flows=flows_v,\n order=order_v,\n check_consistency=False)\n left_shape = left_dims + (S.shape[0],)\n right_shape = (S.shape[0],) + right_dims\n return U.reshape(left_shape), S, V.reshape(right_shape), Sdisc\n\n\ndef qr(bt, tensor: BlockSparseTensor, pivot_axis: int) -> Tuple[Tensor, Tensor]:\n \"\"\"Computes the QR decomposition of a tensor.\n\n See tensornetwork.backends.tensorflow.decompositions for details.\n \"\"\"\n left_dims = tensor.shape[:pivot_axis]\n right_dims = tensor.shape[pivot_axis:]\n tensor = bt.reshape(tensor, [np.prod(left_dims), np.prod(right_dims)])\n q, r = bt.qr(tensor)\n center_dim = q.shape[1]\n q = bt.reshape(q, list(left_dims) + [center_dim])\n r = bt.reshape(r, [center_dim] + list(right_dims))\n return q, r\n\n\ndef rq(bt, tensor: BlockSparseTensor, pivot_axis: int) -> Tuple[Tensor, Tensor]:\n \"\"\"Computes the RQ (reversed QR) decomposition of a tensor.\n\n See tensornetwork.backends.tensorflow.decompositions for details.\n \"\"\"\n left_dims = tensor.shape[:pivot_axis]\n right_dims = tensor.shape[pivot_axis:]\n tensor = bt.reshape(tensor, [np.prod(left_dims), np.prod(right_dims)])\n q, r = bt.qr(bt.conj(bt.transpose(tensor, (1, 0))))\n r, q = bt.conj(bt.transpose(r, (1, 0))), bt.conj(bt.transpose(\n q, (1, 0))) #M=r*q at this point\n center_dim = r.shape[1]\n r = bt.reshape(r, list(left_dims) + [center_dim])\n q = bt.reshape(q, [center_dim] + list(right_dims))\n return r, q\n", "path": "tensornetwork/backends/symmetric/decompositions.py"}]}
| 4,045 | 300 |
gh_patches_debug_15829
|
rasdani/github-patches
|
git_diff
|
opsdroid__opsdroid-523
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Setting typing delay to 0 means bot never responds
# Description
When a typing delay is defined, it is an integer that states how many characters per second opsdroid should type. If you set this to `0`, opsdroid can type no characters per second and therefore never responds.
## Steps to Reproduce
- Configure a connector with a `typing-delay` of `0`.
- Talk to the bot
## Experienced Functionality
The bot never responds.
## Expected Functionality
I would expect the bot to respond eventually. Perhaps even immediately and log an error stating `0` is an invalid response.
Thinking a little more about this, I wonder if this functionality is the wrong way round. With `thinking-delay`, the value is the number of seconds that opsdroid should wait before responding, so the bigger the number, the longer opsdroid waits. But with `typing-delay` it is the number of characters per second that opsdroid can type, so the bigger the number, the less opsdroid waits. The word `delay` suggests that the higher the number, the longer the wait.
These opposites could be confusing (it confused me this evening). I think it would be better if both numbers represented time, rather than one representing characters per second. That would involve changing `typing-delay` to be seconds per character, so that a bigger number causes a bigger delay.
## Versions
- **Opsdroid version:** master
- **Python version:** 3.5.4
- **OS/Docker version:** macOS 10.13
## Configuration File
```yaml
connectors:
- name: shell
typing-delay: 0
skills:
- name: hello
```
--- END ISSUE ---
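A standalone sketch of the arithmetic in `_typing_delay` shows why a value of `0` is fatal; the message text and helper below are made up for illustration and are not the actual connector code:

```python
import asyncio

async def typing_delay(text: str, char_per_sec: int) -> None:
    # Mirrors the current implementation: character count floor-divided
    # by characters typed per second.
    await asyncio.sleep(len(text) // char_per_sec)

try:
    asyncio.run(typing_delay("hello world", 0))
except ZeroDivisionError as exc:
    # The real code only catches KeyError, so this error propagates and
    # connector.respond() is never reached; the bot appears to hang.
    print(f"typing-delay of 0 fails: {exc}")
```

The patch further down in this record sidesteps the problem by treating the setting as seconds per character (`char_count * seconds`), so `0` simply means no delay.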
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opsdroid/message.py`
Content:
```
1 """Class to encapsulate a message."""
2
3 from datetime import datetime
4 from copy import copy
5 import asyncio
6 from random import randrange
7
8 from opsdroid.helper import get_opsdroid
9
10
11 class Message:
12 # pylint: disable=too-few-public-methods
13 """A message object."""
14
15 def __init__(self, text, user, room, connector, raw_message=None):
16 """Create object with minimum properties."""
17 self.created = datetime.now()
18 self.text = text
19 self.user = user
20 self.room = room
21 self.connector = connector
22 self.raw_message = raw_message
23 self.regex = None
24 self.responded_to = False
25
26 async def _thinking_delay(self):
27 """Make opsdroid wait x-seconds before responding."""
28 seconds = self.connector.configuration.get('thinking-delay', 0)
29
30 if isinstance(seconds, list):
31 seconds = randrange(seconds[0], seconds[1])
32
33 await asyncio.sleep(seconds)
34
35 async def _typing_delay(self, text):
36 """Simulate typing, takes an int(characters per second typed)."""
37 try:
38 char_per_sec = self.connector.configuration['typing-delay']
39 char_count = len(text)
40 await asyncio.sleep(char_count//char_per_sec)
41 except KeyError:
42 pass
43
44 async def respond(self, text, room=None):
45 """Respond to this message using the connector it was created by."""
46 opsdroid = get_opsdroid()
47 response = copy(self)
48 response.text = text
49
50 if 'thinking-delay' in self.connector.configuration or \
51 'typing-delay' in self.connector.configuration:
52 await self._thinking_delay()
53 await self._typing_delay(response.text)
54
55 await self.connector.respond(response, room)
56 if not self.responded_to:
57 now = datetime.now()
58 opsdroid.stats["total_responses"] = \
59 opsdroid.stats["total_responses"] + 1
60 opsdroid.stats["total_response_time"] = \
61 opsdroid.stats["total_response_time"] + \
62 (now - self.created).total_seconds()
63 self.responded_to = True
64
65 async def react(self, emoji):
66 """React to this message using the connector it was created by."""
67 if 'thinking-delay' in self.connector.configuration:
68 await self._thinking_delay()
69 return await self.connector.react(self, emoji)
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/opsdroid/message.py b/opsdroid/message.py
--- a/opsdroid/message.py
+++ b/opsdroid/message.py
@@ -33,13 +33,14 @@
await asyncio.sleep(seconds)
async def _typing_delay(self, text):
- """Simulate typing, takes an int(characters per second typed)."""
- try:
- char_per_sec = self.connector.configuration['typing-delay']
- char_count = len(text)
- await asyncio.sleep(char_count//char_per_sec)
- except KeyError:
- pass
+ """Simulate typing, takes an int or float to delay reply."""
+ seconds = self.connector.configuration.get('typing-delay', 0)
+ char_count = len(text)
+
+ if isinstance(seconds, list):
+ seconds = randrange(seconds[0], seconds[1])
+
+ await asyncio.sleep(char_count*seconds)
async def respond(self, text, room=None):
"""Respond to this message using the connector it was created by."""
|
{"golden_diff": "diff --git a/opsdroid/message.py b/opsdroid/message.py\n--- a/opsdroid/message.py\n+++ b/opsdroid/message.py\n@@ -33,13 +33,14 @@\n await asyncio.sleep(seconds)\n \n async def _typing_delay(self, text):\n- \"\"\"Simulate typing, takes an int(characters per second typed).\"\"\"\n- try:\n- char_per_sec = self.connector.configuration['typing-delay']\n- char_count = len(text)\n- await asyncio.sleep(char_count//char_per_sec)\n- except KeyError:\n- pass\n+ \"\"\"Simulate typing, takes an int or float to delay reply.\"\"\"\n+ seconds = self.connector.configuration.get('typing-delay', 0)\n+ char_count = len(text)\n+\n+ if isinstance(seconds, list):\n+ seconds = randrange(seconds[0], seconds[1])\n+\n+ await asyncio.sleep(char_count*seconds)\n \n async def respond(self, text, room=None):\n \"\"\"Respond to this message using the connector it was created by.\"\"\"\n", "issue": "Setting typing delay to 0 means bot never responds\n# Description\r\nWhen a typing delay is defined it is an integer which states how many characters per second opsdroid should type. If you set this to `0` then it can type no characters per second and therefore never responds.\r\n\r\n\r\n## Steps to Reproduce\r\n- Configure a connector with a `typing-delay` of `0`. \r\n- Talk to the bot\r\n\r\n\r\n## Experienced Functionality\r\nThe bot never responds.\r\n\r\n\r\n## Expected Functionality\r\nI would expect the bot to respond eventually. Perhaps even immediately and log an error stating `0` is an invalid response.\r\n\r\nThinking a little more about this I wonder if this functionality is the wrong way round. With `thinking-delay` that is the number of seconds that opsdroid should way before responding, so as the number gets bigger the longer opsdroid waits. But with `typing-delay` it is the number of characters per second that opsdroid can type, so the bigger the number the less opsdroid waits. The word `delay` suggests that the higher the number the longer the wait.\r\n\r\nThese opposites could be confusing (it confused me this evening). I think it would be good that both numbers represent time, rather than one representing characters per second. That would involve changing it to be seconds per character. This would result in a bigger number causing a bigger delay. 
\r\n\r\n\r\n## Versions\r\n- **Opsdroid version:** master\r\n- **Python version:** 3.5.4\r\n- **OS/Docker version:** macOS 10.13\r\n\r\n## Configuration File\r\n\r\n\r\n```yaml\r\nconnectors:\r\n - name: shell\r\n typing-delay: 0\r\n\r\nskills:\r\n - name: hello\r\n```\r\n\n", "before_files": [{"content": "\"\"\"Class to encapsulate a message.\"\"\"\n\nfrom datetime import datetime\nfrom copy import copy\nimport asyncio\nfrom random import randrange\n\nfrom opsdroid.helper import get_opsdroid\n\n\nclass Message:\n # pylint: disable=too-few-public-methods\n \"\"\"A message object.\"\"\"\n\n def __init__(self, text, user, room, connector, raw_message=None):\n \"\"\"Create object with minimum properties.\"\"\"\n self.created = datetime.now()\n self.text = text\n self.user = user\n self.room = room\n self.connector = connector\n self.raw_message = raw_message\n self.regex = None\n self.responded_to = False\n\n async def _thinking_delay(self):\n \"\"\"Make opsdroid wait x-seconds before responding.\"\"\"\n seconds = self.connector.configuration.get('thinking-delay', 0)\n\n if isinstance(seconds, list):\n seconds = randrange(seconds[0], seconds[1])\n\n await asyncio.sleep(seconds)\n\n async def _typing_delay(self, text):\n \"\"\"Simulate typing, takes an int(characters per second typed).\"\"\"\n try:\n char_per_sec = self.connector.configuration['typing-delay']\n char_count = len(text)\n await asyncio.sleep(char_count//char_per_sec)\n except KeyError:\n pass\n\n async def respond(self, text, room=None):\n \"\"\"Respond to this message using the connector it was created by.\"\"\"\n opsdroid = get_opsdroid()\n response = copy(self)\n response.text = text\n\n if 'thinking-delay' in self.connector.configuration or \\\n 'typing-delay' in self.connector.configuration:\n await self._thinking_delay()\n await self._typing_delay(response.text)\n\n await self.connector.respond(response, room)\n if not self.responded_to:\n now = datetime.now()\n opsdroid.stats[\"total_responses\"] = \\\n opsdroid.stats[\"total_responses\"] + 1\n opsdroid.stats[\"total_response_time\"] = \\\n opsdroid.stats[\"total_response_time\"] + \\\n (now - self.created).total_seconds()\n self.responded_to = True\n\n async def react(self, emoji):\n \"\"\"React to this message using the connector it was created by.\"\"\"\n if 'thinking-delay' in self.connector.configuration:\n await self._thinking_delay()\n return await self.connector.react(self, emoji)\n", "path": "opsdroid/message.py"}], "after_files": [{"content": "\"\"\"Class to encapsulate a message.\"\"\"\n\nfrom datetime import datetime\nfrom copy import copy\nimport asyncio\nfrom random import randrange\n\nfrom opsdroid.helper import get_opsdroid\n\n\nclass Message:\n # pylint: disable=too-few-public-methods\n \"\"\"A message object.\"\"\"\n\n def __init__(self, text, user, room, connector, raw_message=None):\n \"\"\"Create object with minimum properties.\"\"\"\n self.created = datetime.now()\n self.text = text\n self.user = user\n self.room = room\n self.connector = connector\n self.raw_message = raw_message\n self.regex = None\n self.responded_to = False\n\n async def _thinking_delay(self):\n \"\"\"Make opsdroid wait x-seconds before responding.\"\"\"\n seconds = self.connector.configuration.get('thinking-delay', 0)\n\n if isinstance(seconds, list):\n seconds = randrange(seconds[0], seconds[1])\n\n await asyncio.sleep(seconds)\n\n async def _typing_delay(self, text):\n \"\"\"Simulate typing, takes an int or float to delay reply.\"\"\"\n seconds = 
self.connector.configuration.get('typing-delay', 0)\n char_count = len(text)\n\n if isinstance(seconds, list):\n seconds = randrange(seconds[0], seconds[1])\n\n await asyncio.sleep(char_count*seconds)\n\n async def respond(self, text, room=None):\n \"\"\"Respond to this message using the connector it was created by.\"\"\"\n opsdroid = get_opsdroid()\n response = copy(self)\n response.text = text\n\n if 'thinking-delay' in self.connector.configuration or \\\n 'typing-delay' in self.connector.configuration:\n await self._thinking_delay()\n await self._typing_delay(response.text)\n\n await self.connector.respond(response, room)\n if not self.responded_to:\n now = datetime.now()\n opsdroid.stats[\"total_responses\"] = \\\n opsdroid.stats[\"total_responses\"] + 1\n opsdroid.stats[\"total_response_time\"] = \\\n opsdroid.stats[\"total_response_time\"] + \\\n (now - self.created).total_seconds()\n self.responded_to = True\n\n async def react(self, emoji):\n \"\"\"React to this message using the connector it was created by.\"\"\"\n if 'thinking-delay' in self.connector.configuration:\n await self._thinking_delay()\n return await self.connector.react(self, emoji)\n", "path": "opsdroid/message.py"}]}
| 1,251 | 229 |
gh_patches_debug_7551
|
rasdani/github-patches
|
git_diff
|
mosaicml__composer-18
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Configure Jenkins
Enable GitHub Actions for:
- [x] pytest CPU runner
- [x] formatting and type checking (`yapf`, `pyright`)
- [ ] docker builds
- [x] docs builds
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `composer/callbacks/profiler.py`
Content:
```
1 from __future__ import annotations
2
3 import atexit
4 from dataclasses import asdict, dataclass
5 from typing import TYPE_CHECKING, Optional
6
7 import torch.profiler
8 import yahp as hp
9 from torch.profiler.profiler import ProfilerAction
10
11 from composer import Callback
12 from composer.callbacks.callback_hparams import CallbackHparams
13 from composer.core.types import StateDict
14 from composer.utils.ddp import get_global_rank
15
16 if TYPE_CHECKING:
17 from composer.core import Logger, State
18
19 _PROFILE_MISSING_ERROR = "The profiler has not been setup. Please call profiler.training_start() before training starts."
20
21
22 @dataclass
23 class TorchProfilerHparams(CallbackHparams):
24 tensorboard_trace_handler_dir: str = hp.required("directory to store trace results")
25 tensorboard_use_gzip: bool = hp.optional("use gzip for trace", default=False)
26 record_shapes: bool = hp.optional(doc="record_shapes", default=True)
27 profile_memory: bool = hp.optional(doc="track tensor memory allocations and frees", default=False)
28 with_stack: bool = hp.optional(doc="record stack info", default=True)
29 with_flops: bool = hp.optional(doc="estimate flops for operators", default=True)
30
31 skip: int = hp.optional("Number of batches to skip at epoch start", default=0)
32 warmup: int = hp.optional("Number of warmup batches in a cycle", default=1)
33 active: int = hp.optional("Number of batches to profile in a cycle", default=5)
34 wait: int = hp.optional("Number of batches to skip at the end of each cycle", default=0)
35
36 def initialize_object(self) -> TorchProfiler:
37 return TorchProfiler(**asdict(self))
38
39
40 @dataclass
41 class TorchProfilerState:
42 batch_in_epoch: int = 0
43 batches_per_epoch: int = 0
44
45
46 class TorchProfiler(Callback):
47
48 def __init__(
49 self,
50 *,
51 skip: int,
52 warmup: int,
53 active: int,
54 wait: int,
55 tensorboard_trace_handler_dir: str,
56 tensorboard_use_gzip: bool,
57 record_shapes: bool,
58 profile_memory: bool,
59 with_stack: bool,
60 with_flops: bool,
61 ) -> None:
62 super().__init__()
63 self.hparams = TorchProfilerHparams(
64 skip=skip,
65 warmup=warmup,
66 active=active,
67 wait=wait,
68 tensorboard_trace_handler_dir=tensorboard_trace_handler_dir,
69 tensorboard_use_gzip=tensorboard_use_gzip,
70 record_shapes=record_shapes,
71 profile_memory=profile_memory,
72 with_stack=with_stack,
73 with_flops=with_flops,
74 )
75 self.profiler: Optional[torch.profiler.profile] = None
76 self.profiler_state: TorchProfilerState = TorchProfilerState()
77 self._torch_profiler_scheduler = torch.profiler.profiler.schedule(
78 wait=self.hparams.wait,
79 warmup=self.hparams.warmup,
80 active=self.hparams.active,
81 skip_first=self.hparams.skip,
82 )
83
84 def state_dict(self) -> StateDict:
85 return asdict(self.profiler_state)
86
87 def load_state_dict(self, state: StateDict) -> None:
88 self.profiler_state = TorchProfilerState(**state)
89
90 def scheduler_fn(self, profiler_step: int) -> ProfilerAction:
91 # Invoked on every batch, at the batch end
92 # But, it's called one batch in advance.
93 # Wrapping the default scheduling function to deal with epoch boundaries
94 # Giving the torch scheduler the batch in the epoch, not the global step
95
96 # adding 1 since this is called before the step is incremented
97 next_batch_in_epoch = self.profiler_state.batch_in_epoch + 1
98 if profiler_step == 0:
99 next_batch_in_epoch = 0
100 torch_scheduler_action = self._torch_profiler_scheduler(next_batch_in_epoch)
101 if next_batch_in_epoch == self.profiler_state.batches_per_epoch:
102 if torch_scheduler_action == ProfilerAction.RECORD:
103 # force saving at epoch boundaries
104 torch_scheduler_action = ProfilerAction.RECORD_AND_SAVE
105 return torch_scheduler_action
106
107 def training_start(self, state: State, logger: Logger) -> None:
108 assert self.profiler is None, _PROFILE_MISSING_ERROR
109 self.profiler = torch.profiler.profile(
110 schedule=self.scheduler_fn,
111 on_trace_ready=torch.profiler.tensorboard_trace_handler(
112 self.hparams.tensorboard_trace_handler_dir,
113 worker_name=str(get_global_rank()),
114 use_gzip=self.hparams.tensorboard_use_gzip,
115 ),
116 activities=None, # auto-set
117 record_shapes=self.hparams.record_shapes,
118 profile_memory=self.hparams.profile_memory,
119 with_stack=self.hparams.with_stack,
120 with_flops=self.hparams.with_flops,
121 )
122 self.profiler.__enter__()
123 atexit.register(self._close_profiler)
124
125 def batch_end(self, state: State, logger: Logger) -> None:
126 assert self.profiler is not None, _PROFILE_MISSING_ERROR
127 self.profiler.step()
128
129 def epoch_start(self, state: State, logger: Logger) -> None:
130 self.profiler_state.batches_per_epoch = state.steps_per_epoch
131
132 def batch_start(self, state: State, logger: Logger) -> None:
133 self.profiler_state.batch_in_epoch = state.batch_idx
134 assert self.profiler is not None, _PROFILE_MISSING_ERROR
135 logger.metric_batch({"profiler/state": self.profiler.current_action.name})
136
137 def _close_profiler(self) -> None:
138 assert self.profiler is not None
139 self.profiler.__exit__(None, None, None)
140
```
Path: `composer/callbacks/timing_monitor.py`
Content:
```
1 from __future__ import annotations
2
3 import logging
4 import time
5 from dataclasses import dataclass
6 from typing import List
7
8 import yahp as hp
9
10 from composer import Logger, State
11 from composer.callbacks.callback_hparams import CallbackHparams
12 from composer.core.callback import Callback
13 from composer.core.types import BreakEpochException
14
15 log = logging.getLogger(__name__)
16
17
18 class TimingMonitor(Callback):
19
20 def __init__(self, min_steps: int, epoch_list: List[int], step_list: List[int], all_epochs: bool):
21 super().__init__()
22 self.hparams = TimingMonitorHparams(min_steps=min_steps,
23 epoch_list=epoch_list,
24 step_list=step_list,
25 all_epochs=all_epochs)
26
27 if not all_epochs:
28 assert len(epoch_list) > 0, "'epoch_list' must be non-empty."
29 assert 0 in epoch_list, \
30 "'epoch_list' must contain 0, otherwise first K epochs have unknown speed"
31 assert len(step_list) > 0, "'step_list' must be non-empty."
32 assert 0 in step_list, \
33 "'step_list' must contain 0 because `EPOCH_START` requires batch_idx of 0"
34
35 # Sort lists so that time only moves forward
36 epoch_list = list(sorted(epoch_list))
37 step_list = list(sorted(step_list))
38
39 self.current_time = None
40 self.profile_examples = 0
41 self.profile_steps = 0
42 self.profile_time = 0
43 self.wall_clock_train = 0
44
45 self.min_steps = min_steps
46
47 self.all_epochs = all_epochs
48 self.epoch_list = epoch_list
49 self.epoch_ix = 0
50 self.step_list = step_list
51 self.step_ix = 0
52
53 # initialized at training_start
54 self.original_max_epochs = -1
55 self.wct_dict = {}
56
57 def _compute_elapsed_wct(self, epoch_wct_dict, steps_per_epoch, n_epochs):
58 wct = 0.0
59 wct_per_step = 0
60 assert 0 in epoch_wct_dict, "epoch_wct_dict must contain 0"
61 for step in range(steps_per_epoch):
62 if step in epoch_wct_dict:
63 wct_per_step = epoch_wct_dict[step]
64 wct += wct_per_step
65 return wct * n_epochs
66
67 def training_start(self, state: State, logger: Logger):
68 self.wall_clock_train = 0.0
69 self.original_max_epochs = state.max_epochs
70 # maybe override epoch_list
71 if self.all_epochs:
72 self.epoch_list = list(range(state.max_epochs))
73 log.info(f"all_epochs=True, overriding epoch_list to be every epoch from 0 to {state.max_epochs}")
74 self.wct_dict = {e: {s: -1.0 for s in self.step_list} for e in self.epoch_list}
75 state.max_epochs = len(self.epoch_list)
76
77 def epoch_end(self, state: State, logger: Logger):
78 prev_epoch = self.epoch_list[self.epoch_ix]
79 epoch_wct_dict = self.wct_dict[prev_epoch]
80 self.epoch_ix += 1
81 if self.epoch_ix < len(self.epoch_list):
82 next_epoch = self.epoch_list[self.epoch_ix]
83 else:
84 next_epoch = self.original_max_epochs
85
86 state.epoch = next_epoch - 1
87 state.step = next_epoch * state.steps_per_epoch
88 n_epochs = next_epoch - prev_epoch
89
90 self.wall_clock_train += self._compute_elapsed_wct(epoch_wct_dict, state.steps_per_epoch, n_epochs)
91 logger.metric_epoch({'wall_clock_train': self.wall_clock_train})
92
93 def batch_start(self, state: State, logger: Logger):
94 if self.current_time is None:
95 self.current_time = time.time()
96 self.profile_examples = 0
97 self.profile_steps = 0
98 self.profile_time = 0.0
99
100 def batch_end(self, state: State, logger: Logger):
101 if self.current_time is not None:
102 now = time.time()
103 elapsed = now - self.current_time
104 self.current_time = now
105 self.profile_examples += state.last_batch_size * state.world_size
106 self.profile_steps += 1
107 self.profile_time += elapsed
108
109 if self.profile_steps >= self.min_steps:
110 avg_throughput = self.profile_examples / self.profile_time
111 avg_time_per_step = self.profile_time / self.profile_steps
112 profile_epoch = self.epoch_list[self.epoch_ix]
113 profile_step = self.step_list[self.step_ix]
114 self.wct_dict[profile_epoch][profile_step] = avg_time_per_step
115 logger.metric_batch({'throughput/step': avg_throughput})
116
117 self.current_time = None
118 self.step_ix += 1
119 if self.step_ix == len(self.step_list):
120 self.step_ix = 0
121 raise BreakEpochException
122 else:
123 state.step = state.epoch * state.steps_per_epoch + self.step_list[self.step_ix]
124
125
126 @dataclass
127 class TimingMonitorHparams(CallbackHparams):
128 min_steps: int = hp.optional(
129 doc="minimum number of steps to use for measuring throughput",
130 default=50,
131 )
132 epoch_list: List[int] = hp.optional(
133 doc="list of epochs at which to measure throughput",
134 default_factory=lambda: [0, 1],
135 )
136 step_list: List[int] = hp.optional(
137 doc="list of steps at which to measure throughput",
138 default_factory=lambda: [0, 50],
139 )
140 all_epochs: bool = hp.optional(
141 doc="if true, override epoch_list and profile at all epochs.",
142 default=False,
143 )
144
145 def initialize_object(self) -> TimingMonitor:
146 return TimingMonitor(
147 min_steps=self.min_steps,
148 epoch_list=self.epoch_list,
149 step_list=self.step_list,
150 all_epochs=self.all_epochs,
151 )
152
```
Path: `composer/trainer/entrypoint.py`
Content:
```
1 import logging
2
3 import composer
4 from composer.trainer.trainer import Trainer
5 from composer.trainer.trainer_hparams import TrainerHparams
6
7
8 def main() -> None:
9 logging.basicConfig()
10 logging.captureWarnings(True)
11
12 hparams = TrainerHparams.create()
13 logging.getLogger(composer.__name__).setLevel(hparams.log_level)
14 trainer = Trainer.create_from_hparams(hparams=hparams)
15 trainer.fit()
16
17
18 if __name__ == "__main__":
19 main()
20
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/composer/callbacks/profiler.py b/composer/callbacks/profiler.py
--- a/composer/callbacks/profiler.py
+++ b/composer/callbacks/profiler.py
@@ -1,3 +1,5 @@
+# Copyright 2021 MosaicML. All Rights Reserved.
+
from __future__ import annotations
import atexit
diff --git a/composer/callbacks/timing_monitor.py b/composer/callbacks/timing_monitor.py
--- a/composer/callbacks/timing_monitor.py
+++ b/composer/callbacks/timing_monitor.py
@@ -1,3 +1,5 @@
+# Copyright 2021 MosaicML. All Rights Reserved.
+
from __future__ import annotations
import logging
diff --git a/composer/trainer/entrypoint.py b/composer/trainer/entrypoint.py
--- a/composer/trainer/entrypoint.py
+++ b/composer/trainer/entrypoint.py
@@ -1,3 +1,5 @@
+# Copyright 2021 MosaicML. All Rights Reserved.
+
import logging
import composer
|
{"golden_diff": "diff --git a/composer/callbacks/profiler.py b/composer/callbacks/profiler.py\n--- a/composer/callbacks/profiler.py\n+++ b/composer/callbacks/profiler.py\n@@ -1,3 +1,5 @@\n+# Copyright 2021 MosaicML. All Rights Reserved.\n+\n from __future__ import annotations\n \n import atexit\ndiff --git a/composer/callbacks/timing_monitor.py b/composer/callbacks/timing_monitor.py\n--- a/composer/callbacks/timing_monitor.py\n+++ b/composer/callbacks/timing_monitor.py\n@@ -1,3 +1,5 @@\n+# Copyright 2021 MosaicML. All Rights Reserved.\n+\n from __future__ import annotations\n \n import logging\ndiff --git a/composer/trainer/entrypoint.py b/composer/trainer/entrypoint.py\n--- a/composer/trainer/entrypoint.py\n+++ b/composer/trainer/entrypoint.py\n@@ -1,3 +1,5 @@\n+# Copyright 2021 MosaicML. All Rights Reserved.\n+\n import logging\n \n import composer\n", "issue": "Configure Jenkins\nEnable github actions for:\r\n\r\n- [x] pytest CPU runner\r\n- [x] formatting and type checking (`yapf`, `pyright`)\r\n- [ ] docker builds\r\n- [x] docs builds\n", "before_files": [{"content": "from __future__ import annotations\n\nimport atexit\nfrom dataclasses import asdict, dataclass\nfrom typing import TYPE_CHECKING, Optional\n\nimport torch.profiler\nimport yahp as hp\nfrom torch.profiler.profiler import ProfilerAction\n\nfrom composer import Callback\nfrom composer.callbacks.callback_hparams import CallbackHparams\nfrom composer.core.types import StateDict\nfrom composer.utils.ddp import get_global_rank\n\nif TYPE_CHECKING:\n from composer.core import Logger, State\n\n_PROFILE_MISSING_ERROR = \"The profiler has not been setup. Please call profiler.training_start() before training starts.\"\n\n\n@dataclass\nclass TorchProfilerHparams(CallbackHparams):\n tensorboard_trace_handler_dir: str = hp.required(\"directory to store trace results\")\n tensorboard_use_gzip: bool = hp.optional(\"use gzip for trace\", default=False)\n record_shapes: bool = hp.optional(doc=\"record_shapes\", default=True)\n profile_memory: bool = hp.optional(doc=\"track tensor memory allocations and frees\", default=False)\n with_stack: bool = hp.optional(doc=\"record stack info\", default=True)\n with_flops: bool = hp.optional(doc=\"estimate flops for operators\", default=True)\n\n skip: int = hp.optional(\"Number of batches to skip at epoch start\", default=0)\n warmup: int = hp.optional(\"Number of warmup batches in a cycle\", default=1)\n active: int = hp.optional(\"Number of batches to profile in a cycle\", default=5)\n wait: int = hp.optional(\"Number of batches to skip at the end of each cycle\", default=0)\n\n def initialize_object(self) -> TorchProfiler:\n return TorchProfiler(**asdict(self))\n\n\n@dataclass\nclass TorchProfilerState:\n batch_in_epoch: int = 0\n batches_per_epoch: int = 0\n\n\nclass TorchProfiler(Callback):\n\n def __init__(\n self,\n *,\n skip: int,\n warmup: int,\n active: int,\n wait: int,\n tensorboard_trace_handler_dir: str,\n tensorboard_use_gzip: bool,\n record_shapes: bool,\n profile_memory: bool,\n with_stack: bool,\n with_flops: bool,\n ) -> None:\n super().__init__()\n self.hparams = TorchProfilerHparams(\n skip=skip,\n warmup=warmup,\n active=active,\n wait=wait,\n tensorboard_trace_handler_dir=tensorboard_trace_handler_dir,\n tensorboard_use_gzip=tensorboard_use_gzip,\n record_shapes=record_shapes,\n profile_memory=profile_memory,\n with_stack=with_stack,\n with_flops=with_flops,\n )\n self.profiler: Optional[torch.profiler.profile] = None\n self.profiler_state: TorchProfilerState = 
TorchProfilerState()\n self._torch_profiler_scheduler = torch.profiler.profiler.schedule(\n wait=self.hparams.wait,\n warmup=self.hparams.warmup,\n active=self.hparams.active,\n skip_first=self.hparams.skip,\n )\n\n def state_dict(self) -> StateDict:\n return asdict(self.profiler_state)\n\n def load_state_dict(self, state: StateDict) -> None:\n self.profiler_state = TorchProfilerState(**state)\n\n def scheduler_fn(self, profiler_step: int) -> ProfilerAction:\n # Invoked on every batch, at the batch end\n # But, it's called one batch in advance.\n # Wrapping the default scheduling function to deal with epoch boundaries\n # Giving the torch scheduler the batch in the epoch, not the global step\n\n # adding 1 since this is called before the step is incremented\n next_batch_in_epoch = self.profiler_state.batch_in_epoch + 1\n if profiler_step == 0:\n next_batch_in_epoch = 0\n torch_scheduler_action = self._torch_profiler_scheduler(next_batch_in_epoch)\n if next_batch_in_epoch == self.profiler_state.batches_per_epoch:\n if torch_scheduler_action == ProfilerAction.RECORD:\n # force saving at epoch boundaries\n torch_scheduler_action = ProfilerAction.RECORD_AND_SAVE\n return torch_scheduler_action\n\n def training_start(self, state: State, logger: Logger) -> None:\n assert self.profiler is None, _PROFILE_MISSING_ERROR\n self.profiler = torch.profiler.profile(\n schedule=self.scheduler_fn,\n on_trace_ready=torch.profiler.tensorboard_trace_handler(\n self.hparams.tensorboard_trace_handler_dir,\n worker_name=str(get_global_rank()),\n use_gzip=self.hparams.tensorboard_use_gzip,\n ),\n activities=None, # auto-set\n record_shapes=self.hparams.record_shapes,\n profile_memory=self.hparams.profile_memory,\n with_stack=self.hparams.with_stack,\n with_flops=self.hparams.with_flops,\n )\n self.profiler.__enter__()\n atexit.register(self._close_profiler)\n\n def batch_end(self, state: State, logger: Logger) -> None:\n assert self.profiler is not None, _PROFILE_MISSING_ERROR\n self.profiler.step()\n\n def epoch_start(self, state: State, logger: Logger) -> None:\n self.profiler_state.batches_per_epoch = state.steps_per_epoch\n\n def batch_start(self, state: State, logger: Logger) -> None:\n self.profiler_state.batch_in_epoch = state.batch_idx\n assert self.profiler is not None, _PROFILE_MISSING_ERROR\n logger.metric_batch({\"profiler/state\": self.profiler.current_action.name})\n\n def _close_profiler(self) -> None:\n assert self.profiler is not None\n self.profiler.__exit__(None, None, None)\n", "path": "composer/callbacks/profiler.py"}, {"content": "from __future__ import annotations\n\nimport logging\nimport time\nfrom dataclasses import dataclass\nfrom typing import List\n\nimport yahp as hp\n\nfrom composer import Logger, State\nfrom composer.callbacks.callback_hparams import CallbackHparams\nfrom composer.core.callback import Callback\nfrom composer.core.types import BreakEpochException\n\nlog = logging.getLogger(__name__)\n\n\nclass TimingMonitor(Callback):\n\n def __init__(self, min_steps: int, epoch_list: List[int], step_list: List[int], all_epochs: bool):\n super().__init__()\n self.hparams = TimingMonitorHparams(min_steps=min_steps,\n epoch_list=epoch_list,\n step_list=step_list,\n all_epochs=all_epochs)\n\n if not all_epochs:\n assert len(epoch_list) > 0, \"'epoch_list' must be non-empty.\"\n assert 0 in epoch_list, \\\n \"'epoch_list' must contain 0, otherwise first K epochs have unknown speed\"\n assert len(step_list) > 0, \"'step_list' must be non-empty.\"\n assert 0 in step_list, \\\n 
\"'step_list' must contain 0 because `EPOCH_START` requires batch_idx of 0\"\n\n # Sort lists so that time only moves forward\n epoch_list = list(sorted(epoch_list))\n step_list = list(sorted(step_list))\n\n self.current_time = None\n self.profile_examples = 0\n self.profile_steps = 0\n self.profile_time = 0\n self.wall_clock_train = 0\n\n self.min_steps = min_steps\n\n self.all_epochs = all_epochs\n self.epoch_list = epoch_list\n self.epoch_ix = 0\n self.step_list = step_list\n self.step_ix = 0\n\n # initialized at training_start\n self.original_max_epochs = -1\n self.wct_dict = {}\n\n def _compute_elapsed_wct(self, epoch_wct_dict, steps_per_epoch, n_epochs):\n wct = 0.0\n wct_per_step = 0\n assert 0 in epoch_wct_dict, \"epoch_wct_dict must contain 0\"\n for step in range(steps_per_epoch):\n if step in epoch_wct_dict:\n wct_per_step = epoch_wct_dict[step]\n wct += wct_per_step\n return wct * n_epochs\n\n def training_start(self, state: State, logger: Logger):\n self.wall_clock_train = 0.0\n self.original_max_epochs = state.max_epochs\n # maybe override epoch_list\n if self.all_epochs:\n self.epoch_list = list(range(state.max_epochs))\n log.info(f\"all_epochs=True, overriding epoch_list to be every epoch from 0 to {state.max_epochs}\")\n self.wct_dict = {e: {s: -1.0 for s in self.step_list} for e in self.epoch_list}\n state.max_epochs = len(self.epoch_list)\n\n def epoch_end(self, state: State, logger: Logger):\n prev_epoch = self.epoch_list[self.epoch_ix]\n epoch_wct_dict = self.wct_dict[prev_epoch]\n self.epoch_ix += 1\n if self.epoch_ix < len(self.epoch_list):\n next_epoch = self.epoch_list[self.epoch_ix]\n else:\n next_epoch = self.original_max_epochs\n\n state.epoch = next_epoch - 1\n state.step = next_epoch * state.steps_per_epoch\n n_epochs = next_epoch - prev_epoch\n\n self.wall_clock_train += self._compute_elapsed_wct(epoch_wct_dict, state.steps_per_epoch, n_epochs)\n logger.metric_epoch({'wall_clock_train': self.wall_clock_train})\n\n def batch_start(self, state: State, logger: Logger):\n if self.current_time is None:\n self.current_time = time.time()\n self.profile_examples = 0\n self.profile_steps = 0\n self.profile_time = 0.0\n\n def batch_end(self, state: State, logger: Logger):\n if self.current_time is not None:\n now = time.time()\n elapsed = now - self.current_time\n self.current_time = now\n self.profile_examples += state.last_batch_size * state.world_size\n self.profile_steps += 1\n self.profile_time += elapsed\n\n if self.profile_steps >= self.min_steps:\n avg_throughput = self.profile_examples / self.profile_time\n avg_time_per_step = self.profile_time / self.profile_steps\n profile_epoch = self.epoch_list[self.epoch_ix]\n profile_step = self.step_list[self.step_ix]\n self.wct_dict[profile_epoch][profile_step] = avg_time_per_step\n logger.metric_batch({'throughput/step': avg_throughput})\n\n self.current_time = None\n self.step_ix += 1\n if self.step_ix == len(self.step_list):\n self.step_ix = 0\n raise BreakEpochException\n else:\n state.step = state.epoch * state.steps_per_epoch + self.step_list[self.step_ix]\n\n\n@dataclass\nclass TimingMonitorHparams(CallbackHparams):\n min_steps: int = hp.optional(\n doc=\"minimum number of steps to use for measuring throughput\",\n default=50,\n )\n epoch_list: List[int] = hp.optional(\n doc=\"list of epochs at which to measure throughput\",\n default_factory=lambda: [0, 1],\n )\n step_list: List[int] = hp.optional(\n doc=\"list of steps at which to measure throughput\",\n default_factory=lambda: [0, 50],\n )\n all_epochs: bool 
= hp.optional(\n doc=\"if true, override epoch_list and profile at all epochs.\",\n default=False,\n )\n\n def initialize_object(self) -> TimingMonitor:\n return TimingMonitor(\n min_steps=self.min_steps,\n epoch_list=self.epoch_list,\n step_list=self.step_list,\n all_epochs=self.all_epochs,\n )\n", "path": "composer/callbacks/timing_monitor.py"}, {"content": "import logging\n\nimport composer\nfrom composer.trainer.trainer import Trainer\nfrom composer.trainer.trainer_hparams import TrainerHparams\n\n\ndef main() -> None:\n logging.basicConfig()\n logging.captureWarnings(True)\n\n hparams = TrainerHparams.create()\n logging.getLogger(composer.__name__).setLevel(hparams.log_level)\n trainer = Trainer.create_from_hparams(hparams=hparams)\n trainer.fit()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "composer/trainer/entrypoint.py"}], "after_files": [{"content": "# Copyright 2021 MosaicML. All Rights Reserved.\n\nfrom __future__ import annotations\n\nimport atexit\nfrom dataclasses import asdict, dataclass\nfrom typing import TYPE_CHECKING, Optional\n\nimport torch.profiler\nimport yahp as hp\nfrom torch.profiler.profiler import ProfilerAction\n\nfrom composer import Callback\nfrom composer.callbacks.callback_hparams import CallbackHparams\nfrom composer.core.types import StateDict\nfrom composer.utils.ddp import get_global_rank\n\nif TYPE_CHECKING:\n from composer.core import Logger, State\n\n_PROFILE_MISSING_ERROR = \"The profiler has not been setup. Please call profiler.training_start() before training starts.\"\n\n\n@dataclass\nclass TorchProfilerHparams(CallbackHparams):\n tensorboard_trace_handler_dir: str = hp.required(\"directory to store trace results\")\n tensorboard_use_gzip: bool = hp.optional(\"use gzip for trace\", default=False)\n record_shapes: bool = hp.optional(doc=\"record_shapes\", default=True)\n profile_memory: bool = hp.optional(doc=\"track tensor memory allocations and frees\", default=False)\n with_stack: bool = hp.optional(doc=\"record stack info\", default=True)\n with_flops: bool = hp.optional(doc=\"estimate flops for operators\", default=True)\n\n skip: int = hp.optional(\"Number of batches to skip at epoch start\", default=0)\n warmup: int = hp.optional(\"Number of warmup batches in a cycle\", default=1)\n active: int = hp.optional(\"Number of batches to profile in a cycle\", default=5)\n wait: int = hp.optional(\"Number of batches to skip at the end of each cycle\", default=0)\n\n def initialize_object(self) -> TorchProfiler:\n return TorchProfiler(**asdict(self))\n\n\n@dataclass\nclass TorchProfilerState:\n batch_in_epoch: int = 0\n batches_per_epoch: int = 0\n\n\nclass TorchProfiler(Callback):\n\n def __init__(\n self,\n *,\n skip: int,\n warmup: int,\n active: int,\n wait: int,\n tensorboard_trace_handler_dir: str,\n tensorboard_use_gzip: bool,\n record_shapes: bool,\n profile_memory: bool,\n with_stack: bool,\n with_flops: bool,\n ) -> None:\n super().__init__()\n self.hparams = TorchProfilerHparams(\n skip=skip,\n warmup=warmup,\n active=active,\n wait=wait,\n tensorboard_trace_handler_dir=tensorboard_trace_handler_dir,\n tensorboard_use_gzip=tensorboard_use_gzip,\n record_shapes=record_shapes,\n profile_memory=profile_memory,\n with_stack=with_stack,\n with_flops=with_flops,\n )\n self.profiler: Optional[torch.profiler.profile] = None\n self.profiler_state: TorchProfilerState = TorchProfilerState()\n self._torch_profiler_scheduler = torch.profiler.profiler.schedule(\n wait=self.hparams.wait,\n warmup=self.hparams.warmup,\n 
active=self.hparams.active,\n skip_first=self.hparams.skip,\n )\n\n def state_dict(self) -> StateDict:\n return asdict(self.profiler_state)\n\n def load_state_dict(self, state: StateDict) -> None:\n self.profiler_state = TorchProfilerState(**state)\n\n def scheduler_fn(self, profiler_step: int) -> ProfilerAction:\n # Invoked on every batch, at the batch end\n # But, it's called one batch in advance.\n # Wrapping the default scheduling function to deal with epoch boundaries\n # Giving the torch scheduler the batch in the epoch, not the global step\n\n # adding 1 since this is called before the step is incremented\n next_batch_in_epoch = self.profiler_state.batch_in_epoch + 1\n if profiler_step == 0:\n next_batch_in_epoch = 0\n torch_scheduler_action = self._torch_profiler_scheduler(next_batch_in_epoch)\n if next_batch_in_epoch == self.profiler_state.batches_per_epoch:\n if torch_scheduler_action == ProfilerAction.RECORD:\n # force saving at epoch boundaries\n torch_scheduler_action = ProfilerAction.RECORD_AND_SAVE\n return torch_scheduler_action\n\n def training_start(self, state: State, logger: Logger) -> None:\n assert self.profiler is None, _PROFILE_MISSING_ERROR\n self.profiler = torch.profiler.profile(\n schedule=self.scheduler_fn,\n on_trace_ready=torch.profiler.tensorboard_trace_handler(\n self.hparams.tensorboard_trace_handler_dir,\n worker_name=str(get_global_rank()),\n use_gzip=self.hparams.tensorboard_use_gzip,\n ),\n activities=None, # auto-set\n record_shapes=self.hparams.record_shapes,\n profile_memory=self.hparams.profile_memory,\n with_stack=self.hparams.with_stack,\n with_flops=self.hparams.with_flops,\n )\n self.profiler.__enter__()\n atexit.register(self._close_profiler)\n\n def batch_end(self, state: State, logger: Logger) -> None:\n assert self.profiler is not None, _PROFILE_MISSING_ERROR\n self.profiler.step()\n\n def epoch_start(self, state: State, logger: Logger) -> None:\n self.profiler_state.batches_per_epoch = state.steps_per_epoch\n\n def batch_start(self, state: State, logger: Logger) -> None:\n self.profiler_state.batch_in_epoch = state.batch_idx\n assert self.profiler is not None, _PROFILE_MISSING_ERROR\n logger.metric_batch({\"profiler/state\": self.profiler.current_action.name})\n\n def _close_profiler(self) -> None:\n assert self.profiler is not None\n self.profiler.__exit__(None, None, None)\n", "path": "composer/callbacks/profiler.py"}, {"content": "# Copyright 2021 MosaicML. 
All Rights Reserved.\n\nfrom __future__ import annotations\n\nimport logging\nimport time\nfrom dataclasses import dataclass\nfrom typing import List\n\nimport yahp as hp\n\nfrom composer import Logger, State\nfrom composer.callbacks.callback_hparams import CallbackHparams\nfrom composer.core.callback import Callback\nfrom composer.core.types import BreakEpochException\n\nlog = logging.getLogger(__name__)\n\n\nclass TimingMonitor(Callback):\n\n def __init__(self, min_steps: int, epoch_list: List[int], step_list: List[int], all_epochs: bool):\n super().__init__()\n self.hparams = TimingMonitorHparams(min_steps=min_steps,\n epoch_list=epoch_list,\n step_list=step_list,\n all_epochs=all_epochs)\n\n if not all_epochs:\n assert len(epoch_list) > 0, \"'epoch_list' must be non-empty.\"\n assert 0 in epoch_list, \\\n \"'epoch_list' must contain 0, otherwise first K epochs have unknown speed\"\n assert len(step_list) > 0, \"'step_list' must be non-empty.\"\n assert 0 in step_list, \\\n \"'step_list' must contain 0 because `EPOCH_START` requires batch_idx of 0\"\n\n # Sort lists so that time only moves forward\n epoch_list = list(sorted(epoch_list))\n step_list = list(sorted(step_list))\n\n self.current_time = None\n self.profile_examples = 0\n self.profile_steps = 0\n self.profile_time = 0\n self.wall_clock_train = 0\n\n self.min_steps = min_steps\n\n self.all_epochs = all_epochs\n self.epoch_list = epoch_list\n self.epoch_ix = 0\n self.step_list = step_list\n self.step_ix = 0\n\n # initialized at training_start\n self.original_max_epochs = -1\n self.wct_dict = {}\n\n def _compute_elapsed_wct(self, epoch_wct_dict, steps_per_epoch, n_epochs):\n wct = 0.0\n wct_per_step = 0\n assert 0 in epoch_wct_dict, \"epoch_wct_dict must contain 0\"\n for step in range(steps_per_epoch):\n if step in epoch_wct_dict:\n wct_per_step = epoch_wct_dict[step]\n wct += wct_per_step\n return wct * n_epochs\n\n def training_start(self, state: State, logger: Logger):\n self.wall_clock_train = 0.0\n self.original_max_epochs = state.max_epochs\n # maybe override epoch_list\n if self.all_epochs:\n self.epoch_list = list(range(state.max_epochs))\n log.info(f\"all_epochs=True, overriding epoch_list to be every epoch from 0 to {state.max_epochs}\")\n self.wct_dict = {e: {s: -1.0 for s in self.step_list} for e in self.epoch_list}\n state.max_epochs = len(self.epoch_list)\n\n def epoch_end(self, state: State, logger: Logger):\n prev_epoch = self.epoch_list[self.epoch_ix]\n epoch_wct_dict = self.wct_dict[prev_epoch]\n self.epoch_ix += 1\n if self.epoch_ix < len(self.epoch_list):\n next_epoch = self.epoch_list[self.epoch_ix]\n else:\n next_epoch = self.original_max_epochs\n\n state.epoch = next_epoch - 1\n state.step = next_epoch * state.steps_per_epoch\n n_epochs = next_epoch - prev_epoch\n\n self.wall_clock_train += self._compute_elapsed_wct(epoch_wct_dict, state.steps_per_epoch, n_epochs)\n logger.metric_epoch({'wall_clock_train': self.wall_clock_train})\n\n def batch_start(self, state: State, logger: Logger):\n if self.current_time is None:\n self.current_time = time.time()\n self.profile_examples = 0\n self.profile_steps = 0\n self.profile_time = 0.0\n\n def batch_end(self, state: State, logger: Logger):\n if self.current_time is not None:\n now = time.time()\n elapsed = now - self.current_time\n self.current_time = now\n self.profile_examples += state.last_batch_size * state.world_size\n self.profile_steps += 1\n self.profile_time += elapsed\n\n if self.profile_steps >= self.min_steps:\n avg_throughput = self.profile_examples / 
self.profile_time\n avg_time_per_step = self.profile_time / self.profile_steps\n profile_epoch = self.epoch_list[self.epoch_ix]\n profile_step = self.step_list[self.step_ix]\n self.wct_dict[profile_epoch][profile_step] = avg_time_per_step\n logger.metric_batch({'throughput/step': avg_throughput})\n\n self.current_time = None\n self.step_ix += 1\n if self.step_ix == len(self.step_list):\n self.step_ix = 0\n raise BreakEpochException\n else:\n state.step = state.epoch * state.steps_per_epoch + self.step_list[self.step_ix]\n\n\n@dataclass\nclass TimingMonitorHparams(CallbackHparams):\n min_steps: int = hp.optional(\n doc=\"minimum number of steps to use for measuring throughput\",\n default=50,\n )\n epoch_list: List[int] = hp.optional(\n doc=\"list of epochs at which to measure throughput\",\n default_factory=lambda: [0, 1],\n )\n step_list: List[int] = hp.optional(\n doc=\"list of steps at which to measure throughput\",\n default_factory=lambda: [0, 50],\n )\n all_epochs: bool = hp.optional(\n doc=\"if true, override epoch_list and profile at all epochs.\",\n default=False,\n )\n\n def initialize_object(self) -> TimingMonitor:\n return TimingMonitor(\n min_steps=self.min_steps,\n epoch_list=self.epoch_list,\n step_list=self.step_list,\n all_epochs=self.all_epochs,\n )\n", "path": "composer/callbacks/timing_monitor.py"}, {"content": "# Copyright 2021 MosaicML. All Rights Reserved.\n\nimport logging\n\nimport composer\nfrom composer.trainer.trainer import Trainer\nfrom composer.trainer.trainer_hparams import TrainerHparams\n\n\ndef main() -> None:\n logging.basicConfig()\n logging.captureWarnings(True)\n\n hparams = TrainerHparams.create()\n logging.getLogger(composer.__name__).setLevel(hparams.log_level)\n trainer = Trainer.create_from_hparams(hparams=hparams)\n trainer.fit()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "composer/trainer/entrypoint.py"}]}
num_tokens: 3,633
num_tokens_diff: 233

problem_id: gh_patches_debug_23776
source: rasdani/github-patches
task_type: git_diff
in_source_id: apache__airflow-18883
prompt:
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Facebook Ads Provider uses a deprecated version of the API
### Apache Airflow Provider(s)
facebook
### Versions of Apache Airflow Providers
2.0.1
### Apache Airflow version
2.1.1
### Operating System
Ubuntu 20.04
### Deployment
Docker-Compose
### Deployment details
_No response_
### What happened
Task fails because the hook uses a deprecated Facebook API version. The hook is calling v6.0, which is no longer supported.
### What you expected to happen
I expected this task to connect to the Facebook API and fetch the requested data.
My log files for the failed task output the following message:
```
facebook_business.exceptions.FacebookRequestError:
Message: Call was not successful
Method: POST
Path: https://graph.facebook.com/v6.0/act_1210763848963620/insights
Params: {'level': 'ad', 'date_preset': 'yesterday', 'fields': '["campaign_name","campaign_id","ad_id","clicks","impressions"]'}
Status: 400
Response:
{
"error": {
"message": "(#2635) You are calling a deprecated version of the Ads API. Please update to the latest version: v11.0.",
"type": "OAuthException",
"code": 2635,
"fbtrace_id": "AGRidwR5VhjU3kAJVUSkvuz"
}
}
```
Line 69 of https://github.com/apache/airflow/blob/main/airflow/providers/facebook/ads/hooks/ads.py should be changed to a newer API version.
### How to reproduce
Run the sample DAG posted here: https://airflow.apache.org/docs/apache-airflow-providers-google/stable/_modules/airflow/providers/google/cloud/example_dags/example_facebook_ads_to_gcs.html
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
--- END ISSUE ---
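To make the caller-side impact concrete, here is a minimal usage sketch of the hook with an explicitly pinned, still-supported Graph API version instead of the hard-coded v6.0 default. The connection id, the `v11.0` string (taken from the error message above), and the report arguments are assumptions for illustration only, not a confirmed fix:
```python
from airflow.providers.facebook.ads.hooks.ads import FacebookAdsReportingHook

# Sketch only: pin a currently supported Graph API version explicitly
# (or, if the default is relaxed, leave it to the Facebook business SDK).
hook = FacebookAdsReportingHook(
    facebook_conn_id="facebook_default",  # assumed Airflow connection id
    api_version="v11.0",  # version named in the error message; an assumption
)
insights = hook.bulk_facebook_report(
    params={"level": "ad", "date_preset": "yesterday"},
    fields=["campaign_name", "campaign_id", "ad_id", "clicks", "impressions"],
)
```
Because the existing constructor already accepts `api_version`, passing a newer version this way also works as a stopgap until the default itself is changed.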
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `airflow/providers/facebook/ads/hooks/ads.py`
Content:
```
1 #
2 # Licensed to the Apache Software Foundation (ASF) under one
3 # or more contributor license agreements. See the NOTICE file
4 # distributed with this work for additional information
5 # regarding copyright ownership. The ASF licenses this file
6 # to you under the Apache License, Version 2.0 (the
7 # "License"); you may not use this file except in compliance
8 # with the License. You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing,
13 # software distributed under the License is distributed on an
14 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 # KIND, either express or implied. See the License for the
16 # specific language governing permissions and limitations
17 # under the License.
18 """This module contains Facebook Ads Reporting hooks"""
19 import time
20 from enum import Enum
21 from typing import Any, Dict, List
22
23 try:
24 from functools import cached_property
25 except ImportError:
26 from cached_property import cached_property
27 from facebook_business.adobjects.adaccount import AdAccount
28 from facebook_business.adobjects.adreportrun import AdReportRun
29 from facebook_business.adobjects.adsinsights import AdsInsights
30 from facebook_business.api import FacebookAdsApi
31
32 from airflow.exceptions import AirflowException
33 from airflow.hooks.base import BaseHook
34
35
36 class JobStatus(Enum):
37 """Available options for facebook async task status"""
38
39 COMPLETED = 'Job Completed'
40 STARTED = 'Job Started'
41 RUNNING = 'Job Running'
42 FAILED = 'Job Failed'
43 SKIPPED = 'Job Skipped'
44
45
46 class FacebookAdsReportingHook(BaseHook):
47 """
48 Hook for the Facebook Ads API
49
50 .. seealso::
51 For more information on the Facebook Ads API, take a look at the API docs:
52 https://developers.facebook.com/docs/marketing-apis/
53
54 :param facebook_conn_id: Airflow Facebook Ads connection ID
55 :type facebook_conn_id: str
56 :param api_version: The version of Facebook API. Default to v6.0
57 :type api_version: str
58
59 """
60
61 conn_name_attr = 'facebook_conn_id'
62 default_conn_name = 'facebook_default'
63 conn_type = 'facebook_social'
64 hook_name = 'Facebook Ads'
65
66 def __init__(
67 self,
68 facebook_conn_id: str = default_conn_name,
69 api_version: str = "v6.0",
70 ) -> None:
71 super().__init__()
72 self.facebook_conn_id = facebook_conn_id
73 self.api_version = api_version
74 self.client_required_fields = ["app_id", "app_secret", "access_token", "account_id"]
75
76 def _get_service(self) -> FacebookAdsApi:
77 """Returns Facebook Ads Client using a service account"""
78 config = self.facebook_ads_config
79 return FacebookAdsApi.init(
80 app_id=config["app_id"],
81 app_secret=config["app_secret"],
82 access_token=config["access_token"],
83 account_id=config["account_id"],
84 api_version=self.api_version,
85 )
86
87 @cached_property
88 def facebook_ads_config(self) -> Dict:
89 """
90 Gets Facebook ads connection from meta db and sets
91 facebook_ads_config attribute with returned config file
92 """
93 self.log.info("Fetching fb connection: %s", self.facebook_conn_id)
94 conn = self.get_connection(self.facebook_conn_id)
95 config = conn.extra_dejson
96 missing_keys = self.client_required_fields - config.keys()
97 if missing_keys:
98 message = f"{missing_keys} fields are missing"
99 raise AirflowException(message)
100 return config
101
102 def bulk_facebook_report(
103 self,
104 params: Dict[str, Any],
105 fields: List[str],
106 sleep_time: int = 5,
107 ) -> List[AdsInsights]:
108 """
109 Pulls data from the Facebook Ads API
110
111 :param fields: List of fields that is obtained from Facebook. Found in AdsInsights.Field class.
112 https://developers.facebook.com/docs/marketing-api/insights/parameters/v6.0
113 :type fields: List[str]
114 :param params: Parameters that determine the query for Facebook
115 https://developers.facebook.com/docs/marketing-api/insights/parameters/v6.0
116 :type fields: Dict[str, Any]
117 :param sleep_time: Time to sleep when async call is happening
118 :type sleep_time: int
119
120 :return: Facebook Ads API response, converted to Facebook Ads Row objects
121 :rtype: List[AdsInsights]
122 """
123 api = self._get_service()
124 ad_account = AdAccount(api.get_default_account_id(), api=api)
125 _async = ad_account.get_insights(params=params, fields=fields, is_async=True)
126 while True:
127 request = _async.api_get()
128 async_status = request[AdReportRun.Field.async_status]
129 percent = request[AdReportRun.Field.async_percent_completion]
130 self.log.info("%s %s completed, async_status: %s", percent, "%", async_status)
131 if async_status == JobStatus.COMPLETED.value:
132 self.log.info("Job run completed")
133 break
134 if async_status in [JobStatus.SKIPPED.value, JobStatus.FAILED.value]:
135 message = f"{async_status}. Please retry."
136 raise AirflowException(message)
137 time.sleep(sleep_time)
138 report_run_id = _async.api_get()["report_run_id"]
139 report_object = AdReportRun(report_run_id, api=api)
140 insights = report_object.get_insights()
141 self.log.info("Extracting data from returned Facebook Ads Iterators")
142 return list(insights)
143
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/airflow/providers/facebook/ads/hooks/ads.py b/airflow/providers/facebook/ads/hooks/ads.py
--- a/airflow/providers/facebook/ads/hooks/ads.py
+++ b/airflow/providers/facebook/ads/hooks/ads.py
@@ -18,7 +18,7 @@
"""This module contains Facebook Ads Reporting hooks"""
import time
from enum import Enum
-from typing import Any, Dict, List
+from typing import Any, Dict, List, Optional
try:
from functools import cached_property
@@ -53,8 +53,9 @@
:param facebook_conn_id: Airflow Facebook Ads connection ID
:type facebook_conn_id: str
- :param api_version: The version of Facebook API. Default to v6.0
- :type api_version: str
+ :param api_version: The version of Facebook API. Default to None. If it is None,
+ it will use the Facebook business SDK default version.
+ :type api_version: Optional[str]
"""
@@ -66,7 +67,7 @@
def __init__(
self,
facebook_conn_id: str = default_conn_name,
- api_version: str = "v6.0",
+ api_version: Optional[str] = None,
) -> None:
super().__init__()
self.facebook_conn_id = facebook_conn_id
|
{"golden_diff": "diff --git a/airflow/providers/facebook/ads/hooks/ads.py b/airflow/providers/facebook/ads/hooks/ads.py\n--- a/airflow/providers/facebook/ads/hooks/ads.py\n+++ b/airflow/providers/facebook/ads/hooks/ads.py\n@@ -18,7 +18,7 @@\n \"\"\"This module contains Facebook Ads Reporting hooks\"\"\"\n import time\n from enum import Enum\n-from typing import Any, Dict, List\n+from typing import Any, Dict, List, Optional\n \n try:\n from functools import cached_property\n@@ -53,8 +53,9 @@\n \n :param facebook_conn_id: Airflow Facebook Ads connection ID\n :type facebook_conn_id: str\n- :param api_version: The version of Facebook API. Default to v6.0\n- :type api_version: str\n+ :param api_version: The version of Facebook API. Default to None. If it is None,\n+ it will use the Facebook business SDK default version.\n+ :type api_version: Optional[str]\n \n \"\"\"\n \n@@ -66,7 +67,7 @@\n def __init__(\n self,\n facebook_conn_id: str = default_conn_name,\n- api_version: str = \"v6.0\",\n+ api_version: Optional[str] = None,\n ) -> None:\n super().__init__()\n self.facebook_conn_id = facebook_conn_id\n", "issue": "Facebook Ads Provider uses a deprecated version of the API\n### Apache Airflow Provider(s)\n\nfacebook\n\n### Versions of Apache Airflow Providers\n\n2.0.1\n\n### Apache Airflow version\n\n2.1.1\n\n### Operating System\n\nUbuntu 20.04\n\n### Deployment\n\nDocker-Compose\n\n### Deployment details\n\n_No response_\n\n### What happened\n\nTask fails because the hook uses a deprecated Facebook API version. The hook is calling v6.0 which is longer supported.\r\n\r\n\n\n### What you expected to happen\n\nI expected this task to connected to the Facebook API and fetch the requested data. \r\n\r\nMy log files for the failed task output the following message:\r\n\r\n```\r\nfacebook_business.exceptions.FacebookRequestError: \r\n\r\n Message: Call was not successful\r\n Method: POST\r\n Path: https://graph.facebook.com/v6.0/act_1210763848963620/insights\r\n Params: {'level': 'ad', 'date_preset': 'yesterday', 'fields': '[\"campaign_name\",\"campaign_id\",\"ad_id\",\"clicks\",\"impressions\"]'}\r\n\r\n Status: 400\r\n Response:\r\n {\r\n \"error\": {\r\n \"message\": \"(#2635) You are calling a deprecated version of the Ads API. Please update to the latest version: v11.0.\",\r\n \"type\": \"OAuthException\",\r\n \"code\": 2635,\r\n \"fbtrace_id\": \"AGRidwR5VhjU3kAJVUSkvuz\"\r\n }\r\n }\r\n```\r\n\r\nLine 69 of https://github.com/apache/airflow/blob/main/airflow/providers/facebook/ads/hooks/ads.py should be changed to a newer API version.\n\n### How to reproduce\n\nRun the sample DAG posted here: https://airflow.apache.org/docs/apache-airflow-providers-google/stable/_modules/airflow/providers/google/cloud/example_dags/example_facebook_ads_to_gcs.html \n\n### Anything else\n\n_No response_\n\n### Are you willing to submit PR?\n\n- [X] Yes I am willing to submit a PR!\n\n### Code of Conduct\n\n- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)\n\n", "before_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"This module contains Facebook Ads Reporting hooks\"\"\"\nimport time\nfrom enum import Enum\nfrom typing import Any, Dict, List\n\ntry:\n from functools import cached_property\nexcept ImportError:\n from cached_property import cached_property\nfrom facebook_business.adobjects.adaccount import AdAccount\nfrom facebook_business.adobjects.adreportrun import AdReportRun\nfrom facebook_business.adobjects.adsinsights import AdsInsights\nfrom facebook_business.api import FacebookAdsApi\n\nfrom airflow.exceptions import AirflowException\nfrom airflow.hooks.base import BaseHook\n\n\nclass JobStatus(Enum):\n \"\"\"Available options for facebook async task status\"\"\"\n\n COMPLETED = 'Job Completed'\n STARTED = 'Job Started'\n RUNNING = 'Job Running'\n FAILED = 'Job Failed'\n SKIPPED = 'Job Skipped'\n\n\nclass FacebookAdsReportingHook(BaseHook):\n \"\"\"\n Hook for the Facebook Ads API\n\n .. seealso::\n For more information on the Facebook Ads API, take a look at the API docs:\n https://developers.facebook.com/docs/marketing-apis/\n\n :param facebook_conn_id: Airflow Facebook Ads connection ID\n :type facebook_conn_id: str\n :param api_version: The version of Facebook API. Default to v6.0\n :type api_version: str\n\n \"\"\"\n\n conn_name_attr = 'facebook_conn_id'\n default_conn_name = 'facebook_default'\n conn_type = 'facebook_social'\n hook_name = 'Facebook Ads'\n\n def __init__(\n self,\n facebook_conn_id: str = default_conn_name,\n api_version: str = \"v6.0\",\n ) -> None:\n super().__init__()\n self.facebook_conn_id = facebook_conn_id\n self.api_version = api_version\n self.client_required_fields = [\"app_id\", \"app_secret\", \"access_token\", \"account_id\"]\n\n def _get_service(self) -> FacebookAdsApi:\n \"\"\"Returns Facebook Ads Client using a service account\"\"\"\n config = self.facebook_ads_config\n return FacebookAdsApi.init(\n app_id=config[\"app_id\"],\n app_secret=config[\"app_secret\"],\n access_token=config[\"access_token\"],\n account_id=config[\"account_id\"],\n api_version=self.api_version,\n )\n\n @cached_property\n def facebook_ads_config(self) -> Dict:\n \"\"\"\n Gets Facebook ads connection from meta db and sets\n facebook_ads_config attribute with returned config file\n \"\"\"\n self.log.info(\"Fetching fb connection: %s\", self.facebook_conn_id)\n conn = self.get_connection(self.facebook_conn_id)\n config = conn.extra_dejson\n missing_keys = self.client_required_fields - config.keys()\n if missing_keys:\n message = f\"{missing_keys} fields are missing\"\n raise AirflowException(message)\n return config\n\n def bulk_facebook_report(\n self,\n params: Dict[str, Any],\n fields: List[str],\n sleep_time: int = 5,\n ) -> List[AdsInsights]:\n \"\"\"\n Pulls data from the Facebook Ads API\n\n :param fields: List of fields that is obtained from Facebook. 
Found in AdsInsights.Field class.\n https://developers.facebook.com/docs/marketing-api/insights/parameters/v6.0\n :type fields: List[str]\n :param params: Parameters that determine the query for Facebook\n https://developers.facebook.com/docs/marketing-api/insights/parameters/v6.0\n :type fields: Dict[str, Any]\n :param sleep_time: Time to sleep when async call is happening\n :type sleep_time: int\n\n :return: Facebook Ads API response, converted to Facebook Ads Row objects\n :rtype: List[AdsInsights]\n \"\"\"\n api = self._get_service()\n ad_account = AdAccount(api.get_default_account_id(), api=api)\n _async = ad_account.get_insights(params=params, fields=fields, is_async=True)\n while True:\n request = _async.api_get()\n async_status = request[AdReportRun.Field.async_status]\n percent = request[AdReportRun.Field.async_percent_completion]\n self.log.info(\"%s %s completed, async_status: %s\", percent, \"%\", async_status)\n if async_status == JobStatus.COMPLETED.value:\n self.log.info(\"Job run completed\")\n break\n if async_status in [JobStatus.SKIPPED.value, JobStatus.FAILED.value]:\n message = f\"{async_status}. Please retry.\"\n raise AirflowException(message)\n time.sleep(sleep_time)\n report_run_id = _async.api_get()[\"report_run_id\"]\n report_object = AdReportRun(report_run_id, api=api)\n insights = report_object.get_insights()\n self.log.info(\"Extracting data from returned Facebook Ads Iterators\")\n return list(insights)\n", "path": "airflow/providers/facebook/ads/hooks/ads.py"}], "after_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"This module contains Facebook Ads Reporting hooks\"\"\"\nimport time\nfrom enum import Enum\nfrom typing import Any, Dict, List, Optional\n\ntry:\n from functools import cached_property\nexcept ImportError:\n from cached_property import cached_property\nfrom facebook_business.adobjects.adaccount import AdAccount\nfrom facebook_business.adobjects.adreportrun import AdReportRun\nfrom facebook_business.adobjects.adsinsights import AdsInsights\nfrom facebook_business.api import FacebookAdsApi\n\nfrom airflow.exceptions import AirflowException\nfrom airflow.hooks.base import BaseHook\n\n\nclass JobStatus(Enum):\n \"\"\"Available options for facebook async task status\"\"\"\n\n COMPLETED = 'Job Completed'\n STARTED = 'Job Started'\n RUNNING = 'Job Running'\n FAILED = 'Job Failed'\n SKIPPED = 'Job Skipped'\n\n\nclass FacebookAdsReportingHook(BaseHook):\n \"\"\"\n Hook for the Facebook Ads API\n\n .. seealso::\n For more information on the Facebook Ads API, take a look at the API docs:\n https://developers.facebook.com/docs/marketing-apis/\n\n :param facebook_conn_id: Airflow Facebook Ads connection ID\n :type facebook_conn_id: str\n :param api_version: The version of Facebook API. 
Default to None. If it is None,\n it will use the Facebook business SDK default version.\n :type api_version: Optional[str]\n\n \"\"\"\n\n conn_name_attr = 'facebook_conn_id'\n default_conn_name = 'facebook_default'\n conn_type = 'facebook_social'\n hook_name = 'Facebook Ads'\n\n def __init__(\n self,\n facebook_conn_id: str = default_conn_name,\n api_version: Optional[str] = None,\n ) -> None:\n super().__init__()\n self.facebook_conn_id = facebook_conn_id\n self.api_version = api_version\n self.client_required_fields = [\"app_id\", \"app_secret\", \"access_token\", \"account_id\"]\n\n def _get_service(self) -> FacebookAdsApi:\n \"\"\"Returns Facebook Ads Client using a service account\"\"\"\n config = self.facebook_ads_config\n return FacebookAdsApi.init(\n app_id=config[\"app_id\"],\n app_secret=config[\"app_secret\"],\n access_token=config[\"access_token\"],\n account_id=config[\"account_id\"],\n api_version=self.api_version,\n )\n\n @cached_property\n def facebook_ads_config(self) -> Dict:\n \"\"\"\n Gets Facebook ads connection from meta db and sets\n facebook_ads_config attribute with returned config file\n \"\"\"\n self.log.info(\"Fetching fb connection: %s\", self.facebook_conn_id)\n conn = self.get_connection(self.facebook_conn_id)\n config = conn.extra_dejson\n missing_keys = self.client_required_fields - config.keys()\n if missing_keys:\n message = f\"{missing_keys} fields are missing\"\n raise AirflowException(message)\n return config\n\n def bulk_facebook_report(\n self,\n params: Dict[str, Any],\n fields: List[str],\n sleep_time: int = 5,\n ) -> List[AdsInsights]:\n \"\"\"\n Pulls data from the Facebook Ads API\n\n :param fields: List of fields that is obtained from Facebook. Found in AdsInsights.Field class.\n https://developers.facebook.com/docs/marketing-api/insights/parameters/v6.0\n :type fields: List[str]\n :param params: Parameters that determine the query for Facebook\n https://developers.facebook.com/docs/marketing-api/insights/parameters/v6.0\n :type fields: Dict[str, Any]\n :param sleep_time: Time to sleep when async call is happening\n :type sleep_time: int\n\n :return: Facebook Ads API response, converted to Facebook Ads Row objects\n :rtype: List[AdsInsights]\n \"\"\"\n api = self._get_service()\n ad_account = AdAccount(api.get_default_account_id(), api=api)\n _async = ad_account.get_insights(params=params, fields=fields, is_async=True)\n while True:\n request = _async.api_get()\n async_status = request[AdReportRun.Field.async_status]\n percent = request[AdReportRun.Field.async_percent_completion]\n self.log.info(\"%s %s completed, async_status: %s\", percent, \"%\", async_status)\n if async_status == JobStatus.COMPLETED.value:\n self.log.info(\"Job run completed\")\n break\n if async_status in [JobStatus.SKIPPED.value, JobStatus.FAILED.value]:\n message = f\"{async_status}. Please retry.\"\n raise AirflowException(message)\n time.sleep(sleep_time)\n report_run_id = _async.api_get()[\"report_run_id\"]\n report_object = AdReportRun(report_run_id, api=api)\n insights = report_object.get_insights()\n self.log.info(\"Extracting data from returned Facebook Ads Iterators\")\n return list(insights)\n", "path": "airflow/providers/facebook/ads/hooks/ads.py"}]}
num_tokens: 2,277
num_tokens_diff: 303

problem_id: gh_patches_debug_30741
source: rasdani/github-patches
task_type: git_diff
in_source_id: sopel-irc__sopel-2162
prompt:
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
tell: argument splitting/stripping is a bit broken
This code line removes most IRC formatting bytes from the beginning of the message:
https://github.com/sopel-irc/sopel/blob/f29c10dde2ff166dee15bc49f0cd43c93b93ba3e/sopel/modules/tell.py#L145
It's similar to another issue (#1877) we fixed in `choose` for 7.1 (#1965). The second `.lstrip()` with no arguments needs to be replaced with this plugin's own version of [`_format_safe()`](https://github.com/sopel-irc/sopel/blob/f29c10dde2ff166dee15bc49f0cd43c93b93ba3e/sopel/modules/choose.py#L19-L69) from `choose.py`.
In fact, it wouldn't take much at this point to convince me that Sopel's API should include its own formatting-safe versions of `strip()`, `lstrip()`, and `rstrip()`. Issues like this keep coming up, and stuff like `trigger.plain` won't help…
----
That line also misuses `.lstrip()` with the tellee's nick. That isn't a huge issue, because it's never going to go past the whitespace, but it could cause other quirks like the fact that punctuation gets through even though the code's _intent_ appears to be treating the tellee as a separate argument that isn't part of the message:
```irclog
<~dgw> ;tell Creator|TL, comma test
[…]
<&Kaede> [tell] Creator|TL: Monday, July 12, 2021 22:52:44 (IST) <dgw> tell Creator|TL , comma test
```
--- END ISSUE ---
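A rough sketch of the direction the issue points at: strip only true whitespace so IRC formatting bytes survive, and split the tellee off as its own argument instead of `lstrip()`-ing its characters from the front of the message. The helper name, the whitespace set, and the `partition` approach are assumptions for illustration; this is neither the actual `_format_safe()` from `choose.py` nor necessarily the eventual fix:
```python
# str.lstrip() with no arguments also strips \x1c-\x1f, the range where IRC
# italics (\x1d) and underline (\x1f) live, so strip an explicit set instead.
PLAIN_WHITESPACE = ' \t\r\n'


def split_tellee_and_message(args):
    # Treat the first word as the tellee rather than lstrip()-ing its
    # characters off the message, then trim trailing punctuation from it.
    tellee, _, message = args.partition(' ')
    return tellee.rstrip('.,:;'), message.lstrip(PLAIN_WHITESPACE)


print(split_tellee_and_message('Creator|TL, \x1dcomma test\x1d'))
# ('Creator|TL', '\x1dcomma test\x1d'): the trailing comma stays out of the
# message and the italics control codes are preserved.
```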
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sopel/modules/tell.py`
Content:
```
1 """
2 tell.py - Sopel Tell and Ask Plugin
3 Copyright 2008, Sean B. Palmer, inamidst.com
4 Copyright 2019, dgw, technobabbl.es
5 Licensed under the Eiffel Forum License 2.
6
7 https://sopel.chat
8 """
9 from __future__ import generator_stop
10
11 from collections import defaultdict
12 import io # don't use `codecs` for loading the DB; it will split lines on some IRC formatting
13 import logging
14 import os
15 import threading
16 import time
17
18 from sopel import plugin, tools
19 from sopel.config import types
20 from sopel.tools.time import format_time, get_timezone
21
22
23 LOGGER = logging.getLogger(__name__)
24
25
26 class TellSection(types.StaticSection):
27 use_private_reminder = types.BooleanAttribute(
28 'use_private_reminder', default=False)
29 """When set to ``true``, Sopel will send reminder as private message."""
30 maximum_public = types.ValidatedAttribute(
31 'maximum_public', parse=int, default=4)
32 """How many Sopel can send in public before using private message."""
33
34
35 def configure(config):
36 """
37 | name | example | purpose |
38 | ---- | ------- | ------- |
39 | use_private_reminder | false | Send reminders as private message |
40 | maximum_public | 4 | Send up to this amount of reminders in public |
41 """
42 config.define_section('tell', TellSection)
43 config.tell.configure_setting(
44 'use_private_reminder',
45 'Should Sopel send tell/ask reminders as private message only?')
46 if not config.tell.use_private_reminder:
47 config.tell.configure_setting(
48 'maximum_public',
49 'How many tell/ask reminders Sopel will send as public message '
50 'before sending them as private messages?')
51
52
53 def load_reminders(filename):
54 """Load tell/ask reminders from a ``filename``.
55
56 :param str filename: path to the tell/ask reminders file
57 :return: a dict with the tell/ask reminders
58 :rtype: dict
59 """
60 result = defaultdict(list)
61 with io.open(filename, 'r', encoding='utf-8') as fd:
62 for line in fd:
63 line = line.strip()
64 if line:
65 try:
66 tellee, teller, verb, timenow, msg = line.split('\t', 4)
67 except ValueError:
68 continue # TODO: Add warning log about malformed reminder
69 result[tellee].append((teller, verb, timenow, msg))
70
71 return result
72
73
74 def dump_reminders(filename, data):
75 """Dump tell/ask reminders (``data``) into a ``filename``.
76
77 :param str filename: path to the tell/ask reminders file
78 :param dict data: tell/ask reminders ``dict``
79 """
80 with io.open(filename, 'w', encoding='utf-8') as fd:
81 for tellee, reminders in data.items():
82 for reminder in reminders:
83 line = '\t'.join((tellee,) + tuple(reminder))
84 fd.write(line + '\n')
85 return True
86
87
88 def setup(bot):
89 bot.config.define_section('tell', TellSection)
90 fn = bot.config.basename + '.tell.db'
91 bot.tell_filename = os.path.join(bot.config.core.homedir, fn)
92
93 # Pre-7.0 migration logic. Remove in 8.0 or 9.0.
94 old = bot.nick + '-' + bot.config.core.host + '.tell.db'
95 old = os.path.join(bot.config.core.homedir, old)
96 if os.path.isfile(old):
97 LOGGER.info("Attempting to migrate old 'tell' database {}..."
98 .format(old))
99 try:
100 os.rename(old, bot.tell_filename)
101 except OSError:
102 LOGGER.error("Migration failed!")
103 LOGGER.error("Old filename: {}".format(old))
104 LOGGER.error("New filename: {}".format(bot.tell_filename))
105 LOGGER.error(
106 "See https://sopel.chat/usage/installing/upgrading-to-sopel-7/#reminder-db-migration")
107 else:
108 LOGGER.info("Migration finished!")
109 # End migration logic
110
111 if not os.path.exists(bot.tell_filename):
112 with io.open(bot.tell_filename, 'w', encoding='utf-8') as fd:
113 # if we can't open/write into the file, the tell plugin can't work
114 fd.write('')
115
116 if 'tell_lock' not in bot.memory:
117 bot.memory['tell_lock'] = threading.Lock()
118
119 if 'reminders' not in bot.memory:
120 with bot.memory['tell_lock']:
121 bot.memory['reminders'] = load_reminders(bot.tell_filename)
122
123
124 def shutdown(bot):
125 for key in ['tell_lock', 'reminders']:
126 try:
127 del bot.memory[key]
128 except KeyError:
129 pass
130
131
132 @plugin.command('tell', 'ask')
133 @plugin.nickname_command('tell', 'ask')
134 @plugin.example('$nickname, tell dgw he broke something again.')
135 def f_remind(bot, trigger):
136 """Give someone a message the next time they're seen"""
137 teller = trigger.nick
138 verb = trigger.group(1)
139
140 if not trigger.group(3):
141 bot.reply("%s whom?" % verb)
142 return
143
144 tellee = trigger.group(3).rstrip('.,:;')
145 msg = trigger.group(2).lstrip(tellee).lstrip()
146
147 if not msg:
148 bot.reply("%s %s what?" % (verb, tellee))
149 return
150
151 tellee = tools.Identifier(tellee)
152
153 if not os.path.exists(bot.tell_filename):
154 return
155
156 if len(tellee) > bot.isupport.get('NICKLEN', 30):
157 bot.reply('That nickname is too long.')
158 return
159
160 if tellee[0] == '@':
161 tellee = tellee[1:]
162
163 if tellee == bot.nick:
164 bot.reply("I'm here now; you can %s me whatever you want!" % verb)
165 return
166
167 if tellee not in (tools.Identifier(teller), bot.nick, 'me'):
168 tz = get_timezone(bot.db, bot.config, None, tellee)
169 timenow = format_time(bot.db, bot.config, tz, tellee)
170 with bot.memory['tell_lock']:
171 if tellee not in bot.memory['reminders']:
172 bot.memory['reminders'][tellee] = [(teller, verb, timenow, msg)]
173 else:
174 bot.memory['reminders'][tellee].append((teller, verb, timenow, msg))
175 # save the reminders
176 dump_reminders(bot.tell_filename, bot.memory['reminders'])
177
178 response = "I'll pass that on when %s is around." % tellee
179 bot.reply(response)
180 elif tools.Identifier(teller) == tellee:
181 bot.reply('You can %s yourself that.' % verb)
182 else:
183 bot.reply("Hey, I'm not as stupid as Monty you know!")
184
185
186 def get_nick_reminders(reminders, nick):
187 lines = []
188 template = "%s: %s <%s> %s %s %s"
189 today = time.strftime('%d %b', time.gmtime())
190
191 for (teller, verb, datetime, msg) in reminders:
192 if datetime.startswith(today):
193 datetime = datetime[len(today) + 1:]
194 lines.append(template % (nick, datetime, teller, verb, nick, msg))
195
196 return lines
197
198
199 def nick_match_tellee(nick, tellee):
200 """Tell if a ``nick`` matches a ``tellee``.
201
202 :param str nick: Nick seen by the bot
203 :param str tellee: Tellee name or pattern
204
205 The check between ``nick`` and ``tellee`` is case-insensitive::
206
207 >>> nick_match_tellee('Exirel', 'exirel')
208 True
209 >>> nick_match_tellee('exirel', 'EXIREL')
210 True
211 >>> nick_match_tellee('exirel', 'dgw')
212 False
213
214 If ``tellee`` ends with a wildcard token (``*`` or ``:``), then ``nick``
215 matches if it starts with ``tellee`` (without the token)::
216
217 >>> nick_match_tellee('Exirel', 'Exi*')
218 True
219 >>> nick_match_tellee('Exirel', 'exi:')
220 True
221 >>> nick_match_tellee('Exirel', 'Exi')
222 False
223
224 Note that this is still case-insensitive.
225 """
226 if tellee[-1] in ['*', ':']: # these are wildcard token
227 return nick.lower().startswith(tellee.lower().rstrip('*:'))
228 return nick.lower() == tellee.lower()
229
230
231 @plugin.rule('(.*)')
232 @plugin.priority('low')
233 @plugin.unblockable
234 @plugin.output_prefix('[tell] ')
235 def message(bot, trigger):
236 nick = trigger.nick
237
238 if not os.path.exists(bot.tell_filename):
239 # plugin can't work without its storage file
240 return
241
242 # get all matching reminders
243 reminders = []
244 tellees = list(reversed(sorted(
245 tellee
246 for tellee in bot.memory['reminders']
247 if nick_match_tellee(nick, tellee)
248 )))
249
250 with bot.memory['tell_lock']:
251 # pop reminders for nick
252 reminders = list(
253 reminder
254 for tellee in tellees
255 for reminder in get_nick_reminders(
256 bot.memory['reminders'].pop(tellee, []), nick)
257 )
258
259 # check if there are reminders to send
260 if not reminders:
261 return # nothing to do
262
263 # then send reminders (as public and/or private messages)
264 if bot.config.tell.use_private_reminder:
265 # send reminders with private messages
266 for line in reminders:
267 bot.say(line, nick)
268 else:
269 # send up to 'maximum_public' reminders to the channel
270 max_public = bot.config.tell.maximum_public
271 for line in reminders[:max_public]:
272 bot.say(line)
273
274 # send other reminders directly to nick as private message
275 if reminders[max_public:]:
276 bot.reply('Further messages sent privately')
277 for line in reminders[max_public:]:
278 bot.say(line, nick)
279
280 # save reminders left in memory
281 with bot.memory['tell_lock']:
282 dump_reminders(bot.tell_filename, bot.memory['reminders'])
283
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sopel/modules/tell.py b/sopel/modules/tell.py
--- a/sopel/modules/tell.py
+++ b/sopel/modules/tell.py
@@ -14,8 +14,9 @@
import os
import threading
import time
+import unicodedata
-from sopel import plugin, tools
+from sopel import formatting, plugin, tools
from sopel.config import types
from sopel.tools.time import format_time, get_timezone
@@ -129,6 +130,43 @@
pass
+def _format_safe_lstrip(text):
+ """``str.lstrip()`` but without eating IRC formatting.
+
+ :param str text: text to clean
+ :rtype: str
+ :raises TypeError: if the passed ``text`` is not a string
+
+ Stolen and tweaked from the ``choose`` plugin's ``_format_safe()``
+ function by the person who wrote it.
+ """
+ if not isinstance(text, str):
+ raise TypeError("A string is required.")
+ elif not text:
+ # unnecessary optimization
+ return ''
+
+ start = 0
+
+ # strip left
+ pos = 0
+ while pos < len(text):
+ is_whitespace = unicodedata.category(text[pos]) == 'Zs'
+ is_non_printing = (
+ text[pos] in formatting.CONTROL_NON_PRINTING and
+ text[pos] not in formatting.CONTROL_FORMATTING
+ )
+ if not is_whitespace and not is_non_printing:
+ start = pos
+ break
+ pos += 1
+ else:
+ # skipped everything; string is all whitespace
+ return ''
+
+ return text[start:]
+
+
@plugin.command('tell', 'ask')
@plugin.nickname_command('tell', 'ask')
@plugin.example('$nickname, tell dgw he broke something again.')
@@ -142,7 +180,7 @@
return
tellee = trigger.group(3).rstrip('.,:;')
- msg = trigger.group(2).lstrip(tellee).lstrip()
+ msg = _format_safe_lstrip(trigger.group(2).split(' ', 1)[1])
if not msg:
bot.reply("%s %s what?" % (verb, tellee))
|
{"golden_diff": "diff --git a/sopel/modules/tell.py b/sopel/modules/tell.py\n--- a/sopel/modules/tell.py\n+++ b/sopel/modules/tell.py\n@@ -14,8 +14,9 @@\n import os\n import threading\n import time\n+import unicodedata\n \n-from sopel import plugin, tools\n+from sopel import formatting, plugin, tools\n from sopel.config import types\n from sopel.tools.time import format_time, get_timezone\n \n@@ -129,6 +130,43 @@\n pass\n \n \n+def _format_safe_lstrip(text):\n+ \"\"\"``str.lstrip()`` but without eating IRC formatting.\n+\n+ :param str text: text to clean\n+ :rtype: str\n+ :raises TypeError: if the passed ``text`` is not a string\n+\n+ Stolen and tweaked from the ``choose`` plugin's ``_format_safe()``\n+ function by the person who wrote it.\n+ \"\"\"\n+ if not isinstance(text, str):\n+ raise TypeError(\"A string is required.\")\n+ elif not text:\n+ # unnecessary optimization\n+ return ''\n+\n+ start = 0\n+\n+ # strip left\n+ pos = 0\n+ while pos < len(text):\n+ is_whitespace = unicodedata.category(text[pos]) == 'Zs'\n+ is_non_printing = (\n+ text[pos] in formatting.CONTROL_NON_PRINTING and\n+ text[pos] not in formatting.CONTROL_FORMATTING\n+ )\n+ if not is_whitespace and not is_non_printing:\n+ start = pos\n+ break\n+ pos += 1\n+ else:\n+ # skipped everything; string is all whitespace\n+ return ''\n+\n+ return text[start:]\n+\n+\n @plugin.command('tell', 'ask')\n @plugin.nickname_command('tell', 'ask')\n @plugin.example('$nickname, tell dgw he broke something again.')\n@@ -142,7 +180,7 @@\n return\n \n tellee = trigger.group(3).rstrip('.,:;')\n- msg = trigger.group(2).lstrip(tellee).lstrip()\n+ msg = _format_safe_lstrip(trigger.group(2).split(' ', 1)[1])\n \n if not msg:\n bot.reply(\"%s %s what?\" % (verb, tellee))\n", "issue": "tell: argument splitting/stripping is a bit broken\nThis code line removes most IRC formatting bytes from the beginning of the message:\r\n\r\nhttps://github.com/sopel-irc/sopel/blob/f29c10dde2ff166dee15bc49f0cd43c93b93ba3e/sopel/modules/tell.py#L145\r\n\r\nIt's similar to another issue (#1877) we fixed in `choose` for 7.1 (#1965). The second `.lstrip()` with no arguments needs to be replaced with this plugin's own version of [`_format_safe()`](https://github.com/sopel-irc/sopel/blob/f29c10dde2ff166dee15bc49f0cd43c93b93ba3e/sopel/modules/choose.py#L19-L69) from `choose.py`.\r\n\r\nIn fact, it wouldn't take much at this point to convince me that Sopel's API should include its own formatting-safe versions of `strip()`, `lstrip()`, and `rstrip()`. Issues like this keep coming up, and stuff like `trigger.plain` won't help\u2026\r\n\r\n----\r\n\r\nThat line also misuses `.lstrip()` with the tellee's nick. That isn't a huge issue, because it's never going to go past the whitespace, but it could cause other quirks like the fact that punctuation gets through even though the code's _intent_ appears to be treating the tellee as a separate argument that isn't part of the message:\r\n\r\n```irclog\r\n<~dgw> ;tell Creator|TL, comma test\r\n[\u2026]\r\n<&Kaede> [tell] Creator|TL: Monday, July 12, 2021 22:52:44 (IST) <dgw> tell Creator|TL , comma test\r\n```\r\n\r\n\n", "before_files": [{"content": "\"\"\"\ntell.py - Sopel Tell and Ask Plugin\nCopyright 2008, Sean B. 
Palmer, inamidst.com\nCopyright 2019, dgw, technobabbl.es\nLicensed under the Eiffel Forum License 2.\n\nhttps://sopel.chat\n\"\"\"\nfrom __future__ import generator_stop\n\nfrom collections import defaultdict\nimport io # don't use `codecs` for loading the DB; it will split lines on some IRC formatting\nimport logging\nimport os\nimport threading\nimport time\n\nfrom sopel import plugin, tools\nfrom sopel.config import types\nfrom sopel.tools.time import format_time, get_timezone\n\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass TellSection(types.StaticSection):\n use_private_reminder = types.BooleanAttribute(\n 'use_private_reminder', default=False)\n \"\"\"When set to ``true``, Sopel will send reminder as private message.\"\"\"\n maximum_public = types.ValidatedAttribute(\n 'maximum_public', parse=int, default=4)\n \"\"\"How many Sopel can send in public before using private message.\"\"\"\n\n\ndef configure(config):\n \"\"\"\n | name | example | purpose |\n | ---- | ------- | ------- |\n | use_private_reminder | false | Send reminders as private message |\n | maximum_public | 4 | Send up to this amount of reminders in public |\n \"\"\"\n config.define_section('tell', TellSection)\n config.tell.configure_setting(\n 'use_private_reminder',\n 'Should Sopel send tell/ask reminders as private message only?')\n if not config.tell.use_private_reminder:\n config.tell.configure_setting(\n 'maximum_public',\n 'How many tell/ask reminders Sopel will send as public message '\n 'before sending them as private messages?')\n\n\ndef load_reminders(filename):\n \"\"\"Load tell/ask reminders from a ``filename``.\n\n :param str filename: path to the tell/ask reminders file\n :return: a dict with the tell/ask reminders\n :rtype: dict\n \"\"\"\n result = defaultdict(list)\n with io.open(filename, 'r', encoding='utf-8') as fd:\n for line in fd:\n line = line.strip()\n if line:\n try:\n tellee, teller, verb, timenow, msg = line.split('\\t', 4)\n except ValueError:\n continue # TODO: Add warning log about malformed reminder\n result[tellee].append((teller, verb, timenow, msg))\n\n return result\n\n\ndef dump_reminders(filename, data):\n \"\"\"Dump tell/ask reminders (``data``) into a ``filename``.\n\n :param str filename: path to the tell/ask reminders file\n :param dict data: tell/ask reminders ``dict``\n \"\"\"\n with io.open(filename, 'w', encoding='utf-8') as fd:\n for tellee, reminders in data.items():\n for reminder in reminders:\n line = '\\t'.join((tellee,) + tuple(reminder))\n fd.write(line + '\\n')\n return True\n\n\ndef setup(bot):\n bot.config.define_section('tell', TellSection)\n fn = bot.config.basename + '.tell.db'\n bot.tell_filename = os.path.join(bot.config.core.homedir, fn)\n\n # Pre-7.0 migration logic. 
Remove in 8.0 or 9.0.\n old = bot.nick + '-' + bot.config.core.host + '.tell.db'\n old = os.path.join(bot.config.core.homedir, old)\n if os.path.isfile(old):\n LOGGER.info(\"Attempting to migrate old 'tell' database {}...\"\n .format(old))\n try:\n os.rename(old, bot.tell_filename)\n except OSError:\n LOGGER.error(\"Migration failed!\")\n LOGGER.error(\"Old filename: {}\".format(old))\n LOGGER.error(\"New filename: {}\".format(bot.tell_filename))\n LOGGER.error(\n \"See https://sopel.chat/usage/installing/upgrading-to-sopel-7/#reminder-db-migration\")\n else:\n LOGGER.info(\"Migration finished!\")\n # End migration logic\n\n if not os.path.exists(bot.tell_filename):\n with io.open(bot.tell_filename, 'w', encoding='utf-8') as fd:\n # if we can't open/write into the file, the tell plugin can't work\n fd.write('')\n\n if 'tell_lock' not in bot.memory:\n bot.memory['tell_lock'] = threading.Lock()\n\n if 'reminders' not in bot.memory:\n with bot.memory['tell_lock']:\n bot.memory['reminders'] = load_reminders(bot.tell_filename)\n\n\ndef shutdown(bot):\n for key in ['tell_lock', 'reminders']:\n try:\n del bot.memory[key]\n except KeyError:\n pass\n\n\[email protected]('tell', 'ask')\[email protected]_command('tell', 'ask')\[email protected]('$nickname, tell dgw he broke something again.')\ndef f_remind(bot, trigger):\n \"\"\"Give someone a message the next time they're seen\"\"\"\n teller = trigger.nick\n verb = trigger.group(1)\n\n if not trigger.group(3):\n bot.reply(\"%s whom?\" % verb)\n return\n\n tellee = trigger.group(3).rstrip('.,:;')\n msg = trigger.group(2).lstrip(tellee).lstrip()\n\n if not msg:\n bot.reply(\"%s %s what?\" % (verb, tellee))\n return\n\n tellee = tools.Identifier(tellee)\n\n if not os.path.exists(bot.tell_filename):\n return\n\n if len(tellee) > bot.isupport.get('NICKLEN', 30):\n bot.reply('That nickname is too long.')\n return\n\n if tellee[0] == '@':\n tellee = tellee[1:]\n\n if tellee == bot.nick:\n bot.reply(\"I'm here now; you can %s me whatever you want!\" % verb)\n return\n\n if tellee not in (tools.Identifier(teller), bot.nick, 'me'):\n tz = get_timezone(bot.db, bot.config, None, tellee)\n timenow = format_time(bot.db, bot.config, tz, tellee)\n with bot.memory['tell_lock']:\n if tellee not in bot.memory['reminders']:\n bot.memory['reminders'][tellee] = [(teller, verb, timenow, msg)]\n else:\n bot.memory['reminders'][tellee].append((teller, verb, timenow, msg))\n # save the reminders\n dump_reminders(bot.tell_filename, bot.memory['reminders'])\n\n response = \"I'll pass that on when %s is around.\" % tellee\n bot.reply(response)\n elif tools.Identifier(teller) == tellee:\n bot.reply('You can %s yourself that.' 
% verb)\n else:\n bot.reply(\"Hey, I'm not as stupid as Monty you know!\")\n\n\ndef get_nick_reminders(reminders, nick):\n lines = []\n template = \"%s: %s <%s> %s %s %s\"\n today = time.strftime('%d %b', time.gmtime())\n\n for (teller, verb, datetime, msg) in reminders:\n if datetime.startswith(today):\n datetime = datetime[len(today) + 1:]\n lines.append(template % (nick, datetime, teller, verb, nick, msg))\n\n return lines\n\n\ndef nick_match_tellee(nick, tellee):\n \"\"\"Tell if a ``nick`` matches a ``tellee``.\n\n :param str nick: Nick seen by the bot\n :param str tellee: Tellee name or pattern\n\n The check between ``nick`` and ``tellee`` is case-insensitive::\n\n >>> nick_match_tellee('Exirel', 'exirel')\n True\n >>> nick_match_tellee('exirel', 'EXIREL')\n True\n >>> nick_match_tellee('exirel', 'dgw')\n False\n\n If ``tellee`` ends with a wildcard token (``*`` or ``:``), then ``nick``\n matches if it starts with ``tellee`` (without the token)::\n\n >>> nick_match_tellee('Exirel', 'Exi*')\n True\n >>> nick_match_tellee('Exirel', 'exi:')\n True\n >>> nick_match_tellee('Exirel', 'Exi')\n False\n\n Note that this is still case-insensitive.\n \"\"\"\n if tellee[-1] in ['*', ':']: # these are wildcard token\n return nick.lower().startswith(tellee.lower().rstrip('*:'))\n return nick.lower() == tellee.lower()\n\n\[email protected]('(.*)')\[email protected]('low')\[email protected]\[email protected]_prefix('[tell] ')\ndef message(bot, trigger):\n nick = trigger.nick\n\n if not os.path.exists(bot.tell_filename):\n # plugin can't work without its storage file\n return\n\n # get all matching reminders\n reminders = []\n tellees = list(reversed(sorted(\n tellee\n for tellee in bot.memory['reminders']\n if nick_match_tellee(nick, tellee)\n )))\n\n with bot.memory['tell_lock']:\n # pop reminders for nick\n reminders = list(\n reminder\n for tellee in tellees\n for reminder in get_nick_reminders(\n bot.memory['reminders'].pop(tellee, []), nick)\n )\n\n # check if there are reminders to send\n if not reminders:\n return # nothing to do\n\n # then send reminders (as public and/or private messages)\n if bot.config.tell.use_private_reminder:\n # send reminders with private messages\n for line in reminders:\n bot.say(line, nick)\n else:\n # send up to 'maximum_public' reminders to the channel\n max_public = bot.config.tell.maximum_public\n for line in reminders[:max_public]:\n bot.say(line)\n\n # send other reminders directly to nick as private message\n if reminders[max_public:]:\n bot.reply('Further messages sent privately')\n for line in reminders[max_public:]:\n bot.say(line, nick)\n\n # save reminders left in memory\n with bot.memory['tell_lock']:\n dump_reminders(bot.tell_filename, bot.memory['reminders'])\n", "path": "sopel/modules/tell.py"}], "after_files": [{"content": "\"\"\"\ntell.py - Sopel Tell and Ask Plugin\nCopyright 2008, Sean B. 
Palmer, inamidst.com\nCopyright 2019, dgw, technobabbl.es\nLicensed under the Eiffel Forum License 2.\n\nhttps://sopel.chat\n\"\"\"\nfrom __future__ import generator_stop\n\nfrom collections import defaultdict\nimport io # don't use `codecs` for loading the DB; it will split lines on some IRC formatting\nimport logging\nimport os\nimport threading\nimport time\nimport unicodedata\n\nfrom sopel import formatting, plugin, tools\nfrom sopel.config import types\nfrom sopel.tools.time import format_time, get_timezone\n\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass TellSection(types.StaticSection):\n use_private_reminder = types.BooleanAttribute(\n 'use_private_reminder', default=False)\n \"\"\"When set to ``true``, Sopel will send reminder as private message.\"\"\"\n maximum_public = types.ValidatedAttribute(\n 'maximum_public', parse=int, default=4)\n \"\"\"How many Sopel can send in public before using private message.\"\"\"\n\n\ndef configure(config):\n \"\"\"\n | name | example | purpose |\n | ---- | ------- | ------- |\n | use_private_reminder | false | Send reminders as private message |\n | maximum_public | 4 | Send up to this amount of reminders in public |\n \"\"\"\n config.define_section('tell', TellSection)\n config.tell.configure_setting(\n 'use_private_reminder',\n 'Should Sopel send tell/ask reminders as private message only?')\n if not config.tell.use_private_reminder:\n config.tell.configure_setting(\n 'maximum_public',\n 'How many tell/ask reminders Sopel will send as public message '\n 'before sending them as private messages?')\n\n\ndef load_reminders(filename):\n \"\"\"Load tell/ask reminders from a ``filename``.\n\n :param str filename: path to the tell/ask reminders file\n :return: a dict with the tell/ask reminders\n :rtype: dict\n \"\"\"\n result = defaultdict(list)\n with io.open(filename, 'r', encoding='utf-8') as fd:\n for line in fd:\n line = line.strip()\n if line:\n try:\n tellee, teller, verb, timenow, msg = line.split('\\t', 4)\n except ValueError:\n continue # TODO: Add warning log about malformed reminder\n result[tellee].append((teller, verb, timenow, msg))\n\n return result\n\n\ndef dump_reminders(filename, data):\n \"\"\"Dump tell/ask reminders (``data``) into a ``filename``.\n\n :param str filename: path to the tell/ask reminders file\n :param dict data: tell/ask reminders ``dict``\n \"\"\"\n with io.open(filename, 'w', encoding='utf-8') as fd:\n for tellee, reminders in data.items():\n for reminder in reminders:\n line = '\\t'.join((tellee,) + tuple(reminder))\n fd.write(line + '\\n')\n return True\n\n\ndef setup(bot):\n bot.config.define_section('tell', TellSection)\n fn = bot.config.basename + '.tell.db'\n bot.tell_filename = os.path.join(bot.config.core.homedir, fn)\n\n # Pre-7.0 migration logic. 
Remove in 8.0 or 9.0.\n old = bot.nick + '-' + bot.config.core.host + '.tell.db'\n old = os.path.join(bot.config.core.homedir, old)\n if os.path.isfile(old):\n LOGGER.info(\"Attempting to migrate old 'tell' database {}...\"\n .format(old))\n try:\n os.rename(old, bot.tell_filename)\n except OSError:\n LOGGER.error(\"Migration failed!\")\n LOGGER.error(\"Old filename: {}\".format(old))\n LOGGER.error(\"New filename: {}\".format(bot.tell_filename))\n LOGGER.error(\n \"See https://sopel.chat/usage/installing/upgrading-to-sopel-7/#reminder-db-migration\")\n else:\n LOGGER.info(\"Migration finished!\")\n # End migration logic\n\n if not os.path.exists(bot.tell_filename):\n with io.open(bot.tell_filename, 'w', encoding='utf-8') as fd:\n # if we can't open/write into the file, the tell plugin can't work\n fd.write('')\n\n if 'tell_lock' not in bot.memory:\n bot.memory['tell_lock'] = threading.Lock()\n\n if 'reminders' not in bot.memory:\n with bot.memory['tell_lock']:\n bot.memory['reminders'] = load_reminders(bot.tell_filename)\n\n\ndef shutdown(bot):\n for key in ['tell_lock', 'reminders']:\n try:\n del bot.memory[key]\n except KeyError:\n pass\n\n\ndef _format_safe_lstrip(text):\n \"\"\"``str.lstrip()`` but without eating IRC formatting.\n\n :param str text: text to clean\n :rtype: str\n :raises TypeError: if the passed ``text`` is not a string\n\n Stolen and tweaked from the ``choose`` plugin's ``_format_safe()``\n function by the person who wrote it.\n \"\"\"\n if not isinstance(text, str):\n raise TypeError(\"A string is required.\")\n elif not text:\n # unnecessary optimization\n return ''\n\n start = 0\n\n # strip left\n pos = 0\n while pos < len(text):\n is_whitespace = unicodedata.category(text[pos]) == 'Zs'\n is_non_printing = (\n text[pos] in formatting.CONTROL_NON_PRINTING and\n text[pos] not in formatting.CONTROL_FORMATTING\n )\n if not is_whitespace and not is_non_printing:\n start = pos\n break\n pos += 1\n else:\n # skipped everything; string is all whitespace\n return ''\n\n return text[start:]\n\n\[email protected]('tell', 'ask')\[email protected]_command('tell', 'ask')\[email protected]('$nickname, tell dgw he broke something again.')\ndef f_remind(bot, trigger):\n \"\"\"Give someone a message the next time they're seen\"\"\"\n teller = trigger.nick\n verb = trigger.group(1)\n\n if not trigger.group(3):\n bot.reply(\"%s whom?\" % verb)\n return\n\n tellee = trigger.group(3).rstrip('.,:;')\n msg = _format_safe_lstrip(trigger.group(2).split(' ', 1)[1])\n\n if not msg:\n bot.reply(\"%s %s what?\" % (verb, tellee))\n return\n\n tellee = tools.Identifier(tellee)\n\n if not os.path.exists(bot.tell_filename):\n return\n\n if len(tellee) > bot.isupport.get('NICKLEN', 30):\n bot.reply('That nickname is too long.')\n return\n\n if tellee[0] == '@':\n tellee = tellee[1:]\n\n if tellee == bot.nick:\n bot.reply(\"I'm here now; you can %s me whatever you want!\" % verb)\n return\n\n if tellee not in (tools.Identifier(teller), bot.nick, 'me'):\n tz = get_timezone(bot.db, bot.config, None, tellee)\n timenow = format_time(bot.db, bot.config, tz, tellee)\n with bot.memory['tell_lock']:\n if tellee not in bot.memory['reminders']:\n bot.memory['reminders'][tellee] = [(teller, verb, timenow, msg)]\n else:\n bot.memory['reminders'][tellee].append((teller, verb, timenow, msg))\n # save the reminders\n dump_reminders(bot.tell_filename, bot.memory['reminders'])\n\n response = \"I'll pass that on when %s is around.\" % tellee\n bot.reply(response)\n elif tools.Identifier(teller) == tellee:\n 
bot.reply('You can %s yourself that.' % verb)\n else:\n bot.reply(\"Hey, I'm not as stupid as Monty you know!\")\n\n\ndef get_nick_reminders(reminders, nick):\n lines = []\n template = \"%s: %s <%s> %s %s %s\"\n today = time.strftime('%d %b', time.gmtime())\n\n for (teller, verb, datetime, msg) in reminders:\n if datetime.startswith(today):\n datetime = datetime[len(today) + 1:]\n lines.append(template % (nick, datetime, teller, verb, nick, msg))\n\n return lines\n\n\ndef nick_match_tellee(nick, tellee):\n \"\"\"Tell if a ``nick`` matches a ``tellee``.\n\n :param str nick: Nick seen by the bot\n :param str tellee: Tellee name or pattern\n\n The check between ``nick`` and ``tellee`` is case-insensitive::\n\n >>> nick_match_tellee('Exirel', 'exirel')\n True\n >>> nick_match_tellee('exirel', 'EXIREL')\n True\n >>> nick_match_tellee('exirel', 'dgw')\n False\n\n If ``tellee`` ends with a wildcard token (``*`` or ``:``), then ``nick``\n matches if it starts with ``tellee`` (without the token)::\n\n >>> nick_match_tellee('Exirel', 'Exi*')\n True\n >>> nick_match_tellee('Exirel', 'exi:')\n True\n >>> nick_match_tellee('Exirel', 'Exi')\n False\n\n Note that this is still case-insensitive.\n \"\"\"\n if tellee[-1] in ['*', ':']: # these are wildcard token\n return nick.lower().startswith(tellee.lower().rstrip('*:'))\n return nick.lower() == tellee.lower()\n\n\[email protected]('(.*)')\[email protected]('low')\[email protected]\[email protected]_prefix('[tell] ')\ndef message(bot, trigger):\n nick = trigger.nick\n\n if not os.path.exists(bot.tell_filename):\n # plugin can't work without its storage file\n return\n\n # get all matching reminders\n reminders = []\n tellees = list(reversed(sorted(\n tellee\n for tellee in bot.memory['reminders']\n if nick_match_tellee(nick, tellee)\n )))\n\n with bot.memory['tell_lock']:\n # pop reminders for nick\n reminders = list(\n reminder\n for tellee in tellees\n for reminder in get_nick_reminders(\n bot.memory['reminders'].pop(tellee, []), nick)\n )\n\n # check if there are reminders to send\n if not reminders:\n return # nothing to do\n\n # then send reminders (as public and/or private messages)\n if bot.config.tell.use_private_reminder:\n # send reminders with private messages\n for line in reminders:\n bot.say(line, nick)\n else:\n # send up to 'maximum_public' reminders to the channel\n max_public = bot.config.tell.maximum_public\n for line in reminders[:max_public]:\n bot.say(line)\n\n # send other reminders directly to nick as private message\n if reminders[max_public:]:\n bot.reply('Further messages sent privately')\n for line in reminders[max_public:]:\n bot.say(line, nick)\n\n # save reminders left in memory\n with bot.memory['tell_lock']:\n dump_reminders(bot.tell_filename, bot.memory['reminders'])\n", "path": "sopel/modules/tell.py"}]}
| 3,723 | 524 |
gh_patches_debug_12288
|
rasdani/github-patches
|
git_diff
|
piskvorky__gensim-3115
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
lsi_dispatcher is not working from command-line when not specifying maxsize argument
#### Problem description
When running `lsi_dispatcher` from the command-line, if you don't specify the `maxsize` argument explicitly, you get an error for the missing positional argument:
```
usage: lsi_dispatcher.py [-h] maxsize
lsi_dispatcher.py: error: the following arguments are required: maxsize
```
According to the documentation, this argument should be optional.
The issue seems to be that the nargs argument to `add_argument` is missing:
```python
parser.add_argument(
'maxsize', type=int, help='Maximum number of jobs to be kept pre-fetched in the queue.', default=MAX_JOBS_QUEUE
)
```
In order to make this argument optional, this should be:
```python
parser.add_argument(
'maxsize', nargs='?', type=int, help='Maximum number of jobs to be kept pre-fetched in the queue.', default=MAX_JOBS_QUEUE
)
```
#### Steps/code/corpus to reproduce
Include full tracebacks, logs and datasets if necessary. Please keep the examples minimal ("minimal reproducible example").
If your problem is with a specific Gensim model (word2vec, lsimodel, doc2vec, fasttext, ldamodel etc), include the following:
```python
$ python3 -m gensim.models.lsi_dispatcher
usage: lsi_dispatcher.py [-h] maxsize
lsi_dispatcher.py: error: the following arguments are required: maxsize
```
#### Versions
```python
Linux-5.4.0-67-generic-x86_64-with-glibc2.2
Python 3.8.5 (default, Jan 27 2021, 15:41:15)
[GCC 9.3.0]
Bits 64
NumPy 1.19.4
SciPy 1.6.0
gensim 4.0.1
FAST_VERSION 1
```
--- END ISSUE ---
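For readers unfamiliar with `argparse`, here is a minimal, self-contained sketch of the behavior difference the issue above describes. This is standard-library behavior only, not code taken from gensim; the parser below is a hypothetical stand-in for the dispatcher's CLI.
```python
import argparse

# Hypothetical stand-in for the dispatcher's single positional argument.
parser = argparse.ArgumentParser()
parser.add_argument(
    'maxsize', nargs='?', type=int, default=10,
    help='Maximum number of jobs to be kept pre-fetched in the queue.',
)

# With nargs='?' the positional argument may be omitted, so the default applies:
print(parser.parse_args([]).maxsize)       # -> 10
print(parser.parse_args(['100']).maxsize)  # -> 100
# Without nargs='?', parse_args([]) exits with
# "error: the following arguments are required: maxsize".
```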
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gensim/models/lsi_dispatcher.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) 2010 Radim Rehurek <[email protected]>
5 # Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
6
7 """Dispatcher process which orchestrates distributed :class:`~gensim.models.lsimodel.LsiModel` computations.
8 Run this script only once, on any node in your cluster.
9
10 Notes
11 -----
12 The dispatcher expects to find worker scripts already running. Make sure you run as many workers as you like on
13 your machines **before** launching the dispatcher.
14
15
16 How to use distributed LSI
17 --------------------------
18
19 #. Install needed dependencies (Pyro4) ::
20
21 pip install gensim[distributed]
22
23 #. Setup serialization (on each machine) ::
24
25 export PYRO_SERIALIZERS_ACCEPTED=pickle
26 export PYRO_SERIALIZER=pickle
27
28 #. Run nameserver ::
29
30 python -m Pyro4.naming -n 0.0.0.0 &
31
32 #. Run workers (on each machine) ::
33
34 python -m gensim.models.lsi_worker &
35
36 #. Run dispatcher ::
37
38 python -m gensim.models.lsi_dispatcher &
39
40 #. Run :class:`~gensim.models.lsimodel.LsiModel` in distributed mode:
41
42 .. sourcecode:: pycon
43
44 >>> from gensim.test.utils import common_corpus, common_dictionary
45 >>> from gensim.models import LsiModel
46 >>>
47 >>> model = LsiModel(common_corpus, id2word=common_dictionary, distributed=True)
48
49 Command line arguments
50 ----------------------
51
52 .. program-output:: python -m gensim.models.lsi_dispatcher --help
53 :ellipsis: 0, -5
54
55 """
56
57 import os
58 import sys
59 import logging
60 import argparse
61 import threading
62 import time
63 from queue import Queue
64
65 import Pyro4
66
67 from gensim import utils
68
69
70 logger = logging.getLogger(__name__)
71
72 # How many jobs (=chunks of N documents) to keep "pre-fetched" in a queue?
73 # A small number is usually enough, unless iteration over the corpus is very very
74 # slow (slower than the actual computation of LSI), in which case you can override
75 # this value from command line. ie. run "python ./lsi_dispatcher.py 100"
76 MAX_JOBS_QUEUE = 10
77
78 # timeout for the Queue object put/get blocking methods.
79 # it should really be infinity, but then keyboard interrupts don't work.
80 # so this is really just a hack, see http://bugs.python.org/issue1360
81 HUGE_TIMEOUT = 365 * 24 * 60 * 60 # one year
82
83
84 class Dispatcher:
85 """Dispatcher object that communicates and coordinates individual workers.
86
87 Warnings
88 --------
89 There should never be more than one dispatcher running at any one time.
90
91 """
92 def __init__(self, maxsize=0):
93 """Partly initialize the dispatcher.
94
95 A full initialization (including initialization of the workers) requires a call to
96 :meth:`~gensim.models.lsi_dispatcher.Dispatcher.initialize`
97
98 Parameters
99 ----------
100 maxsize : int, optional
101 Maximum number of jobs to be kept pre-fetched in the queue.
102
103 """
104 self.maxsize = maxsize
105 self.workers = {}
106 self.callback = None # a pyro proxy to this object (unknown at init time, but will be set later)
107
108 @Pyro4.expose
109 def initialize(self, **model_params):
110 """Fully initialize the dispatcher and all its workers.
111
112 Parameters
113 ----------
114 **model_params
115 Keyword parameters used to initialize individual workers
116 (gets handed all the way down to :meth:`gensim.models.lsi_worker.Worker.initialize`).
117 See :class:`~gensim.models.lsimodel.LsiModel`.
118
119 Raises
120 ------
121 RuntimeError
122 When no workers are found (the :mod:`gensim.model.lsi_worker` script must be ran beforehand).
123
124 """
125 self.jobs = Queue(maxsize=self.maxsize)
126 self.lock_update = threading.Lock()
127 self._jobsdone = 0
128 self._jobsreceived = 0
129
130 # locate all available workers and store their proxies, for subsequent RMI calls
131 self.workers = {}
132 with utils.getNS() as ns:
133 self.callback = Pyro4.Proxy('PYRONAME:gensim.lsi_dispatcher') # = self
134 for name, uri in ns.list(prefix='gensim.lsi_worker').items():
135 try:
136 worker = Pyro4.Proxy(uri)
137 workerid = len(self.workers)
138 # make time consuming methods work asynchronously
139 logger.info("registering worker #%i from %s", workerid, uri)
140 worker.initialize(workerid, dispatcher=self.callback, **model_params)
141 self.workers[workerid] = worker
142 except Pyro4.errors.PyroError:
143 logger.exception("unresponsive worker at %s, deleting it from the name server", uri)
144 ns.remove(name)
145
146 if not self.workers:
147 raise RuntimeError('no workers found; run some lsi_worker scripts on your machines first!')
148
149 @Pyro4.expose
150 def getworkers(self):
151 """Get pyro URIs of all registered workers.
152
153 Returns
154 -------
155 list of URIs
156 The pyro URIs for each worker.
157
158 """
159 return [worker._pyroUri for worker in self.workers.values()]
160
161 @Pyro4.expose
162 def getjob(self, worker_id):
163 """Atomically pop a job from the queue.
164
165 Parameters
166 ----------
167 worker_id : int
168 The worker that requested the job.
169
170 Returns
171 -------
172 iterable of iterable of (int, float)
173 The corpus in BoW format.
174
175 """
176 logger.info("worker #%i requesting a new job", worker_id)
177 job = self.jobs.get(block=True, timeout=1)
178 logger.info("worker #%i got a new job (%i left)", worker_id, self.jobs.qsize())
179 return job
180
181 @Pyro4.expose
182 def putjob(self, job):
183 """Atomically add a job to the queue.
184
185 Parameters
186 ----------
187 job : iterable of list of (int, float)
188 The corpus in BoW format.
189
190 """
191 self._jobsreceived += 1
192 self.jobs.put(job, block=True, timeout=HUGE_TIMEOUT)
193 logger.info("added a new job (len(queue)=%i items)", self.jobs.qsize())
194
195 @Pyro4.expose
196 def getstate(self):
197 """Merge projections from across all workers and get the final projection.
198
199 Returns
200 -------
201 :class:`~gensim.models.lsimodel.Projection`
202 The current projection of the total model.
203
204 """
205 logger.info("end of input, assigning all remaining jobs")
206 logger.debug("jobs done: %s, jobs received: %s", self._jobsdone, self._jobsreceived)
207 while self._jobsdone < self._jobsreceived:
208 time.sleep(0.5) # check every half a second
209
210 # TODO: merge in parallel, so that we're done in `log_2(workers)` merges,
211 # and not `workers - 1` merges!
212 # but merging only takes place once, after all input data has been processed,
213 # so the overall effect would be small... compared to the amount of coding :-)
214 logger.info("merging states from %i workers", len(self.workers))
215 workers = list(self.workers.items())
216 result = workers[0][1].getstate()
217 for workerid, worker in workers[1:]:
218 logger.info("pulling state from worker %s", workerid)
219 result.merge(worker.getstate())
220 logger.info("sending out merged projection")
221 return result
222
223 @Pyro4.expose
224 def reset(self):
225 """Re-initialize all workers for a new decomposition."""
226 for workerid, worker in self.workers.items():
227 logger.info("resetting worker %s", workerid)
228 worker.reset()
229 worker.requestjob()
230 self._jobsdone = 0
231 self._jobsreceived = 0
232
233 @Pyro4.expose
234 @Pyro4.oneway
235 @utils.synchronous('lock_update')
236 def jobdone(self, workerid):
237 """A worker has finished its job. Log this event and then asynchronously transfer control back to the worker.
238
239 Callback used by workers to notify when their job is done.
240
241 The job done event is logged and then control is asynchronously transfered back to the worker
242 (who can then request another job). In this way, control flow basically oscillates between
243 :meth:`gensim.models.lsi_dispatcher.Dispatcher.jobdone` and :meth:`gensim.models.lsi_worker.Worker.requestjob`.
244
245 Parameters
246 ----------
247 workerid : int
248 The ID of the worker that finished the job (used for logging).
249
250 """
251 self._jobsdone += 1
252 logger.info("worker #%s finished job #%i", workerid, self._jobsdone)
253 worker = self.workers[workerid]
254 worker.requestjob() # tell the worker to ask for another job, asynchronously (one-way)
255
256 def jobsdone(self):
257 """Wrap :attr:`~gensim.models.lsi_dispatcher.Dispatcher._jobsdone`, needed for remote access through proxies.
258
259 Returns
260 -------
261 int
262 Number of jobs already completed.
263
264 """
265 return self._jobsdone
266
267 @Pyro4.oneway
268 def exit(self):
269 """Terminate all registered workers and then the dispatcher."""
270 for workerid, worker in self.workers.items():
271 logger.info("terminating worker %s", workerid)
272 worker.exit()
273 logger.info("terminating dispatcher")
274 os._exit(0) # exit the whole process (not just this thread ala sys.exit())
275
276
277 if __name__ == '__main__':
278 logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)
279 parser = argparse.ArgumentParser(description=__doc__[:-135], formatter_class=argparse.RawTextHelpFormatter)
280 parser.add_argument(
281 'maxsize', type=int, help='Maximum number of jobs to be kept pre-fetched in the queue.', default=MAX_JOBS_QUEUE
282 )
283 args = parser.parse_args()
284
285 logger.info("running %s", " ".join(sys.argv))
286 utils.pyro_daemon('gensim.lsi_dispatcher', Dispatcher(maxsize=args.maxsize))
287 logger.info("finished running %s", parser.prog)
288
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gensim/models/lsi_dispatcher.py b/gensim/models/lsi_dispatcher.py
--- a/gensim/models/lsi_dispatcher.py
+++ b/gensim/models/lsi_dispatcher.py
@@ -278,7 +278,11 @@
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)
parser = argparse.ArgumentParser(description=__doc__[:-135], formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
- 'maxsize', type=int, help='Maximum number of jobs to be kept pre-fetched in the queue.', default=MAX_JOBS_QUEUE
+ 'maxsize',
+ nargs='?',
+ type=int,
+ help='Maximum number of jobs to be kept pre-fetched in the queue.',
+ default=MAX_JOBS_QUEUE,
)
args = parser.parse_args()
|
{"golden_diff": "diff --git a/gensim/models/lsi_dispatcher.py b/gensim/models/lsi_dispatcher.py\n--- a/gensim/models/lsi_dispatcher.py\n+++ b/gensim/models/lsi_dispatcher.py\n@@ -278,7 +278,11 @@\n logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)\n parser = argparse.ArgumentParser(description=__doc__[:-135], formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument(\n- 'maxsize', type=int, help='Maximum number of jobs to be kept pre-fetched in the queue.', default=MAX_JOBS_QUEUE\n+ 'maxsize',\n+ nargs='?',\n+ type=int,\n+ help='Maximum number of jobs to be kept pre-fetched in the queue.',\n+ default=MAX_JOBS_QUEUE,\n )\n args = parser.parse_args()\n", "issue": "lsi_dispatcher is not working from command-line when not specifying maxsize argument\n#### Problem description\r\n\r\nWhen running `lsi_dispatcher` from the command-line, if you don't specify the `maxsize` argument explicitly, you get an error for the missing positional argument:\r\n\r\n```\r\nusage: lsi_dispatcher.py [-h] maxsize\r\nlsi_dispatcher.py: error: the following arguments are required: maxsize\r\n```\r\n\r\nAccording to the documentation, this argument should be optional.\r\n\r\nThe issue seems to be that the nargs argument to `add_argument` is missing:\r\n\r\n```python\r\n parser.add_argument(\r\n 'maxsize', type=int, help='Maximum number of jobs to be kept pre-fetched in the queue.', default=MAX_JOBS_QUEUE\r\n )\r\n ```\r\nIn order to make this argument optional, this should be:\r\n\r\n```python\r\n parser.add_argument(\r\n 'maxsize', nargs='?', type=int, help='Maximum number of jobs to be kept pre-fetched in the queue.', default=MAX_JOBS_QUEUE\r\n )\r\n```\r\n\r\n#### Steps/code/corpus to reproduce\r\n\r\nInclude full tracebacks, logs and datasets if necessary. Please keep the examples minimal (\"minimal reproducible example\").\r\n\r\nIf your problem is with a specific Gensim model (word2vec, lsimodel, doc2vec, fasttext, ldamodel etc), include the following:\r\n\r\n```python\r\n$ python3 -m gensim.models.lsi_dispatcher\r\nusage: lsi_dispatcher.py [-h] maxsize\r\nlsi_dispatcher.py: error: the following arguments are required: maxsize\r\n```\r\n\r\n#### Versions\r\n\r\n```python\r\nLinux-5.4.0-67-generic-x86_64-with-glibc2.2\r\nPython 3.8.5 (default, Jan 27 2021, 15:41:15) \r\n[GCC 9.3.0]\r\nBits 64\r\nNumPy 1.19.4\r\nSciPy 1.6.0\r\ngensim 4.0.1\r\nFAST_VERSION 1\r\n```\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2010 Radim Rehurek <[email protected]>\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\n\"\"\"Dispatcher process which orchestrates distributed :class:`~gensim.models.lsimodel.LsiModel` computations.\nRun this script only once, on any node in your cluster.\n\nNotes\n-----\nThe dispatcher expects to find worker scripts already running. Make sure you run as many workers as you like on\nyour machines **before** launching the dispatcher.\n\n\nHow to use distributed LSI\n--------------------------\n\n#. Install needed dependencies (Pyro4) ::\n\n pip install gensim[distributed]\n\n#. Setup serialization (on each machine) ::\n\n export PYRO_SERIALIZERS_ACCEPTED=pickle\n export PYRO_SERIALIZER=pickle\n\n#. Run nameserver ::\n\n python -m Pyro4.naming -n 0.0.0.0 &\n\n#. Run workers (on each machine) ::\n\n python -m gensim.models.lsi_worker &\n\n#. Run dispatcher ::\n\n python -m gensim.models.lsi_dispatcher &\n\n#. 
Run :class:`~gensim.models.lsimodel.LsiModel` in distributed mode:\n\n .. sourcecode:: pycon\n\n >>> from gensim.test.utils import common_corpus, common_dictionary\n >>> from gensim.models import LsiModel\n >>>\n >>> model = LsiModel(common_corpus, id2word=common_dictionary, distributed=True)\n\nCommand line arguments\n----------------------\n\n.. program-output:: python -m gensim.models.lsi_dispatcher --help\n :ellipsis: 0, -5\n\n\"\"\"\n\nimport os\nimport sys\nimport logging\nimport argparse\nimport threading\nimport time\nfrom queue import Queue\n\nimport Pyro4\n\nfrom gensim import utils\n\n\nlogger = logging.getLogger(__name__)\n\n# How many jobs (=chunks of N documents) to keep \"pre-fetched\" in a queue?\n# A small number is usually enough, unless iteration over the corpus is very very\n# slow (slower than the actual computation of LSI), in which case you can override\n# this value from command line. ie. run \"python ./lsi_dispatcher.py 100\"\nMAX_JOBS_QUEUE = 10\n\n# timeout for the Queue object put/get blocking methods.\n# it should really be infinity, but then keyboard interrupts don't work.\n# so this is really just a hack, see http://bugs.python.org/issue1360\nHUGE_TIMEOUT = 365 * 24 * 60 * 60 # one year\n\n\nclass Dispatcher:\n \"\"\"Dispatcher object that communicates and coordinates individual workers.\n\n Warnings\n --------\n There should never be more than one dispatcher running at any one time.\n\n \"\"\"\n def __init__(self, maxsize=0):\n \"\"\"Partly initialize the dispatcher.\n\n A full initialization (including initialization of the workers) requires a call to\n :meth:`~gensim.models.lsi_dispatcher.Dispatcher.initialize`\n\n Parameters\n ----------\n maxsize : int, optional\n Maximum number of jobs to be kept pre-fetched in the queue.\n\n \"\"\"\n self.maxsize = maxsize\n self.workers = {}\n self.callback = None # a pyro proxy to this object (unknown at init time, but will be set later)\n\n @Pyro4.expose\n def initialize(self, **model_params):\n \"\"\"Fully initialize the dispatcher and all its workers.\n\n Parameters\n ----------\n **model_params\n Keyword parameters used to initialize individual workers\n (gets handed all the way down to :meth:`gensim.models.lsi_worker.Worker.initialize`).\n See :class:`~gensim.models.lsimodel.LsiModel`.\n\n Raises\n ------\n RuntimeError\n When no workers are found (the :mod:`gensim.model.lsi_worker` script must be ran beforehand).\n\n \"\"\"\n self.jobs = Queue(maxsize=self.maxsize)\n self.lock_update = threading.Lock()\n self._jobsdone = 0\n self._jobsreceived = 0\n\n # locate all available workers and store their proxies, for subsequent RMI calls\n self.workers = {}\n with utils.getNS() as ns:\n self.callback = Pyro4.Proxy('PYRONAME:gensim.lsi_dispatcher') # = self\n for name, uri in ns.list(prefix='gensim.lsi_worker').items():\n try:\n worker = Pyro4.Proxy(uri)\n workerid = len(self.workers)\n # make time consuming methods work asynchronously\n logger.info(\"registering worker #%i from %s\", workerid, uri)\n worker.initialize(workerid, dispatcher=self.callback, **model_params)\n self.workers[workerid] = worker\n except Pyro4.errors.PyroError:\n logger.exception(\"unresponsive worker at %s, deleting it from the name server\", uri)\n ns.remove(name)\n\n if not self.workers:\n raise RuntimeError('no workers found; run some lsi_worker scripts on your machines first!')\n\n @Pyro4.expose\n def getworkers(self):\n \"\"\"Get pyro URIs of all registered workers.\n\n Returns\n -------\n list of URIs\n The pyro URIs for each worker.\n\n 
\"\"\"\n return [worker._pyroUri for worker in self.workers.values()]\n\n @Pyro4.expose\n def getjob(self, worker_id):\n \"\"\"Atomically pop a job from the queue.\n\n Parameters\n ----------\n worker_id : int\n The worker that requested the job.\n\n Returns\n -------\n iterable of iterable of (int, float)\n The corpus in BoW format.\n\n \"\"\"\n logger.info(\"worker #%i requesting a new job\", worker_id)\n job = self.jobs.get(block=True, timeout=1)\n logger.info(\"worker #%i got a new job (%i left)\", worker_id, self.jobs.qsize())\n return job\n\n @Pyro4.expose\n def putjob(self, job):\n \"\"\"Atomically add a job to the queue.\n\n Parameters\n ----------\n job : iterable of list of (int, float)\n The corpus in BoW format.\n\n \"\"\"\n self._jobsreceived += 1\n self.jobs.put(job, block=True, timeout=HUGE_TIMEOUT)\n logger.info(\"added a new job (len(queue)=%i items)\", self.jobs.qsize())\n\n @Pyro4.expose\n def getstate(self):\n \"\"\"Merge projections from across all workers and get the final projection.\n\n Returns\n -------\n :class:`~gensim.models.lsimodel.Projection`\n The current projection of the total model.\n\n \"\"\"\n logger.info(\"end of input, assigning all remaining jobs\")\n logger.debug(\"jobs done: %s, jobs received: %s\", self._jobsdone, self._jobsreceived)\n while self._jobsdone < self._jobsreceived:\n time.sleep(0.5) # check every half a second\n\n # TODO: merge in parallel, so that we're done in `log_2(workers)` merges,\n # and not `workers - 1` merges!\n # but merging only takes place once, after all input data has been processed,\n # so the overall effect would be small... compared to the amount of coding :-)\n logger.info(\"merging states from %i workers\", len(self.workers))\n workers = list(self.workers.items())\n result = workers[0][1].getstate()\n for workerid, worker in workers[1:]:\n logger.info(\"pulling state from worker %s\", workerid)\n result.merge(worker.getstate())\n logger.info(\"sending out merged projection\")\n return result\n\n @Pyro4.expose\n def reset(self):\n \"\"\"Re-initialize all workers for a new decomposition.\"\"\"\n for workerid, worker in self.workers.items():\n logger.info(\"resetting worker %s\", workerid)\n worker.reset()\n worker.requestjob()\n self._jobsdone = 0\n self._jobsreceived = 0\n\n @Pyro4.expose\n @Pyro4.oneway\n @utils.synchronous('lock_update')\n def jobdone(self, workerid):\n \"\"\"A worker has finished its job. Log this event and then asynchronously transfer control back to the worker.\n\n Callback used by workers to notify when their job is done.\n\n The job done event is logged and then control is asynchronously transfered back to the worker\n (who can then request another job). 
In this way, control flow basically oscillates between\n :meth:`gensim.models.lsi_dispatcher.Dispatcher.jobdone` and :meth:`gensim.models.lsi_worker.Worker.requestjob`.\n\n Parameters\n ----------\n workerid : int\n The ID of the worker that finished the job (used for logging).\n\n \"\"\"\n self._jobsdone += 1\n logger.info(\"worker #%s finished job #%i\", workerid, self._jobsdone)\n worker = self.workers[workerid]\n worker.requestjob() # tell the worker to ask for another job, asynchronously (one-way)\n\n def jobsdone(self):\n \"\"\"Wrap :attr:`~gensim.models.lsi_dispatcher.Dispatcher._jobsdone`, needed for remote access through proxies.\n\n Returns\n -------\n int\n Number of jobs already completed.\n\n \"\"\"\n return self._jobsdone\n\n @Pyro4.oneway\n def exit(self):\n \"\"\"Terminate all registered workers and then the dispatcher.\"\"\"\n for workerid, worker in self.workers.items():\n logger.info(\"terminating worker %s\", workerid)\n worker.exit()\n logger.info(\"terminating dispatcher\")\n os._exit(0) # exit the whole process (not just this thread ala sys.exit())\n\n\nif __name__ == '__main__':\n logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)\n parser = argparse.ArgumentParser(description=__doc__[:-135], formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument(\n 'maxsize', type=int, help='Maximum number of jobs to be kept pre-fetched in the queue.', default=MAX_JOBS_QUEUE\n )\n args = parser.parse_args()\n\n logger.info(\"running %s\", \" \".join(sys.argv))\n utils.pyro_daemon('gensim.lsi_dispatcher', Dispatcher(maxsize=args.maxsize))\n logger.info(\"finished running %s\", parser.prog)\n", "path": "gensim/models/lsi_dispatcher.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2010 Radim Rehurek <[email protected]>\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\n\"\"\"Dispatcher process which orchestrates distributed :class:`~gensim.models.lsimodel.LsiModel` computations.\nRun this script only once, on any node in your cluster.\n\nNotes\n-----\nThe dispatcher expects to find worker scripts already running. Make sure you run as many workers as you like on\nyour machines **before** launching the dispatcher.\n\n\nHow to use distributed LSI\n--------------------------\n\n#. Install needed dependencies (Pyro4) ::\n\n pip install gensim[distributed]\n\n#. Setup serialization (on each machine) ::\n\n export PYRO_SERIALIZERS_ACCEPTED=pickle\n export PYRO_SERIALIZER=pickle\n\n#. Run nameserver ::\n\n python -m Pyro4.naming -n 0.0.0.0 &\n\n#. Run workers (on each machine) ::\n\n python -m gensim.models.lsi_worker &\n\n#. Run dispatcher ::\n\n python -m gensim.models.lsi_dispatcher &\n\n#. Run :class:`~gensim.models.lsimodel.LsiModel` in distributed mode:\n\n .. sourcecode:: pycon\n\n >>> from gensim.test.utils import common_corpus, common_dictionary\n >>> from gensim.models import LsiModel\n >>>\n >>> model = LsiModel(common_corpus, id2word=common_dictionary, distributed=True)\n\nCommand line arguments\n----------------------\n\n.. 
program-output:: python -m gensim.models.lsi_dispatcher --help\n :ellipsis: 0, -5\n\n\"\"\"\n\nimport os\nimport sys\nimport logging\nimport argparse\nimport threading\nimport time\nfrom queue import Queue\n\nimport Pyro4\n\nfrom gensim import utils\n\n\nlogger = logging.getLogger(__name__)\n\n# How many jobs (=chunks of N documents) to keep \"pre-fetched\" in a queue?\n# A small number is usually enough, unless iteration over the corpus is very very\n# slow (slower than the actual computation of LSI), in which case you can override\n# this value from command line. ie. run \"python ./lsi_dispatcher.py 100\"\nMAX_JOBS_QUEUE = 10\n\n# timeout for the Queue object put/get blocking methods.\n# it should really be infinity, but then keyboard interrupts don't work.\n# so this is really just a hack, see http://bugs.python.org/issue1360\nHUGE_TIMEOUT = 365 * 24 * 60 * 60 # one year\n\n\nclass Dispatcher:\n \"\"\"Dispatcher object that communicates and coordinates individual workers.\n\n Warnings\n --------\n There should never be more than one dispatcher running at any one time.\n\n \"\"\"\n def __init__(self, maxsize=0):\n \"\"\"Partly initialize the dispatcher.\n\n A full initialization (including initialization of the workers) requires a call to\n :meth:`~gensim.models.lsi_dispatcher.Dispatcher.initialize`\n\n Parameters\n ----------\n maxsize : int, optional\n Maximum number of jobs to be kept pre-fetched in the queue.\n\n \"\"\"\n self.maxsize = maxsize\n self.workers = {}\n self.callback = None # a pyro proxy to this object (unknown at init time, but will be set later)\n\n @Pyro4.expose\n def initialize(self, **model_params):\n \"\"\"Fully initialize the dispatcher and all its workers.\n\n Parameters\n ----------\n **model_params\n Keyword parameters used to initialize individual workers\n (gets handed all the way down to :meth:`gensim.models.lsi_worker.Worker.initialize`).\n See :class:`~gensim.models.lsimodel.LsiModel`.\n\n Raises\n ------\n RuntimeError\n When no workers are found (the :mod:`gensim.model.lsi_worker` script must be ran beforehand).\n\n \"\"\"\n self.jobs = Queue(maxsize=self.maxsize)\n self.lock_update = threading.Lock()\n self._jobsdone = 0\n self._jobsreceived = 0\n\n # locate all available workers and store their proxies, for subsequent RMI calls\n self.workers = {}\n with utils.getNS() as ns:\n self.callback = Pyro4.Proxy('PYRONAME:gensim.lsi_dispatcher') # = self\n for name, uri in ns.list(prefix='gensim.lsi_worker').items():\n try:\n worker = Pyro4.Proxy(uri)\n workerid = len(self.workers)\n # make time consuming methods work asynchronously\n logger.info(\"registering worker #%i from %s\", workerid, uri)\n worker.initialize(workerid, dispatcher=self.callback, **model_params)\n self.workers[workerid] = worker\n except Pyro4.errors.PyroError:\n logger.exception(\"unresponsive worker at %s, deleting it from the name server\", uri)\n ns.remove(name)\n\n if not self.workers:\n raise RuntimeError('no workers found; run some lsi_worker scripts on your machines first!')\n\n @Pyro4.expose\n def getworkers(self):\n \"\"\"Get pyro URIs of all registered workers.\n\n Returns\n -------\n list of URIs\n The pyro URIs for each worker.\n\n \"\"\"\n return [worker._pyroUri for worker in self.workers.values()]\n\n @Pyro4.expose\n def getjob(self, worker_id):\n \"\"\"Atomically pop a job from the queue.\n\n Parameters\n ----------\n worker_id : int\n The worker that requested the job.\n\n Returns\n -------\n iterable of iterable of (int, float)\n The corpus in BoW format.\n\n \"\"\"\n 
logger.info(\"worker #%i requesting a new job\", worker_id)\n job = self.jobs.get(block=True, timeout=1)\n logger.info(\"worker #%i got a new job (%i left)\", worker_id, self.jobs.qsize())\n return job\n\n @Pyro4.expose\n def putjob(self, job):\n \"\"\"Atomically add a job to the queue.\n\n Parameters\n ----------\n job : iterable of list of (int, float)\n The corpus in BoW format.\n\n \"\"\"\n self._jobsreceived += 1\n self.jobs.put(job, block=True, timeout=HUGE_TIMEOUT)\n logger.info(\"added a new job (len(queue)=%i items)\", self.jobs.qsize())\n\n @Pyro4.expose\n def getstate(self):\n \"\"\"Merge projections from across all workers and get the final projection.\n\n Returns\n -------\n :class:`~gensim.models.lsimodel.Projection`\n The current projection of the total model.\n\n \"\"\"\n logger.info(\"end of input, assigning all remaining jobs\")\n logger.debug(\"jobs done: %s, jobs received: %s\", self._jobsdone, self._jobsreceived)\n while self._jobsdone < self._jobsreceived:\n time.sleep(0.5) # check every half a second\n\n # TODO: merge in parallel, so that we're done in `log_2(workers)` merges,\n # and not `workers - 1` merges!\n # but merging only takes place once, after all input data has been processed,\n # so the overall effect would be small... compared to the amount of coding :-)\n logger.info(\"merging states from %i workers\", len(self.workers))\n workers = list(self.workers.items())\n result = workers[0][1].getstate()\n for workerid, worker in workers[1:]:\n logger.info(\"pulling state from worker %s\", workerid)\n result.merge(worker.getstate())\n logger.info(\"sending out merged projection\")\n return result\n\n @Pyro4.expose\n def reset(self):\n \"\"\"Re-initialize all workers for a new decomposition.\"\"\"\n for workerid, worker in self.workers.items():\n logger.info(\"resetting worker %s\", workerid)\n worker.reset()\n worker.requestjob()\n self._jobsdone = 0\n self._jobsreceived = 0\n\n @Pyro4.expose\n @Pyro4.oneway\n @utils.synchronous('lock_update')\n def jobdone(self, workerid):\n \"\"\"A worker has finished its job. Log this event and then asynchronously transfer control back to the worker.\n\n Callback used by workers to notify when their job is done.\n\n The job done event is logged and then control is asynchronously transfered back to the worker\n (who can then request another job). 
In this way, control flow basically oscillates between\n :meth:`gensim.models.lsi_dispatcher.Dispatcher.jobdone` and :meth:`gensim.models.lsi_worker.Worker.requestjob`.\n\n Parameters\n ----------\n workerid : int\n The ID of the worker that finished the job (used for logging).\n\n \"\"\"\n self._jobsdone += 1\n logger.info(\"worker #%s finished job #%i\", workerid, self._jobsdone)\n worker = self.workers[workerid]\n worker.requestjob() # tell the worker to ask for another job, asynchronously (one-way)\n\n def jobsdone(self):\n \"\"\"Wrap :attr:`~gensim.models.lsi_dispatcher.Dispatcher._jobsdone`, needed for remote access through proxies.\n\n Returns\n -------\n int\n Number of jobs already completed.\n\n \"\"\"\n return self._jobsdone\n\n @Pyro4.oneway\n def exit(self):\n \"\"\"Terminate all registered workers and then the dispatcher.\"\"\"\n for workerid, worker in self.workers.items():\n logger.info(\"terminating worker %s\", workerid)\n worker.exit()\n logger.info(\"terminating dispatcher\")\n os._exit(0) # exit the whole process (not just this thread ala sys.exit())\n\n\nif __name__ == '__main__':\n logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)\n parser = argparse.ArgumentParser(description=__doc__[:-135], formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument(\n 'maxsize',\n nargs='?',\n type=int,\n help='Maximum number of jobs to be kept pre-fetched in the queue.',\n default=MAX_JOBS_QUEUE,\n )\n args = parser.parse_args()\n\n logger.info(\"running %s\", \" \".join(sys.argv))\n utils.pyro_daemon('gensim.lsi_dispatcher', Dispatcher(maxsize=args.maxsize))\n logger.info(\"finished running %s\", parser.prog)\n", "path": "gensim/models/lsi_dispatcher.py"}]}
| 3,760 | 193 |
gh_patches_debug_18541
|
rasdani/github-patches
|
git_diff
|
urllib3__urllib3-3310
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Create a mechanism to test different HTTP protocols (HTTP/1.1 and HTTP/2) with the same test case
Currently our test suite only has to worry about HTTP/1.1, but with HTTP/2 support coming we're going to need to run tests both in HTTP/1.1 and HTTP/2 modes to avoid duplicating the entire test suite for the high-level tests like for `PoolManager`.
My proposal for the mechanism would be to control Hypercorn's ALPN to only offer `http/1.1` or `h2` depending on the test suite, and then either run tests twice using a fixture or allow external control somehow (environment variable?) and run a separate job on CI? Open to other ideas too!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dummyserver/socketserver.py`
Content:
```
1 #!/usr/bin/env python
2
3 """
4 Dummy server used for unit testing.
5 """
6
7 from __future__ import annotations
8
9 import logging
10 import os
11 import socket
12 import ssl
13 import sys
14 import threading
15 import typing
16 import warnings
17
18 import trustme
19 from cryptography.hazmat.backends import default_backend
20 from cryptography.hazmat.primitives import serialization
21
22 from urllib3.exceptions import HTTPWarning
23 from urllib3.util import ALPN_PROTOCOLS, resolve_cert_reqs, resolve_ssl_version
24
25 if typing.TYPE_CHECKING:
26 from typing_extensions import ParamSpec
27
28 P = ParamSpec("P")
29
30 log = logging.getLogger(__name__)
31
32 CERTS_PATH = os.path.join(os.path.dirname(__file__), "certs")
33 DEFAULT_CERTS: dict[str, typing.Any] = {
34 "certfile": os.path.join(CERTS_PATH, "server.crt"),
35 "keyfile": os.path.join(CERTS_PATH, "server.key"),
36 "cert_reqs": ssl.CERT_OPTIONAL,
37 "ca_certs": os.path.join(CERTS_PATH, "cacert.pem"),
38 "alpn_protocols": ALPN_PROTOCOLS,
39 }
40 DEFAULT_CA = os.path.join(CERTS_PATH, "cacert.pem")
41 DEFAULT_CA_KEY = os.path.join(CERTS_PATH, "cacert.key")
42
43
44 def _resolves_to_ipv6(host: str) -> bool:
45 """Returns True if the system resolves host to an IPv6 address by default."""
46 resolves_to_ipv6 = False
47 try:
48 for res in socket.getaddrinfo(host, None, socket.AF_UNSPEC):
49 af, _, _, _, _ = res
50 if af == socket.AF_INET6:
51 resolves_to_ipv6 = True
52 except socket.gaierror:
53 pass
54
55 return resolves_to_ipv6
56
57
58 def _has_ipv6(host: str) -> bool:
59 """Returns True if the system can bind an IPv6 address."""
60 sock = None
61 has_ipv6 = False
62
63 if socket.has_ipv6:
64 # has_ipv6 returns true if cPython was compiled with IPv6 support.
65 # It does not tell us if the system has IPv6 support enabled. To
66 # determine that we must bind to an IPv6 address.
67 # https://github.com/urllib3/urllib3/pull/611
68 # https://bugs.python.org/issue658327
69 try:
70 sock = socket.socket(socket.AF_INET6)
71 sock.bind((host, 0))
72 has_ipv6 = _resolves_to_ipv6("localhost")
73 except Exception:
74 pass
75
76 if sock:
77 sock.close()
78 return has_ipv6
79
80
81 # Some systems may have IPv6 support but DNS may not be configured
82 # properly. We can not count that localhost will resolve to ::1 on all
83 # systems. See https://github.com/urllib3/urllib3/pull/611 and
84 # https://bugs.python.org/issue18792
85 HAS_IPV6_AND_DNS = _has_ipv6("localhost")
86 HAS_IPV6 = _has_ipv6("::1")
87
88
89 # Different types of servers we have:
90
91
92 class NoIPv6Warning(HTTPWarning):
93 "IPv6 is not available"
94
95
96 class SocketServerThread(threading.Thread):
97 """
98 :param socket_handler: Callable which receives a socket argument for one
99 request.
100 :param ready_event: Event which gets set when the socket handler is
101 ready to receive requests.
102 """
103
104 USE_IPV6 = HAS_IPV6_AND_DNS
105
106 def __init__(
107 self,
108 socket_handler: typing.Callable[[socket.socket], None],
109 host: str = "localhost",
110 ready_event: threading.Event | None = None,
111 ) -> None:
112 super().__init__()
113 self.daemon = True
114
115 self.socket_handler = socket_handler
116 self.host = host
117 self.ready_event = ready_event
118
119 def _start_server(self) -> None:
120 if self.USE_IPV6:
121 sock = socket.socket(socket.AF_INET6)
122 else:
123 warnings.warn("No IPv6 support. Falling back to IPv4.", NoIPv6Warning)
124 sock = socket.socket(socket.AF_INET)
125 if sys.platform != "win32":
126 sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
127
128 with sock:
129 sock.bind((self.host, 0))
130 self.port = sock.getsockname()[1]
131
132 # Once listen() returns, the server socket is ready
133 sock.listen(1)
134
135 if self.ready_event:
136 self.ready_event.set()
137
138 self.socket_handler(sock)
139
140 def run(self) -> None:
141 self._start_server()
142
143
144 def ssl_options_to_context( # type: ignore[no-untyped-def]
145 keyfile=None,
146 certfile=None,
147 server_side=None,
148 cert_reqs=None,
149 ssl_version: str | int | None = None,
150 ca_certs=None,
151 do_handshake_on_connect=None,
152 suppress_ragged_eofs=None,
153 ciphers=None,
154 alpn_protocols=None,
155 ) -> ssl.SSLContext:
156 """Return an equivalent SSLContext based on ssl.wrap_socket args."""
157 ssl_version = resolve_ssl_version(ssl_version)
158 cert_none = resolve_cert_reqs("CERT_NONE")
159 if cert_reqs is None:
160 cert_reqs = cert_none
161 else:
162 cert_reqs = resolve_cert_reqs(cert_reqs)
163
164 ctx = ssl.SSLContext(ssl_version)
165 ctx.load_cert_chain(certfile, keyfile)
166 ctx.verify_mode = cert_reqs
167 if ctx.verify_mode != cert_none:
168 ctx.load_verify_locations(cafile=ca_certs)
169 if alpn_protocols and hasattr(ctx, "set_alpn_protocols"):
170 try:
171 ctx.set_alpn_protocols(alpn_protocols)
172 except NotImplementedError:
173 pass
174 return ctx
175
176
177 def get_unreachable_address() -> tuple[str, int]:
178 # reserved as per rfc2606
179 return ("something.invalid", 54321)
180
181
182 def encrypt_key_pem(private_key_pem: trustme.Blob, password: bytes) -> trustme.Blob:
183 private_key = serialization.load_pem_private_key(
184 private_key_pem.bytes(), password=None, backend=default_backend()
185 )
186 encrypted_key = private_key.private_bytes(
187 serialization.Encoding.PEM,
188 serialization.PrivateFormat.TraditionalOpenSSL,
189 serialization.BestAvailableEncryption(password),
190 )
191 return trustme.Blob(encrypted_key)
192
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/dummyserver/socketserver.py b/dummyserver/socketserver.py
--- a/dummyserver/socketserver.py
+++ b/dummyserver/socketserver.py
@@ -20,7 +20,7 @@
from cryptography.hazmat.primitives import serialization
from urllib3.exceptions import HTTPWarning
-from urllib3.util import ALPN_PROTOCOLS, resolve_cert_reqs, resolve_ssl_version
+from urllib3.util import resolve_cert_reqs, resolve_ssl_version
if typing.TYPE_CHECKING:
from typing_extensions import ParamSpec
@@ -35,7 +35,7 @@
"keyfile": os.path.join(CERTS_PATH, "server.key"),
"cert_reqs": ssl.CERT_OPTIONAL,
"ca_certs": os.path.join(CERTS_PATH, "cacert.pem"),
- "alpn_protocols": ALPN_PROTOCOLS,
+ "alpn_protocols": ["h2", "http/1.1"],
}
DEFAULT_CA = os.path.join(CERTS_PATH, "cacert.pem")
DEFAULT_CA_KEY = os.path.join(CERTS_PATH, "cacert.key")
|
{"golden_diff": "diff --git a/dummyserver/socketserver.py b/dummyserver/socketserver.py\n--- a/dummyserver/socketserver.py\n+++ b/dummyserver/socketserver.py\n@@ -20,7 +20,7 @@\n from cryptography.hazmat.primitives import serialization\n \n from urllib3.exceptions import HTTPWarning\n-from urllib3.util import ALPN_PROTOCOLS, resolve_cert_reqs, resolve_ssl_version\n+from urllib3.util import resolve_cert_reqs, resolve_ssl_version\n \n if typing.TYPE_CHECKING:\n from typing_extensions import ParamSpec\n@@ -35,7 +35,7 @@\n \"keyfile\": os.path.join(CERTS_PATH, \"server.key\"),\n \"cert_reqs\": ssl.CERT_OPTIONAL,\n \"ca_certs\": os.path.join(CERTS_PATH, \"cacert.pem\"),\n- \"alpn_protocols\": ALPN_PROTOCOLS,\n+ \"alpn_protocols\": [\"h2\", \"http/1.1\"],\n }\n DEFAULT_CA = os.path.join(CERTS_PATH, \"cacert.pem\")\n DEFAULT_CA_KEY = os.path.join(CERTS_PATH, \"cacert.key\")\n", "issue": "Create a mechanism to test different HTTP protocols (HTTP/1.1 and HTTP/2) with the same test case\nCurrently our test suite only has to worry about HTTP/1.1, but with HTTP/2 support coming we're going to need to run tests both in HTTP/1.1 and HTTP/2 modes to avoid duplicating the entire test suite for the high-level tests like for `PoolManager`.\r\n\r\nMy proposal for the mechanism would be to control Hypercorn's ALPN to only offer `http/1.1` or `h2` depending on the test suite and then either run tests twice using a fixture or allowing external control somehow (environment variable?) and then running a separate job on CI? Open to other ideas too!\n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\nDummy server used for unit testing.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\nimport os\nimport socket\nimport ssl\nimport sys\nimport threading\nimport typing\nimport warnings\n\nimport trustme\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import serialization\n\nfrom urllib3.exceptions import HTTPWarning\nfrom urllib3.util import ALPN_PROTOCOLS, resolve_cert_reqs, resolve_ssl_version\n\nif typing.TYPE_CHECKING:\n from typing_extensions import ParamSpec\n\n P = ParamSpec(\"P\")\n\nlog = logging.getLogger(__name__)\n\nCERTS_PATH = os.path.join(os.path.dirname(__file__), \"certs\")\nDEFAULT_CERTS: dict[str, typing.Any] = {\n \"certfile\": os.path.join(CERTS_PATH, \"server.crt\"),\n \"keyfile\": os.path.join(CERTS_PATH, \"server.key\"),\n \"cert_reqs\": ssl.CERT_OPTIONAL,\n \"ca_certs\": os.path.join(CERTS_PATH, \"cacert.pem\"),\n \"alpn_protocols\": ALPN_PROTOCOLS,\n}\nDEFAULT_CA = os.path.join(CERTS_PATH, \"cacert.pem\")\nDEFAULT_CA_KEY = os.path.join(CERTS_PATH, \"cacert.key\")\n\n\ndef _resolves_to_ipv6(host: str) -> bool:\n \"\"\"Returns True if the system resolves host to an IPv6 address by default.\"\"\"\n resolves_to_ipv6 = False\n try:\n for res in socket.getaddrinfo(host, None, socket.AF_UNSPEC):\n af, _, _, _, _ = res\n if af == socket.AF_INET6:\n resolves_to_ipv6 = True\n except socket.gaierror:\n pass\n\n return resolves_to_ipv6\n\n\ndef _has_ipv6(host: str) -> bool:\n \"\"\"Returns True if the system can bind an IPv6 address.\"\"\"\n sock = None\n has_ipv6 = False\n\n if socket.has_ipv6:\n # has_ipv6 returns true if cPython was compiled with IPv6 support.\n # It does not tell us if the system has IPv6 support enabled. 
To\n # determine that we must bind to an IPv6 address.\n # https://github.com/urllib3/urllib3/pull/611\n # https://bugs.python.org/issue658327\n try:\n sock = socket.socket(socket.AF_INET6)\n sock.bind((host, 0))\n has_ipv6 = _resolves_to_ipv6(\"localhost\")\n except Exception:\n pass\n\n if sock:\n sock.close()\n return has_ipv6\n\n\n# Some systems may have IPv6 support but DNS may not be configured\n# properly. We can not count that localhost will resolve to ::1 on all\n# systems. See https://github.com/urllib3/urllib3/pull/611 and\n# https://bugs.python.org/issue18792\nHAS_IPV6_AND_DNS = _has_ipv6(\"localhost\")\nHAS_IPV6 = _has_ipv6(\"::1\")\n\n\n# Different types of servers we have:\n\n\nclass NoIPv6Warning(HTTPWarning):\n \"IPv6 is not available\"\n\n\nclass SocketServerThread(threading.Thread):\n \"\"\"\n :param socket_handler: Callable which receives a socket argument for one\n request.\n :param ready_event: Event which gets set when the socket handler is\n ready to receive requests.\n \"\"\"\n\n USE_IPV6 = HAS_IPV6_AND_DNS\n\n def __init__(\n self,\n socket_handler: typing.Callable[[socket.socket], None],\n host: str = \"localhost\",\n ready_event: threading.Event | None = None,\n ) -> None:\n super().__init__()\n self.daemon = True\n\n self.socket_handler = socket_handler\n self.host = host\n self.ready_event = ready_event\n\n def _start_server(self) -> None:\n if self.USE_IPV6:\n sock = socket.socket(socket.AF_INET6)\n else:\n warnings.warn(\"No IPv6 support. Falling back to IPv4.\", NoIPv6Warning)\n sock = socket.socket(socket.AF_INET)\n if sys.platform != \"win32\":\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n with sock:\n sock.bind((self.host, 0))\n self.port = sock.getsockname()[1]\n\n # Once listen() returns, the server socket is ready\n sock.listen(1)\n\n if self.ready_event:\n self.ready_event.set()\n\n self.socket_handler(sock)\n\n def run(self) -> None:\n self._start_server()\n\n\ndef ssl_options_to_context( # type: ignore[no-untyped-def]\n keyfile=None,\n certfile=None,\n server_side=None,\n cert_reqs=None,\n ssl_version: str | int | None = None,\n ca_certs=None,\n do_handshake_on_connect=None,\n suppress_ragged_eofs=None,\n ciphers=None,\n alpn_protocols=None,\n) -> ssl.SSLContext:\n \"\"\"Return an equivalent SSLContext based on ssl.wrap_socket args.\"\"\"\n ssl_version = resolve_ssl_version(ssl_version)\n cert_none = resolve_cert_reqs(\"CERT_NONE\")\n if cert_reqs is None:\n cert_reqs = cert_none\n else:\n cert_reqs = resolve_cert_reqs(cert_reqs)\n\n ctx = ssl.SSLContext(ssl_version)\n ctx.load_cert_chain(certfile, keyfile)\n ctx.verify_mode = cert_reqs\n if ctx.verify_mode != cert_none:\n ctx.load_verify_locations(cafile=ca_certs)\n if alpn_protocols and hasattr(ctx, \"set_alpn_protocols\"):\n try:\n ctx.set_alpn_protocols(alpn_protocols)\n except NotImplementedError:\n pass\n return ctx\n\n\ndef get_unreachable_address() -> tuple[str, int]:\n # reserved as per rfc2606\n return (\"something.invalid\", 54321)\n\n\ndef encrypt_key_pem(private_key_pem: trustme.Blob, password: bytes) -> trustme.Blob:\n private_key = serialization.load_pem_private_key(\n private_key_pem.bytes(), password=None, backend=default_backend()\n )\n encrypted_key = private_key.private_bytes(\n serialization.Encoding.PEM,\n serialization.PrivateFormat.TraditionalOpenSSL,\n serialization.BestAvailableEncryption(password),\n )\n return trustme.Blob(encrypted_key)\n", "path": "dummyserver/socketserver.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\nDummy 
server used for unit testing.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\nimport os\nimport socket\nimport ssl\nimport sys\nimport threading\nimport typing\nimport warnings\n\nimport trustme\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import serialization\n\nfrom urllib3.exceptions import HTTPWarning\nfrom urllib3.util import resolve_cert_reqs, resolve_ssl_version\n\nif typing.TYPE_CHECKING:\n from typing_extensions import ParamSpec\n\n P = ParamSpec(\"P\")\n\nlog = logging.getLogger(__name__)\n\nCERTS_PATH = os.path.join(os.path.dirname(__file__), \"certs\")\nDEFAULT_CERTS: dict[str, typing.Any] = {\n \"certfile\": os.path.join(CERTS_PATH, \"server.crt\"),\n \"keyfile\": os.path.join(CERTS_PATH, \"server.key\"),\n \"cert_reqs\": ssl.CERT_OPTIONAL,\n \"ca_certs\": os.path.join(CERTS_PATH, \"cacert.pem\"),\n \"alpn_protocols\": [\"h2\", \"http/1.1\"],\n}\nDEFAULT_CA = os.path.join(CERTS_PATH, \"cacert.pem\")\nDEFAULT_CA_KEY = os.path.join(CERTS_PATH, \"cacert.key\")\n\n\ndef _resolves_to_ipv6(host: str) -> bool:\n \"\"\"Returns True if the system resolves host to an IPv6 address by default.\"\"\"\n resolves_to_ipv6 = False\n try:\n for res in socket.getaddrinfo(host, None, socket.AF_UNSPEC):\n af, _, _, _, _ = res\n if af == socket.AF_INET6:\n resolves_to_ipv6 = True\n except socket.gaierror:\n pass\n\n return resolves_to_ipv6\n\n\ndef _has_ipv6(host: str) -> bool:\n \"\"\"Returns True if the system can bind an IPv6 address.\"\"\"\n sock = None\n has_ipv6 = False\n\n if socket.has_ipv6:\n # has_ipv6 returns true if cPython was compiled with IPv6 support.\n # It does not tell us if the system has IPv6 support enabled. To\n # determine that we must bind to an IPv6 address.\n # https://github.com/urllib3/urllib3/pull/611\n # https://bugs.python.org/issue658327\n try:\n sock = socket.socket(socket.AF_INET6)\n sock.bind((host, 0))\n has_ipv6 = _resolves_to_ipv6(\"localhost\")\n except Exception:\n pass\n\n if sock:\n sock.close()\n return has_ipv6\n\n\n# Some systems may have IPv6 support but DNS may not be configured\n# properly. We can not count that localhost will resolve to ::1 on all\n# systems. See https://github.com/urllib3/urllib3/pull/611 and\n# https://bugs.python.org/issue18792\nHAS_IPV6_AND_DNS = _has_ipv6(\"localhost\")\nHAS_IPV6 = _has_ipv6(\"::1\")\n\n\n# Different types of servers we have:\n\n\nclass NoIPv6Warning(HTTPWarning):\n \"IPv6 is not available\"\n\n\nclass SocketServerThread(threading.Thread):\n \"\"\"\n :param socket_handler: Callable which receives a socket argument for one\n request.\n :param ready_event: Event which gets set when the socket handler is\n ready to receive requests.\n \"\"\"\n\n USE_IPV6 = HAS_IPV6_AND_DNS\n\n def __init__(\n self,\n socket_handler: typing.Callable[[socket.socket], None],\n host: str = \"localhost\",\n ready_event: threading.Event | None = None,\n ) -> None:\n super().__init__()\n self.daemon = True\n\n self.socket_handler = socket_handler\n self.host = host\n self.ready_event = ready_event\n\n def _start_server(self) -> None:\n if self.USE_IPV6:\n sock = socket.socket(socket.AF_INET6)\n else:\n warnings.warn(\"No IPv6 support. 
Falling back to IPv4.\", NoIPv6Warning)\n sock = socket.socket(socket.AF_INET)\n if sys.platform != \"win32\":\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n with sock:\n sock.bind((self.host, 0))\n self.port = sock.getsockname()[1]\n\n # Once listen() returns, the server socket is ready\n sock.listen(1)\n\n if self.ready_event:\n self.ready_event.set()\n\n self.socket_handler(sock)\n\n def run(self) -> None:\n self._start_server()\n\n\ndef ssl_options_to_context( # type: ignore[no-untyped-def]\n keyfile=None,\n certfile=None,\n server_side=None,\n cert_reqs=None,\n ssl_version: str | int | None = None,\n ca_certs=None,\n do_handshake_on_connect=None,\n suppress_ragged_eofs=None,\n ciphers=None,\n alpn_protocols=None,\n) -> ssl.SSLContext:\n \"\"\"Return an equivalent SSLContext based on ssl.wrap_socket args.\"\"\"\n ssl_version = resolve_ssl_version(ssl_version)\n cert_none = resolve_cert_reqs(\"CERT_NONE\")\n if cert_reqs is None:\n cert_reqs = cert_none\n else:\n cert_reqs = resolve_cert_reqs(cert_reqs)\n\n ctx = ssl.SSLContext(ssl_version)\n ctx.load_cert_chain(certfile, keyfile)\n ctx.verify_mode = cert_reqs\n if ctx.verify_mode != cert_none:\n ctx.load_verify_locations(cafile=ca_certs)\n if alpn_protocols and hasattr(ctx, \"set_alpn_protocols\"):\n try:\n ctx.set_alpn_protocols(alpn_protocols)\n except NotImplementedError:\n pass\n return ctx\n\n\ndef get_unreachable_address() -> tuple[str, int]:\n # reserved as per rfc2606\n return (\"something.invalid\", 54321)\n\n\ndef encrypt_key_pem(private_key_pem: trustme.Blob, password: bytes) -> trustme.Blob:\n private_key = serialization.load_pem_private_key(\n private_key_pem.bytes(), password=None, backend=default_backend()\n )\n encrypted_key = private_key.private_bytes(\n serialization.Encoding.PEM,\n serialization.PrivateFormat.TraditionalOpenSSL,\n serialization.BestAvailableEncryption(password),\n )\n return trustme.Blob(encrypted_key)\n", "path": "dummyserver/socketserver.py"}]}
| 2,293 | 240 |
gh_patches_debug_39818
|
rasdani/github-patches
|
git_diff
|
akvo__akvo-rsr-2211
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Move to IATI country list by default
We now have our own country list in RSR, and IATI also uses a country list (which we also use for some fields, such as recipient country). I think we should completely switch to the IATI country list, but that also involves some other changes (filters on RSR pages, etc).
- [x] Projects
- [x] Remove the rsr_projectlocation.country field from the project editor
- [x] Remove all mentions of rsr_projectlocation.country in the validation sets
- [x] Do not hide recipient countries and regions in the RSR validation set
- [x] Convert any country of a project location to a recipient country
- [x] Remove the country field as a mandatory field for publishing in RSR
- [x] Update project page
- [x] Project header
- [x] Full report tab (remove country field from location)
- [x] Donation thanks page
- [x] Update projects list
- [x] Update mention of country per project
- [x] Update location filter
- [x] Update IATI import
- [x] Add comment to state that location.country is legacy
- [x] Project updates
- [x] Remove project update location countries (not used)
- [x] Remove location filter on updates list
- [x] Organisations
- [x] Add new iati_country field
- [x] Convert existing countries to IATI countries
- [x] Replace country field in 'old' organisation admin
- [x] Add comment to state that location.country is legacy
- [x] Update location filter on organisations list
- [x] Update organisations list
- [x] Employments
- [x] Convert the employment countries to IATI countries
- [x] Update 'Request to join an organisation' module
- [x] Make all migrations reversible
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rsr/migrations/0073_auto_20160525_1541.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from __future__ import unicode_literals
3
4 from django.db import models, migrations
5
6
7 def convert_org_locations_to_iati_countries(apps, schema_editor):
8 """
9 Convert the organisation location countries to the new 'iati_country' field.
10 """
11 OrganisationLocation = apps.get_model("rsr", "OrganisationLocation")
12
13 for location in OrganisationLocation.objects.all():
14 country = location.country
15 if country:
16 location.iati_country = country.iso_code.upper()
17 location.save(update_fields=['iati_country', ])
18
19
20 def convert_org_locations_to_iati_countries_reverse(apps, schema_editor):
21 """
22 Revert the organisation location countries to the old 'country' field.
23 """
24 OrganisationLocation = apps.get_model("rsr", "OrganisationLocation")
25 Country = apps.get_model("rsr", "Country")
26
27 for location in OrganisationLocation.objects.all():
28 country = location.iati_country
29 if country:
30 try:
31 location.country = Country.objects.get(iso_code=country.lower())
32 location.save(update_fields=['country', ])
33 except Country.DoesNotExist:
34 pass
35
36
37 def convert_employments_to_iati_countries(apps, schema_editor):
38 """
39 Convert the employment countries to the 'new_country_field' field. This field is renamed in
40 the following migration.
41 """
42 Employment = apps.get_model("rsr", "Employment")
43
44 for employment in Employment.objects.all():
45 country = employment.country
46 if country:
47 employment.new_country_field = country.iso_code.upper()
48 employment.save(update_fields=['new_country_field', ])
49
50
51 def convert_employments_to_iati_countries_reverse(apps, schema_editor):
52 """
53 Revert the employment countries to the old 'country' field.
54 """
55 Employment = apps.get_model("rsr", "Employment")
56 Country = apps.get_model("rsr", "Country")
57
58 for employment in Employment.objects.all():
59 new_country_field = employment.new_country_field
60 if new_country_field:
61 employment.country = Country.objects.get(iso_code=new_country_field.lower())
62 employment.save(update_fields=['country', ])
63
64
65 class Migration(migrations.Migration):
66
67 dependencies = [
68 ('rsr', '0072_auto_20160525_1528'),
69 ]
70
71 operations = [
72 migrations.RunPython(convert_org_locations_to_iati_countries,
73 convert_org_locations_to_iati_countries_reverse),
74 migrations.RunPython(convert_employments_to_iati_countries,
75 convert_employments_to_iati_countries_reverse),
76 ]
77
```
Path: `akvo/rsr/migrations/0071_auto_20160525_1036.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from __future__ import unicode_literals
3
4 from django.db import models, migrations
5
6
7 def remove_country_from_validation_sets(apps, schema_editor):
8 """
9 Removes the rsr_projectlocation.country field from all validation sets.
10 """
11 ProjectEditorValidationSet = apps.get_model("rsr", "ProjectEditorValidationSet")
12 validations_to_be_deleted = []
13
14 for validation_set in ProjectEditorValidationSet.objects.all():
15 for validation in validation_set.validations.all():
16 if 'rsr_projectlocation.country' in validation.validation:
17 validations_to_be_deleted.append(validation)
18
19 for delete_validation in validations_to_be_deleted:
20 delete_validation.delete()
21
22
23 def add_country_to_rsr_validation_set(apps, schema_editor):
24 """
25 Adds the rsr_projectlocation.country field to the RSR validation set, as mandatory field.
26 """
27 ProjectEditorValidationSet = apps.get_model("rsr", "ProjectEditorValidationSet")
28 ProjectEditorValidation = apps.get_model("rsr", "ProjectEditorValidation")
29 try:
30 rsr_validation_set = ProjectEditorValidationSet.objects.get(pk=1)
31 ProjectEditorValidation.objects.get_or_create(
32 validation_set=rsr_validation_set,
33 validation='rsr_projectlocation.country',
34 action=1
35 )
36 except ProjectEditorValidationSet.DoesNotExist:
37 # The RSR validation set (id = 1) can not be deleted and should always exist
38 pass
39
40
41 def do_not_hide_recipient_countries_and_regions(apps, schema_editor):
42 """
43 In the RSR validations set (id is always 1), remove the settings to hide the
44 rsr_recipientcountry and rsr_recipientregion models. Do hide some of the specific fields in
45 these models, such as the free text, percentage, vocabulary and vocabulary_uri.
46 """
47 ProjectEditorValidationSet = apps.get_model("rsr", "ProjectEditorValidationSet")
48 ProjectEditorValidation = apps.get_model("rsr", "ProjectEditorValidation")
49 validations_to_be_deleted = []
50 validations_to_be_added = [
51 ['rsr_recipientcountry', ['percentage', 'text']],
52 ['rsr_recipientregion', ['percentage', 'text', 'region_vocabulary',
53 'region_vocabulary_uri']],
54 ]
55
56 try:
57 rsr_validation_set = ProjectEditorValidationSet.objects.get(id=1)
58 for validation in rsr_validation_set.validations.all():
59 if validation.validation in ["rsr_recipientcountry", "rsr_recipientregion"] and \
60 validation.action == 2:
61 validations_to_be_deleted.append(validation)
62
63 for delete_validation in validations_to_be_deleted:
64 delete_validation.delete()
65
66 for add_validation in validations_to_be_added:
67 model_name = add_validation[0]
68 field_names = add_validation[1]
69 for field_name in field_names:
70 ProjectEditorValidation.objects.get_or_create(
71 validation_set=rsr_validation_set,
72 validation="{0}.{1}".format(model_name, field_name),
73 action=2
74 )
75 except ProjectEditorValidationSet.DoesNotExist:
76 # The RSR validation set (id = 1) can not be deleted and should always exist
77 pass
78
79
80 def hide_recipient_countries_and_regions(apps, schema_editor):
81 """
82 In the RSR validations set (id is always 1), re-add the settings to hide the
83 rsr_recipientcountry and rsr_recipientregion models.
84 """
85 ProjectEditorValidationSet = apps.get_model("rsr", "ProjectEditorValidationSet")
86 ProjectEditorValidation = apps.get_model("rsr", "ProjectEditorValidation")
87
88 try:
89 rsr_validation_set = ProjectEditorValidationSet.objects.get(pk=1)
90 ProjectEditorValidation.objects.get_or_create(
91 validation_set=rsr_validation_set,
92 validation='rsr_recipientcountry',
93 action=2
94 )
95 ProjectEditorValidation.objects.get_or_create(
96 validation_set=rsr_validation_set,
97 validation='rsr_recipientregion',
98 action=2
99 )
100 except ProjectEditorValidationSet.DoesNotExist:
101 # The RSR validation set (id = 1) can not be deleted and should always exist
102 pass
103
104
105 def convert_locations_to_recipient_countries(apps, schema_editor):
106 """
107 For each project retrieve the country code from the location and add a new recipient country
108 object if it does not exist yet.
109 """
110 Project = apps.get_model("rsr", "Project")
111 RecipientCountry = apps.get_model("rsr", "RecipientCountry")
112
113 for project in Project.objects.all():
114 country_codes = []
115 for location in project.locations.all():
116 country = location.country
117 if country:
118 country_codes.append(country.iso_code.upper())
119
120 for country_code in list(set(country_codes)):
121 RecipientCountry.objects.get_or_create(project=project, country=country_code)
122
123
124 def revert_locations_to_recipient_countries(apps, schema_editor):
125 """
126 Unfortunately we can't be sure which recipient country stems from a location, or whether it
127 already existed originally. Therefore we can only ignore it.
128 """
129 pass
130
131
132 class Migration(migrations.Migration):
133
134 dependencies = [
135 ('rsr', '0070_auto_20160519_1205'),
136 ]
137
138 operations = [
139 migrations.RunPython(remove_country_from_validation_sets,
140 add_country_to_rsr_validation_set),
141 migrations.RunPython(do_not_hide_recipient_countries_and_regions,
142 hide_recipient_countries_and_regions),
143 migrations.RunPython(convert_locations_to_recipient_countries,
144 revert_locations_to_recipient_countries),
145 ]
146
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/akvo/rsr/migrations/0071_auto_20160525_1036.py b/akvo/rsr/migrations/0071_auto_20160525_1036.py
--- a/akvo/rsr/migrations/0071_auto_20160525_1036.py
+++ b/akvo/rsr/migrations/0071_auto_20160525_1036.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
+from django.core.exceptions import MultipleObjectsReturned
from django.db import models, migrations
@@ -33,7 +34,7 @@
validation='rsr_projectlocation.country',
action=1
)
- except ProjectEditorValidationSet.DoesNotExist:
+ except (ProjectEditorValidationSet.DoesNotExist, MultipleObjectsReturned):
# The RSR validation set (id = 1) can not be deleted and should always exist
pass
@@ -72,7 +73,7 @@
validation="{0}.{1}".format(model_name, field_name),
action=2
)
- except ProjectEditorValidationSet.DoesNotExist:
+ except (ProjectEditorValidationSet.DoesNotExist, MultipleObjectsReturned):
# The RSR validation set (id = 1) can not be deleted and should always exist
pass
@@ -97,7 +98,7 @@
validation='rsr_recipientregion',
action=2
)
- except ProjectEditorValidationSet.DoesNotExist:
+ except (ProjectEditorValidationSet.DoesNotExist, MultipleObjectsReturned):
# The RSR validation set (id = 1) can not be deleted and should always exist
pass
@@ -118,7 +119,12 @@
country_codes.append(country.iso_code.upper())
for country_code in list(set(country_codes)):
- RecipientCountry.objects.get_or_create(project=project, country=country_code)
+ try:
+ RecipientCountry.objects.get_or_create(project=project, country=country_code)
+ except MultipleObjectsReturned:
+ # This could happen when a project has the same country specified as a recipient
+ # country multiple times
+ pass
def revert_locations_to_recipient_countries(apps, schema_editor):
diff --git a/akvo/rsr/migrations/0073_auto_20160525_1541.py b/akvo/rsr/migrations/0073_auto_20160525_1541.py
--- a/akvo/rsr/migrations/0073_auto_20160525_1541.py
+++ b/akvo/rsr/migrations/0073_auto_20160525_1541.py
@@ -58,8 +58,11 @@
for employment in Employment.objects.all():
new_country_field = employment.new_country_field
if new_country_field:
- employment.country = Country.objects.get(iso_code=new_country_field.lower())
- employment.save(update_fields=['country', ])
+ try:
+ employment.country = Country.objects.get(iso_code=new_country_field.lower())
+ employment.save(update_fields=['country', ])
+ except Country.DoesNotExist:
+ pass
class Migration(migrations.Migration):
|
{"golden_diff": "diff --git a/akvo/rsr/migrations/0071_auto_20160525_1036.py b/akvo/rsr/migrations/0071_auto_20160525_1036.py\n--- a/akvo/rsr/migrations/0071_auto_20160525_1036.py\n+++ b/akvo/rsr/migrations/0071_auto_20160525_1036.py\n@@ -1,6 +1,7 @@\n # -*- coding: utf-8 -*-\n from __future__ import unicode_literals\n \n+from django.core.exceptions import MultipleObjectsReturned\n from django.db import models, migrations\n \n \n@@ -33,7 +34,7 @@\n validation='rsr_projectlocation.country',\n action=1\n )\n- except ProjectEditorValidationSet.DoesNotExist:\n+ except (ProjectEditorValidationSet.DoesNotExist, MultipleObjectsReturned):\n # The RSR validation set (id = 1) can not be deleted and should always exist\n pass\n \n@@ -72,7 +73,7 @@\n validation=\"{0}.{1}\".format(model_name, field_name),\n action=2\n )\n- except ProjectEditorValidationSet.DoesNotExist:\n+ except (ProjectEditorValidationSet.DoesNotExist, MultipleObjectsReturned):\n # The RSR validation set (id = 1) can not be deleted and should always exist\n pass\n \n@@ -97,7 +98,7 @@\n validation='rsr_recipientregion',\n action=2\n )\n- except ProjectEditorValidationSet.DoesNotExist:\n+ except (ProjectEditorValidationSet.DoesNotExist, MultipleObjectsReturned):\n # The RSR validation set (id = 1) can not be deleted and should always exist\n pass\n \n@@ -118,7 +119,12 @@\n country_codes.append(country.iso_code.upper())\n \n for country_code in list(set(country_codes)):\n- RecipientCountry.objects.get_or_create(project=project, country=country_code)\n+ try:\n+ RecipientCountry.objects.get_or_create(project=project, country=country_code)\n+ except MultipleObjectsReturned:\n+ # This could happen when a project has the same country specified as a recipient\n+ # country multiple times\n+ pass\n \n \n def revert_locations_to_recipient_countries(apps, schema_editor):\ndiff --git a/akvo/rsr/migrations/0073_auto_20160525_1541.py b/akvo/rsr/migrations/0073_auto_20160525_1541.py\n--- a/akvo/rsr/migrations/0073_auto_20160525_1541.py\n+++ b/akvo/rsr/migrations/0073_auto_20160525_1541.py\n@@ -58,8 +58,11 @@\n for employment in Employment.objects.all():\n new_country_field = employment.new_country_field\n if new_country_field:\n- employment.country = Country.objects.get(iso_code=new_country_field.lower())\n- employment.save(update_fields=['country', ])\n+ try:\n+ employment.country = Country.objects.get(iso_code=new_country_field.lower())\n+ employment.save(update_fields=['country', ])\n+ except Country.DoesNotExist:\n+ pass\n \n \n class Migration(migrations.Migration):\n", "issue": "Move to IATI country list by default\nWe now have our own country list in RSR, and IATI also uses a country list (which we also use for some fields, such as recipient country). 
I think we should completely switch to the IATI country list, but that also involves some other changes (filters on RSR pages, etc).\n- [x] Projects\n - [x] Remove the rsr_projectlocation.country field from the project editor\n - [x] Remove all mentions of rsr_projectlocation.country in the validation sets\n - [x] Do not hide recipient countries and regions in the RSR validation set\n - [x] Convert any country of a project location to a recipient country\n - [x] Remove the country field as a mandatory field for publishing in RSR\n - [x] Update project page\n - [x] Project header\n - [x] Full report tab (remove country field from location)\n - [x] Donation thanks page\n - [x] Update projects list\n - [x] Update mention of country per project\n - [x] Update location filter\n - [x] Update IATI import\n - [x] Add comment to state that location.country is legacy\n- [x] Project updates\n - [x] Remove project update location countries (not used)\n - [x] Remove location filter on updates list\n- [x] Organisations\n - [x] Add new iati_country field\n - [x] Convert existing countries to IATI countries\n - [x] Replace country field in 'old' organisation admin\n - [x] Add comment to state that location.country is legacy\n - [x] Update location filter on organisations list\n - [x] Update organisations list\n- [x] Employments\n - [x] Convert the employment countries to IATI countries\n - [x] Update 'Request to join an organisation' module\n- [x] Make all migrations reversable\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\ndef convert_org_locations_to_iati_countries(apps, schema_editor):\n \"\"\"\n Convert the organisation location countries to the new 'iati_country' field.\n \"\"\"\n OrganisationLocation = apps.get_model(\"rsr\", \"OrganisationLocation\")\n\n for location in OrganisationLocation.objects.all():\n country = location.country\n if country:\n location.iati_country = country.iso_code.upper()\n location.save(update_fields=['iati_country', ])\n\n\ndef convert_org_locations_to_iati_countries_reverse(apps, schema_editor):\n \"\"\"\n Revert the organisation location countries to the old 'country' field.\n \"\"\"\n OrganisationLocation = apps.get_model(\"rsr\", \"OrganisationLocation\")\n Country = apps.get_model(\"rsr\", \"Country\")\n\n for location in OrganisationLocation.objects.all():\n country = location.iati_country\n if country:\n try:\n location.country = Country.objects.get(iso_code=country.lower())\n location.save(update_fields=['country', ])\n except Country.DoesNotExist:\n pass\n\n\ndef convert_employments_to_iati_countries(apps, schema_editor):\n \"\"\"\n Convert the employment countries to the 'new_country_field' field. 
This field is renamed in\n the following migration.\n \"\"\"\n Employment = apps.get_model(\"rsr\", \"Employment\")\n\n for employment in Employment.objects.all():\n country = employment.country\n if country:\n employment.new_country_field = country.iso_code.upper()\n employment.save(update_fields=['new_country_field', ])\n\n\ndef convert_employments_to_iati_countries_reverse(apps, schema_editor):\n \"\"\"\n Revert the employment countries to the old 'country' field.\n \"\"\"\n Employment = apps.get_model(\"rsr\", \"Employment\")\n Country = apps.get_model(\"rsr\", \"Country\")\n\n for employment in Employment.objects.all():\n new_country_field = employment.new_country_field\n if new_country_field:\n employment.country = Country.objects.get(iso_code=new_country_field.lower())\n employment.save(update_fields=['country', ])\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('rsr', '0072_auto_20160525_1528'),\n ]\n\n operations = [\n migrations.RunPython(convert_org_locations_to_iati_countries,\n convert_org_locations_to_iati_countries_reverse),\n migrations.RunPython(convert_employments_to_iati_countries,\n convert_employments_to_iati_countries_reverse),\n ]\n", "path": "akvo/rsr/migrations/0073_auto_20160525_1541.py"}, {"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\ndef remove_country_from_validation_sets(apps, schema_editor):\n \"\"\"\n Removes the rsr_projectlocation.country field from all validation sets.\n \"\"\"\n ProjectEditorValidationSet = apps.get_model(\"rsr\", \"ProjectEditorValidationSet\")\n validations_to_be_deleted = []\n\n for validation_set in ProjectEditorValidationSet.objects.all():\n for validation in validation_set.validations.all():\n if 'rsr_projectlocation.country' in validation.validation:\n validations_to_be_deleted.append(validation)\n\n for delete_validation in validations_to_be_deleted:\n delete_validation.delete()\n\n\ndef add_country_to_rsr_validation_set(apps, schema_editor):\n \"\"\"\n Adds the rsr_projectlocation.country field to the RSR validation set, as mandatory field.\n \"\"\"\n ProjectEditorValidationSet = apps.get_model(\"rsr\", \"ProjectEditorValidationSet\")\n ProjectEditorValidation = apps.get_model(\"rsr\", \"ProjectEditorValidation\")\n try:\n rsr_validation_set = ProjectEditorValidationSet.objects.get(pk=1)\n ProjectEditorValidation.objects.get_or_create(\n validation_set=rsr_validation_set,\n validation='rsr_projectlocation.country',\n action=1\n )\n except ProjectEditorValidationSet.DoesNotExist:\n # The RSR validation set (id = 1) can not be deleted and should always exist\n pass\n\n\ndef do_not_hide_recipient_countries_and_regions(apps, schema_editor):\n \"\"\"\n In the RSR validations set (id is always 1), remove the settings to hide the\n rsr_recipientcountry and rsr_recipientregion models. 
Do hide some of the specific fields in\n these models, such as the free text, percentage, vocabulary and vocabulary_uri.\n \"\"\"\n ProjectEditorValidationSet = apps.get_model(\"rsr\", \"ProjectEditorValidationSet\")\n ProjectEditorValidation = apps.get_model(\"rsr\", \"ProjectEditorValidation\")\n validations_to_be_deleted = []\n validations_to_be_added = [\n ['rsr_recipientcountry', ['percentage', 'text']],\n ['rsr_recipientregion', ['percentage', 'text', 'region_vocabulary',\n 'region_vocabulary_uri']],\n ]\n\n try:\n rsr_validation_set = ProjectEditorValidationSet.objects.get(id=1)\n for validation in rsr_validation_set.validations.all():\n if validation.validation in [\"rsr_recipientcountry\", \"rsr_recipientregion\"] and \\\n validation.action == 2:\n validations_to_be_deleted.append(validation)\n\n for delete_validation in validations_to_be_deleted:\n delete_validation.delete()\n\n for add_validation in validations_to_be_added:\n model_name = add_validation[0]\n field_names = add_validation[1]\n for field_name in field_names:\n ProjectEditorValidation.objects.get_or_create(\n validation_set=rsr_validation_set,\n validation=\"{0}.{1}\".format(model_name, field_name),\n action=2\n )\n except ProjectEditorValidationSet.DoesNotExist:\n # The RSR validation set (id = 1) can not be deleted and should always exist\n pass\n\n\ndef hide_recipient_countries_and_regions(apps, schema_editor):\n \"\"\"\n In the RSR validations set (id is always 1), re-add the settings to hide the\n rsr_recipientcountry and rsr_recipientregion models.\n \"\"\"\n ProjectEditorValidationSet = apps.get_model(\"rsr\", \"ProjectEditorValidationSet\")\n ProjectEditorValidation = apps.get_model(\"rsr\", \"ProjectEditorValidation\")\n\n try:\n rsr_validation_set = ProjectEditorValidationSet.objects.get(pk=1)\n ProjectEditorValidation.objects.get_or_create(\n validation_set=rsr_validation_set,\n validation='rsr_recipientcountry',\n action=2\n )\n ProjectEditorValidation.objects.get_or_create(\n validation_set=rsr_validation_set,\n validation='rsr_recipientregion',\n action=2\n )\n except ProjectEditorValidationSet.DoesNotExist:\n # The RSR validation set (id = 1) can not be deleted and should always exist\n pass\n\n\ndef convert_locations_to_recipient_countries(apps, schema_editor):\n \"\"\"\n For each project retrieve the country code from the location and add a new recipient country\n object if it does not exist yet.\n \"\"\"\n Project = apps.get_model(\"rsr\", \"Project\")\n RecipientCountry = apps.get_model(\"rsr\", \"RecipientCountry\")\n\n for project in Project.objects.all():\n country_codes = []\n for location in project.locations.all():\n country = location.country\n if country:\n country_codes.append(country.iso_code.upper())\n\n for country_code in list(set(country_codes)):\n RecipientCountry.objects.get_or_create(project=project, country=country_code)\n\n\ndef revert_locations_to_recipient_countries(apps, schema_editor):\n \"\"\"\n Unfortunately we can't be sure which recipient country stems from a location, or whether it\n already existed originally. 
Therefore we can only ignore it.\n \"\"\"\n pass\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('rsr', '0070_auto_20160519_1205'),\n ]\n\n operations = [\n migrations.RunPython(remove_country_from_validation_sets,\n add_country_to_rsr_validation_set),\n migrations.RunPython(do_not_hide_recipient_countries_and_regions,\n hide_recipient_countries_and_regions),\n migrations.RunPython(convert_locations_to_recipient_countries,\n revert_locations_to_recipient_countries),\n ]\n", "path": "akvo/rsr/migrations/0071_auto_20160525_1036.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\ndef convert_org_locations_to_iati_countries(apps, schema_editor):\n \"\"\"\n Convert the organisation location countries to the new 'iati_country' field.\n \"\"\"\n OrganisationLocation = apps.get_model(\"rsr\", \"OrganisationLocation\")\n\n for location in OrganisationLocation.objects.all():\n country = location.country\n if country:\n location.iati_country = country.iso_code.upper()\n location.save(update_fields=['iati_country', ])\n\n\ndef convert_org_locations_to_iati_countries_reverse(apps, schema_editor):\n \"\"\"\n Revert the organisation location countries to the old 'country' field.\n \"\"\"\n OrganisationLocation = apps.get_model(\"rsr\", \"OrganisationLocation\")\n Country = apps.get_model(\"rsr\", \"Country\")\n\n for location in OrganisationLocation.objects.all():\n country = location.iati_country\n if country:\n try:\n location.country = Country.objects.get(iso_code=country.lower())\n location.save(update_fields=['country', ])\n except Country.DoesNotExist:\n pass\n\n\ndef convert_employments_to_iati_countries(apps, schema_editor):\n \"\"\"\n Convert the employment countries to the 'new_country_field' field. 
This field is renamed in\n the following migration.\n \"\"\"\n Employment = apps.get_model(\"rsr\", \"Employment\")\n\n for employment in Employment.objects.all():\n country = employment.country\n if country:\n employment.new_country_field = country.iso_code.upper()\n employment.save(update_fields=['new_country_field', ])\n\n\ndef convert_employments_to_iati_countries_reverse(apps, schema_editor):\n \"\"\"\n Revert the employment countries to the old 'country' field.\n \"\"\"\n Employment = apps.get_model(\"rsr\", \"Employment\")\n Country = apps.get_model(\"rsr\", \"Country\")\n\n for employment in Employment.objects.all():\n new_country_field = employment.new_country_field\n if new_country_field:\n try:\n employment.country = Country.objects.get(iso_code=new_country_field.lower())\n employment.save(update_fields=['country', ])\n except Country.DoesNotExist:\n pass\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('rsr', '0072_auto_20160525_1528'),\n ]\n\n operations = [\n migrations.RunPython(convert_org_locations_to_iati_countries,\n convert_org_locations_to_iati_countries_reverse),\n migrations.RunPython(convert_employments_to_iati_countries,\n convert_employments_to_iati_countries_reverse),\n ]\n", "path": "akvo/rsr/migrations/0073_auto_20160525_1541.py"}, {"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.core.exceptions import MultipleObjectsReturned\nfrom django.db import models, migrations\n\n\ndef remove_country_from_validation_sets(apps, schema_editor):\n \"\"\"\n Removes the rsr_projectlocation.country field from all validation sets.\n \"\"\"\n ProjectEditorValidationSet = apps.get_model(\"rsr\", \"ProjectEditorValidationSet\")\n validations_to_be_deleted = []\n\n for validation_set in ProjectEditorValidationSet.objects.all():\n for validation in validation_set.validations.all():\n if 'rsr_projectlocation.country' in validation.validation:\n validations_to_be_deleted.append(validation)\n\n for delete_validation in validations_to_be_deleted:\n delete_validation.delete()\n\n\ndef add_country_to_rsr_validation_set(apps, schema_editor):\n \"\"\"\n Adds the rsr_projectlocation.country field to the RSR validation set, as mandatory field.\n \"\"\"\n ProjectEditorValidationSet = apps.get_model(\"rsr\", \"ProjectEditorValidationSet\")\n ProjectEditorValidation = apps.get_model(\"rsr\", \"ProjectEditorValidation\")\n try:\n rsr_validation_set = ProjectEditorValidationSet.objects.get(pk=1)\n ProjectEditorValidation.objects.get_or_create(\n validation_set=rsr_validation_set,\n validation='rsr_projectlocation.country',\n action=1\n )\n except (ProjectEditorValidationSet.DoesNotExist, MultipleObjectsReturned):\n # The RSR validation set (id = 1) can not be deleted and should always exist\n pass\n\n\ndef do_not_hide_recipient_countries_and_regions(apps, schema_editor):\n \"\"\"\n In the RSR validations set (id is always 1), remove the settings to hide the\n rsr_recipientcountry and rsr_recipientregion models. 
Do hide some of the specific fields in\n these models, such as the free text, percentage, vocabulary and vocabulary_uri.\n \"\"\"\n ProjectEditorValidationSet = apps.get_model(\"rsr\", \"ProjectEditorValidationSet\")\n ProjectEditorValidation = apps.get_model(\"rsr\", \"ProjectEditorValidation\")\n validations_to_be_deleted = []\n validations_to_be_added = [\n ['rsr_recipientcountry', ['percentage', 'text']],\n ['rsr_recipientregion', ['percentage', 'text', 'region_vocabulary',\n 'region_vocabulary_uri']],\n ]\n\n try:\n rsr_validation_set = ProjectEditorValidationSet.objects.get(id=1)\n for validation in rsr_validation_set.validations.all():\n if validation.validation in [\"rsr_recipientcountry\", \"rsr_recipientregion\"] and \\\n validation.action == 2:\n validations_to_be_deleted.append(validation)\n\n for delete_validation in validations_to_be_deleted:\n delete_validation.delete()\n\n for add_validation in validations_to_be_added:\n model_name = add_validation[0]\n field_names = add_validation[1]\n for field_name in field_names:\n ProjectEditorValidation.objects.get_or_create(\n validation_set=rsr_validation_set,\n validation=\"{0}.{1}\".format(model_name, field_name),\n action=2\n )\n except (ProjectEditorValidationSet.DoesNotExist, MultipleObjectsReturned):\n # The RSR validation set (id = 1) can not be deleted and should always exist\n pass\n\n\ndef hide_recipient_countries_and_regions(apps, schema_editor):\n \"\"\"\n In the RSR validations set (id is always 1), re-add the settings to hide the\n rsr_recipientcountry and rsr_recipientregion models.\n \"\"\"\n ProjectEditorValidationSet = apps.get_model(\"rsr\", \"ProjectEditorValidationSet\")\n ProjectEditorValidation = apps.get_model(\"rsr\", \"ProjectEditorValidation\")\n\n try:\n rsr_validation_set = ProjectEditorValidationSet.objects.get(pk=1)\n ProjectEditorValidation.objects.get_or_create(\n validation_set=rsr_validation_set,\n validation='rsr_recipientcountry',\n action=2\n )\n ProjectEditorValidation.objects.get_or_create(\n validation_set=rsr_validation_set,\n validation='rsr_recipientregion',\n action=2\n )\n except (ProjectEditorValidationSet.DoesNotExist, MultipleObjectsReturned):\n # The RSR validation set (id = 1) can not be deleted and should always exist\n pass\n\n\ndef convert_locations_to_recipient_countries(apps, schema_editor):\n \"\"\"\n For each project retrieve the country code from the location and add a new recipient country\n object if it does not exist yet.\n \"\"\"\n Project = apps.get_model(\"rsr\", \"Project\")\n RecipientCountry = apps.get_model(\"rsr\", \"RecipientCountry\")\n\n for project in Project.objects.all():\n country_codes = []\n for location in project.locations.all():\n country = location.country\n if country:\n country_codes.append(country.iso_code.upper())\n\n for country_code in list(set(country_codes)):\n try:\n RecipientCountry.objects.get_or_create(project=project, country=country_code)\n except MultipleObjectsReturned:\n # This could happen when a project has the same country specified as a recipient\n # country multiple times\n pass\n\n\ndef revert_locations_to_recipient_countries(apps, schema_editor):\n \"\"\"\n Unfortunately we can't be sure which recipient country stems from a location, or whether it\n already existed originally. 
Therefore we can only ignore it.\n \"\"\"\n pass\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('rsr', '0070_auto_20160519_1205'),\n ]\n\n operations = [\n migrations.RunPython(remove_country_from_validation_sets,\n add_country_to_rsr_validation_set),\n migrations.RunPython(do_not_hide_recipient_countries_and_regions,\n hide_recipient_countries_and_regions),\n migrations.RunPython(convert_locations_to_recipient_countries,\n revert_locations_to_recipient_countries),\n ]\n", "path": "akvo/rsr/migrations/0071_auto_20160525_1036.py"}]}
| 2,964 | 762 |
gh_patches_debug_27031
|
rasdani/github-patches
|
git_diff
|
aws-cloudformation__cfn-lint-2927
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`I3037` false positives in `AWS::ECS::TaskDefinition.ContainerDefinitions.Command`
### CloudFormation Lint Version
0.83.0
### What operating system are you using?
Mac
### Describe the bug
`I3037` issues (*List has a duplicate value*) are reported if the command specified in `AWS::ECS::TaskDefinition.ContainerDefinitions.Command` has repeating entries, e.g. the values of several command arguments are the same.
### Expected behavior
No issue is detected.
### Reproduction template
```json
{
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "This template deploys an ECS task definition.",
"Resources": {
"MyECSTaskDefinition": {
"Type": "AWS::ECS::TaskDefinition",
"Properties": {
"ContainerDefinitions": [
{
"Command": [
"do_something",
"--foo",
"1",
"--bar",
"1"
],
"Image": "my-image",
"Name": "my-task"
}
]
}
}
}
}
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/rules/resources/properties/ListDuplicatesAllowed.py`
Content:
```
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 import hashlib
6 import json
7
8 from cfnlint.helpers import RESOURCE_SPECS
9 from cfnlint.rules import CloudFormationLintRule, RuleMatch
10
11
12 class ListDuplicatesAllowed(CloudFormationLintRule):
13 """Check if duplicates exist in a List"""
14
15 id = "I3037"
16 shortdesc = "Check if a list that allows duplicates has any duplicates"
17 description = (
18 "Certain lists support duplicate items."
19 "Provide an alert when list of strings or numbers have repeats."
20 )
21 source_url = "https://github.com/aws-cloudformation/cfn-python-lint/blob/main/docs/rules.md#rules-1"
22 tags = ["resources", "property", "list"]
23
24 def initialize(self, cfn):
25 """Initialize the rule"""
26 for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get(
27 "ResourceTypes"
28 ):
29 self.resource_property_types.append(resource_type_spec)
30 for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get(
31 "PropertyTypes"
32 ):
33 self.resource_sub_property_types.append(property_type_spec)
34
35 def _check_duplicates(self, values, path, scenario=None):
36 """Check for Duplicates"""
37 matches = []
38
39 list_items = []
40 if isinstance(values, list):
41 for index, value in enumerate(values):
42 value_hash = hashlib.sha1(
43 json.dumps(value, sort_keys=True).encode("utf-8")
44 ).hexdigest()
45 if value_hash in list_items:
46 if not scenario:
47 message = "List has a duplicate value at {0}"
48 matches.append(
49 RuleMatch(
50 path + [index],
51 message.format("/".join(map(str, path + [index]))),
52 )
53 )
54 else:
55 scenario_text = " and ".join(
56 [f'condition "{k}" is {v}' for (k, v) in scenario.items()]
57 )
58 message = "List has a duplicate value at {0} when {1}"
59 matches.append(
60 RuleMatch(
61 path,
62 message.format("/".join(map(str, path)), scenario_text),
63 )
64 )
65
66 list_items.append(value_hash)
67
68 return matches
69
70 def check_duplicates(self, values, path, cfn):
71 """Check for duplicates"""
72 matches = []
73
74 if isinstance(values, list):
75 matches.extend(self._check_duplicates(values, path))
76 elif isinstance(values, dict):
77 props = cfn.get_object_without_conditions(values)
78 for prop in props:
79 matches.extend(
80 self._check_duplicates(
81 prop.get("Object"), path, prop.get("Scenario")
82 )
83 )
84
85 return matches
86
87 def check(self, cfn, properties, value_specs, path):
88 """Check itself"""
89 matches = []
90 for p_value, p_path in properties.items_safe(path[:]):
91 for prop in p_value:
92 if prop in value_specs:
93 property_type = value_specs.get(prop).get("Type")
94 primitive_type = value_specs.get(prop).get("PrimitiveItemType")
95 duplicates_allowed = value_specs.get(prop).get(
96 "DuplicatesAllowed", False
97 )
98 if (
99 property_type == "List"
100 and duplicates_allowed
101 and primitive_type in ["String", "Integer"]
102 ):
103 matches.extend(
104 self.check_duplicates(p_value[prop], p_path + [prop], cfn)
105 )
106
107 return matches
108
109 def match_resource_sub_properties(self, properties, property_type, path, cfn):
110 """Match for sub properties"""
111 matches = []
112
113 specs = (
114 RESOURCE_SPECS.get(cfn.regions[0])
115 .get("PropertyTypes")
116 .get(property_type, {})
117 .get("Properties", {})
118 )
119 matches.extend(self.check(cfn, properties, specs, path))
120
121 return matches
122
123 def match_resource_properties(self, properties, resource_type, path, cfn):
124 """Check CloudFormation Properties"""
125 matches = []
126
127 specs = (
128 RESOURCE_SPECS.get(cfn.regions[0])
129 .get("ResourceTypes")
130 .get(resource_type, {})
131 .get("Properties", {})
132 )
133 matches.extend(self.check(cfn, properties, specs, path))
134
135 return matches
136
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/cfnlint/rules/resources/properties/ListDuplicatesAllowed.py b/src/cfnlint/rules/resources/properties/ListDuplicatesAllowed.py
--- a/src/cfnlint/rules/resources/properties/ListDuplicatesAllowed.py
+++ b/src/cfnlint/rules/resources/properties/ListDuplicatesAllowed.py
@@ -21,6 +21,10 @@
source_url = "https://github.com/aws-cloudformation/cfn-python-lint/blob/main/docs/rules.md#rules-1"
tags = ["resources", "property", "list"]
+ def __init__(self):
+ super().__init__()
+ self.exceptions = ["Command"]
+
def initialize(self, cfn):
"""Initialize the rule"""
for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get(
@@ -71,11 +75,15 @@
"""Check for duplicates"""
matches = []
+ if path[-1] in self.exceptions:
+ return matches
if isinstance(values, list):
matches.extend(self._check_duplicates(values, path))
elif isinstance(values, dict):
props = cfn.get_object_without_conditions(values)
for prop in props:
+ if prop in self.exceptions:
+ continue
matches.extend(
self._check_duplicates(
prop.get("Object"), path, prop.get("Scenario")
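As a toy illustration of the patched behaviour (this is not the rule's real code path, which walks the resource spec), the new exception list simply short-circuits the check whenever the property being inspected is `Command`:

```python
EXCEPTIONS = ["Command"]  # the exception list introduced by the patch


def has_duplicates(values, path):
    # Properties named in EXCEPTIONS are skipped entirely, so repeated
    # command arguments no longer produce an I3037 match.
    if path and path[-1] in EXCEPTIONS:
        return False
    return len(values) != len(set(values))


assert not has_duplicates(["do_something", "--foo", "1", "--bar", "1"],
                          ["ContainerDefinitions", 0, "Command"])
```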
|
{"golden_diff": "diff --git a/src/cfnlint/rules/resources/properties/ListDuplicatesAllowed.py b/src/cfnlint/rules/resources/properties/ListDuplicatesAllowed.py\n--- a/src/cfnlint/rules/resources/properties/ListDuplicatesAllowed.py\n+++ b/src/cfnlint/rules/resources/properties/ListDuplicatesAllowed.py\n@@ -21,6 +21,10 @@\n source_url = \"https://github.com/aws-cloudformation/cfn-python-lint/blob/main/docs/rules.md#rules-1\"\n tags = [\"resources\", \"property\", \"list\"]\n \n+ def __init__(self):\n+ super().__init__()\n+ self.exceptions = [\"Command\"]\n+\n def initialize(self, cfn):\n \"\"\"Initialize the rule\"\"\"\n for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get(\n@@ -71,11 +75,15 @@\n \"\"\"Check for duplicates\"\"\"\n matches = []\n \n+ if path[-1] in self.exceptions:\n+ return matches\n if isinstance(values, list):\n matches.extend(self._check_duplicates(values, path))\n elif isinstance(values, dict):\n props = cfn.get_object_without_conditions(values)\n for prop in props:\n+ if prop in self.exceptions:\n+ continue\n matches.extend(\n self._check_duplicates(\n prop.get(\"Object\"), path, prop.get(\"Scenario\")\n", "issue": "`I3037` false positives in `AWS::ECS::TaskDefinition.ContainerDefinitions.Command`\n### CloudFormation Lint Version\n\n0.83.0\n\n### What operating system are you using?\n\nMac\n\n### Describe the bug\n\n`I3037` issues (*List has a duplicate value*) are reported if the command specified in `AWS::ECS::TaskDefinition.ContainerDefinitions.Command` has repeating entries, e.g. the values of several command arguments are the same.\n\n### Expected behavior\n\nNo issue is detected.\n\n### Reproduction template\n\n```json\r\n{\r\n \"AWSTemplateFormatVersion\": \"2010-09-09\",\r\n \"Description\": \"This template deploys an ECS task definition.\",\r\n \"Resources\": {\r\n \"MyECSTaskDefinition\": {\r\n \"Type\": \"AWS::ECS::TaskDefinition\",\r\n \"Properties\": {\r\n \"ContainerDefinitions\": [\r\n {\r\n \"Command\": [\r\n \"do_something\",\r\n \"--foo\",\r\n \"1\",\r\n \"--bar\",\r\n \"1\"\r\n ],\r\n \"Image\": \"my-image\",\r\n \"Name\": \"my-task\"\r\n }\r\n ]\r\n }\r\n }\r\n }\r\n}\r\n```\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport hashlib\nimport json\n\nfrom cfnlint.helpers import RESOURCE_SPECS\nfrom cfnlint.rules import CloudFormationLintRule, RuleMatch\n\n\nclass ListDuplicatesAllowed(CloudFormationLintRule):\n \"\"\"Check if duplicates exist in a List\"\"\"\n\n id = \"I3037\"\n shortdesc = \"Check if a list that allows duplicates has any duplicates\"\n description = (\n \"Certain lists support duplicate items.\"\n \"Provide an alert when list of strings or numbers have repeats.\"\n )\n source_url = \"https://github.com/aws-cloudformation/cfn-python-lint/blob/main/docs/rules.md#rules-1\"\n tags = [\"resources\", \"property\", \"list\"]\n\n def initialize(self, cfn):\n \"\"\"Initialize the rule\"\"\"\n for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get(\n \"ResourceTypes\"\n ):\n self.resource_property_types.append(resource_type_spec)\n for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get(\n \"PropertyTypes\"\n ):\n self.resource_sub_property_types.append(property_type_spec)\n\n def _check_duplicates(self, values, path, scenario=None):\n \"\"\"Check for Duplicates\"\"\"\n matches = []\n\n list_items = []\n if isinstance(values, list):\n for index, value in enumerate(values):\n value_hash = hashlib.sha1(\n json.dumps(value, sort_keys=True).encode(\"utf-8\")\n ).hexdigest()\n if value_hash in list_items:\n if not scenario:\n message = \"List has a duplicate value at {0}\"\n matches.append(\n RuleMatch(\n path + [index],\n message.format(\"/\".join(map(str, path + [index]))),\n )\n )\n else:\n scenario_text = \" and \".join(\n [f'condition \"{k}\" is {v}' for (k, v) in scenario.items()]\n )\n message = \"List has a duplicate value at {0} when {1}\"\n matches.append(\n RuleMatch(\n path,\n message.format(\"/\".join(map(str, path)), scenario_text),\n )\n )\n\n list_items.append(value_hash)\n\n return matches\n\n def check_duplicates(self, values, path, cfn):\n \"\"\"Check for duplicates\"\"\"\n matches = []\n\n if isinstance(values, list):\n matches.extend(self._check_duplicates(values, path))\n elif isinstance(values, dict):\n props = cfn.get_object_without_conditions(values)\n for prop in props:\n matches.extend(\n self._check_duplicates(\n prop.get(\"Object\"), path, prop.get(\"Scenario\")\n )\n )\n\n return matches\n\n def check(self, cfn, properties, value_specs, path):\n \"\"\"Check itself\"\"\"\n matches = []\n for p_value, p_path in properties.items_safe(path[:]):\n for prop in p_value:\n if prop in value_specs:\n property_type = value_specs.get(prop).get(\"Type\")\n primitive_type = value_specs.get(prop).get(\"PrimitiveItemType\")\n duplicates_allowed = value_specs.get(prop).get(\n \"DuplicatesAllowed\", False\n )\n if (\n property_type == \"List\"\n and duplicates_allowed\n and primitive_type in [\"String\", \"Integer\"]\n ):\n matches.extend(\n self.check_duplicates(p_value[prop], p_path + [prop], cfn)\n )\n\n return matches\n\n def match_resource_sub_properties(self, properties, property_type, path, cfn):\n \"\"\"Match for sub properties\"\"\"\n matches = []\n\n specs = (\n RESOURCE_SPECS.get(cfn.regions[0])\n .get(\"PropertyTypes\")\n .get(property_type, {})\n .get(\"Properties\", {})\n )\n matches.extend(self.check(cfn, properties, specs, path))\n\n return matches\n\n def match_resource_properties(self, properties, resource_type, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = []\n\n specs = (\n RESOURCE_SPECS.get(cfn.regions[0])\n .get(\"ResourceTypes\")\n .get(resource_type, {})\n 
.get(\"Properties\", {})\n )\n matches.extend(self.check(cfn, properties, specs, path))\n\n return matches\n", "path": "src/cfnlint/rules/resources/properties/ListDuplicatesAllowed.py"}], "after_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport hashlib\nimport json\n\nfrom cfnlint.helpers import RESOURCE_SPECS\nfrom cfnlint.rules import CloudFormationLintRule, RuleMatch\n\n\nclass ListDuplicatesAllowed(CloudFormationLintRule):\n \"\"\"Check if duplicates exist in a List\"\"\"\n\n id = \"I3037\"\n shortdesc = \"Check if a list that allows duplicates has any duplicates\"\n description = (\n \"Certain lists support duplicate items.\"\n \"Provide an alert when list of strings or numbers have repeats.\"\n )\n source_url = \"https://github.com/aws-cloudformation/cfn-python-lint/blob/main/docs/rules.md#rules-1\"\n tags = [\"resources\", \"property\", \"list\"]\n\n def __init__(self):\n super().__init__()\n self.exceptions = [\"Command\"]\n\n def initialize(self, cfn):\n \"\"\"Initialize the rule\"\"\"\n for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get(\n \"ResourceTypes\"\n ):\n self.resource_property_types.append(resource_type_spec)\n for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get(\n \"PropertyTypes\"\n ):\n self.resource_sub_property_types.append(property_type_spec)\n\n def _check_duplicates(self, values, path, scenario=None):\n \"\"\"Check for Duplicates\"\"\"\n matches = []\n\n list_items = []\n if isinstance(values, list):\n for index, value in enumerate(values):\n value_hash = hashlib.sha1(\n json.dumps(value, sort_keys=True).encode(\"utf-8\")\n ).hexdigest()\n if value_hash in list_items:\n if not scenario:\n message = \"List has a duplicate value at {0}\"\n matches.append(\n RuleMatch(\n path + [index],\n message.format(\"/\".join(map(str, path + [index]))),\n )\n )\n else:\n scenario_text = \" and \".join(\n [f'condition \"{k}\" is {v}' for (k, v) in scenario.items()]\n )\n message = \"List has a duplicate value at {0} when {1}\"\n matches.append(\n RuleMatch(\n path,\n message.format(\"/\".join(map(str, path)), scenario_text),\n )\n )\n\n list_items.append(value_hash)\n\n return matches\n\n def check_duplicates(self, values, path, cfn):\n \"\"\"Check for duplicates\"\"\"\n matches = []\n\n if path[-1] in self.exceptions:\n return matches\n if isinstance(values, list):\n matches.extend(self._check_duplicates(values, path))\n elif isinstance(values, dict):\n props = cfn.get_object_without_conditions(values)\n for prop in props:\n if prop in self.exceptions:\n continue\n matches.extend(\n self._check_duplicates(\n prop.get(\"Object\"), path, prop.get(\"Scenario\")\n )\n )\n\n return matches\n\n def check(self, cfn, properties, value_specs, path):\n \"\"\"Check itself\"\"\"\n matches = []\n for p_value, p_path in properties.items_safe(path[:]):\n for prop in p_value:\n if prop in value_specs:\n property_type = value_specs.get(prop).get(\"Type\")\n primitive_type = value_specs.get(prop).get(\"PrimitiveItemType\")\n duplicates_allowed = value_specs.get(prop).get(\n \"DuplicatesAllowed\", False\n )\n if (\n property_type == \"List\"\n and duplicates_allowed\n and primitive_type in [\"String\", \"Integer\"]\n ):\n matches.extend(\n self.check_duplicates(p_value[prop], p_path + [prop], cfn)\n )\n\n return matches\n\n def match_resource_sub_properties(self, properties, property_type, path, cfn):\n \"\"\"Match for sub properties\"\"\"\n matches = []\n\n specs = (\n 
RESOURCE_SPECS.get(cfn.regions[0])\n .get(\"PropertyTypes\")\n .get(property_type, {})\n .get(\"Properties\", {})\n )\n matches.extend(self.check(cfn, properties, specs, path))\n\n return matches\n\n def match_resource_properties(self, properties, resource_type, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = []\n\n specs = (\n RESOURCE_SPECS.get(cfn.regions[0])\n .get(\"ResourceTypes\")\n .get(resource_type, {})\n .get(\"Properties\", {})\n )\n matches.extend(self.check(cfn, properties, specs, path))\n\n return matches\n", "path": "src/cfnlint/rules/resources/properties/ListDuplicatesAllowed.py"}]}
| 1,738 | 287 |
gh_patches_debug_8210
|
rasdani/github-patches
|
git_diff
|
ivy-llc__ivy-16201
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
cosh
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/paddle/tensor/tensor.py`
Content:
```
1 # local
2 import ivy
3 import ivy.functional.frontends.paddle as paddle_frontend
4 from ivy.functional.frontends.paddle.func_wrapper import (
5 _to_ivy_array,
6 )
7 from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
8
9
10 class Tensor:
11 def __init__(self, array, dtype=None, place="cpu", stop_gradient=True):
12 self._ivy_array = (
13 ivy.array(array, dtype=dtype, device=place)
14 if not isinstance(array, ivy.Array)
15 else array
16 )
17 self._dtype = dtype
18 self._place = place
19 self._stop_gradient = stop_gradient
20
21 def __repr__(self):
22 return (
23 str(self._ivy_array.__repr__())
24 .replace("ivy.array", "ivy.frontends.paddle.Tensor")
25 .replace("dev", "place")
26 )
27
28 # Properties #
29 # ---------- #
30
31 @property
32 def ivy_array(self):
33 return self._ivy_array
34
35 @property
36 def place(self):
37 return self.ivy_array.device
38
39 @property
40 def dtype(self):
41 return self._ivy_array.dtype
42
43 @property
44 def shape(self):
45 return self._ivy_array.shape
46
47 @property
48 def ndim(self):
49 return self.dim()
50
51 # Setters #
52 # --------#
53
54 @ivy_array.setter
55 def ivy_array(self, array):
56 self._ivy_array = (
57 ivy.array(array) if not isinstance(array, ivy.Array) else array
58 )
59
60 # Special Methods #
61 # -------------------#
62
63 def __getitem__(self, item):
64 ivy_args = ivy.nested_map([self, item], _to_ivy_array)
65 ret = ivy.get_item(*ivy_args)
66 return paddle_frontend.Tensor(ret)
67
68 def __setitem__(self, item, value):
69 item, value = ivy.nested_map([item, value], _to_ivy_array)
70 self.ivy_array[item] = value
71
72 def __iter__(self):
73 if self.ndim == 0:
74 raise TypeError("iteration over a 0-d tensor not supported")
75 for i in range(self.shape[0]):
76 yield self[i]
77
78 # Instance Methods #
79 # ---------------- #
80
81 def reshape(self, *args, shape=None):
82 if args and shape:
83 raise TypeError("reshape() got multiple values for argument 'shape'")
84 if shape is not None:
85 return paddle_frontend.reshape(self._ivy_array, shape)
86 if args:
87 if isinstance(args[0], (tuple, list)):
88 shape = args[0]
89 return paddle_frontend.reshape(self._ivy_array, shape)
90 else:
91 return paddle_frontend.reshape(self._ivy_array, args)
92 return paddle_frontend.reshape(self._ivy_array)
93
94 def dim(self):
95 return self.ivy_array.ndim
96
97 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
98 def abs(self):
99 return paddle_frontend.abs(self)
100
101 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
102 def ceil(self):
103 return paddle_frontend.ceil(self)
104
105 @with_unsupported_dtypes({"2.4.2 and below": ("float16",)}, "paddle")
106 def asinh(self, name=None):
107 return ivy.asinh(self._ivy_array)
108
109 @with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle")
110 def asin(self, name=None):
111 return ivy.asin(self._ivy_array)
112
113 @with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle")
114 def log(self, name=None):
115 return ivy.log(self._ivy_array)
116
117 @with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle")
118 def sin(self, name=None):
119 return ivy.sin(self._ivy_array)
120
121 @with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle")
122 def sinh(self, name=None):
123 return ivy.sinh(self._ivy_array)
124
125 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
126 def argmax(self, axis=None, keepdim=False, dtype=None, name=None):
127 return ivy.argmax(self._ivy_array, axis=axis, keepdims=keepdim, dtype=dtype)
128
129 @with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle")
130 def cos(self, name=None):
131 return ivy.cos(self._ivy_array)
132
133 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
134 def exp(self, name=None):
135 return ivy.exp(self._ivy_array)
136
137 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
138 def log10(self, name=None):
139 return ivy.log10(self._ivy_array)
140
141 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
142 def argsort(self, axis=-1, descending=False, name=None):
143 return ivy.argsort(self._ivy_array, axis=axis, descending=descending)
144
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ivy/functional/frontends/paddle/tensor/tensor.py b/ivy/functional/frontends/paddle/tensor/tensor.py
--- a/ivy/functional/frontends/paddle/tensor/tensor.py
+++ b/ivy/functional/frontends/paddle/tensor/tensor.py
@@ -110,6 +110,10 @@
def asin(self, name=None):
return ivy.asin(self._ivy_array)
+ @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
+ def cosh(self, name=None):
+ return ivy.cosh(self._ivy_array)
+
@with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle")
def log(self, name=None):
return ivy.log(self._ivy_array)
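A rough usage sketch for the new method, assuming a working ivy checkout with the numpy backend installed (backend choice and sample values are illustrative only):

```python
import ivy
import ivy.functional.frontends.paddle as paddle_frontend

ivy.set_backend("numpy")  # assumption: the numpy backend is available

x = paddle_frontend.Tensor([0.0, 1.0, 2.0])
# Like the neighboring methods, cosh returns the underlying ivy array.
print(x.cosh())  # roughly [1.0, 1.5431, 3.7622]
```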
|
{"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/tensor.py b/ivy/functional/frontends/paddle/tensor/tensor.py\n--- a/ivy/functional/frontends/paddle/tensor/tensor.py\n+++ b/ivy/functional/frontends/paddle/tensor/tensor.py\n@@ -110,6 +110,10 @@\n def asin(self, name=None):\r\n return ivy.asin(self._ivy_array)\r\n \r\n+ @with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n+ def cosh(self, name=None):\r\n+ return ivy.cosh(self._ivy_array)\r\n+\r\n @with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def log(self, name=None):\r\n return ivy.log(self._ivy_array)\n", "issue": "cosh\n\n", "before_files": [{"content": "# local\r\nimport ivy\r\nimport ivy.functional.frontends.paddle as paddle_frontend\r\nfrom ivy.functional.frontends.paddle.func_wrapper import (\r\n _to_ivy_array,\r\n)\r\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\r\n\r\n\r\nclass Tensor:\r\n def __init__(self, array, dtype=None, place=\"cpu\", stop_gradient=True):\r\n self._ivy_array = (\r\n ivy.array(array, dtype=dtype, device=place)\r\n if not isinstance(array, ivy.Array)\r\n else array\r\n )\r\n self._dtype = dtype\r\n self._place = place\r\n self._stop_gradient = stop_gradient\r\n\r\n def __repr__(self):\r\n return (\r\n str(self._ivy_array.__repr__())\r\n .replace(\"ivy.array\", \"ivy.frontends.paddle.Tensor\")\r\n .replace(\"dev\", \"place\")\r\n )\r\n\r\n # Properties #\r\n # ---------- #\r\n\r\n @property\r\n def ivy_array(self):\r\n return self._ivy_array\r\n\r\n @property\r\n def place(self):\r\n return self.ivy_array.device\r\n\r\n @property\r\n def dtype(self):\r\n return self._ivy_array.dtype\r\n\r\n @property\r\n def shape(self):\r\n return self._ivy_array.shape\r\n\r\n @property\r\n def ndim(self):\r\n return self.dim()\r\n\r\n # Setters #\r\n # --------#\r\n\r\n @ivy_array.setter\r\n def ivy_array(self, array):\r\n self._ivy_array = (\r\n ivy.array(array) if not isinstance(array, ivy.Array) else array\r\n )\r\n\r\n # Special Methods #\r\n # -------------------#\r\n\r\n def __getitem__(self, item):\r\n ivy_args = ivy.nested_map([self, item], _to_ivy_array)\r\n ret = ivy.get_item(*ivy_args)\r\n return paddle_frontend.Tensor(ret)\r\n\r\n def __setitem__(self, item, value):\r\n item, value = ivy.nested_map([item, value], _to_ivy_array)\r\n self.ivy_array[item] = value\r\n\r\n def __iter__(self):\r\n if self.ndim == 0:\r\n raise TypeError(\"iteration over a 0-d tensor not supported\")\r\n for i in range(self.shape[0]):\r\n yield self[i]\r\n\r\n # Instance Methods #\r\n # ---------------- #\r\n\r\n def reshape(self, *args, shape=None):\r\n if args and shape:\r\n raise TypeError(\"reshape() got multiple values for argument 'shape'\")\r\n if shape is not None:\r\n return paddle_frontend.reshape(self._ivy_array, shape)\r\n if args:\r\n if isinstance(args[0], (tuple, list)):\r\n shape = args[0]\r\n return paddle_frontend.reshape(self._ivy_array, shape)\r\n else:\r\n return paddle_frontend.reshape(self._ivy_array, args)\r\n return paddle_frontend.reshape(self._ivy_array)\r\n\r\n def dim(self):\r\n return self.ivy_array.ndim\r\n\r\n @with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def abs(self):\r\n return paddle_frontend.abs(self)\r\n\r\n @with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def ceil(self):\r\n return paddle_frontend.ceil(self)\r\n\r\n @with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\",)}, 
\"paddle\")\r\n def asinh(self, name=None):\r\n return ivy.asinh(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def asin(self, name=None):\r\n return ivy.asin(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def log(self, name=None):\r\n return ivy.log(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def sin(self, name=None):\r\n return ivy.sin(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def sinh(self, name=None):\r\n return ivy.sinh(self._ivy_array)\r\n\r\n @with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def argmax(self, axis=None, keepdim=False, dtype=None, name=None):\r\n return ivy.argmax(self._ivy_array, axis=axis, keepdims=keepdim, dtype=dtype)\r\n\r\n @with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def cos(self, name=None):\r\n return ivy.cos(self._ivy_array)\r\n\r\n @with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def exp(self, name=None):\r\n return ivy.exp(self._ivy_array)\r\n\r\n @with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def log10(self, name=None):\r\n return ivy.log10(self._ivy_array)\r\n\r\n @with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def argsort(self, axis=-1, descending=False, name=None):\r\n return ivy.argsort(self._ivy_array, axis=axis, descending=descending)\r\n", "path": "ivy/functional/frontends/paddle/tensor/tensor.py"}], "after_files": [{"content": "# local\r\nimport ivy\r\nimport ivy.functional.frontends.paddle as paddle_frontend\r\nfrom ivy.functional.frontends.paddle.func_wrapper import (\r\n _to_ivy_array,\r\n)\r\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\r\n\r\n\r\nclass Tensor:\r\n def __init__(self, array, dtype=None, place=\"cpu\", stop_gradient=True):\r\n self._ivy_array = (\r\n ivy.array(array, dtype=dtype, device=place)\r\n if not isinstance(array, ivy.Array)\r\n else array\r\n )\r\n self._dtype = dtype\r\n self._place = place\r\n self._stop_gradient = stop_gradient\r\n\r\n def __repr__(self):\r\n return (\r\n str(self._ivy_array.__repr__())\r\n .replace(\"ivy.array\", \"ivy.frontends.paddle.Tensor\")\r\n .replace(\"dev\", \"place\")\r\n )\r\n\r\n # Properties #\r\n # ---------- #\r\n\r\n @property\r\n def ivy_array(self):\r\n return self._ivy_array\r\n\r\n @property\r\n def place(self):\r\n return self.ivy_array.device\r\n\r\n @property\r\n def dtype(self):\r\n return self._ivy_array.dtype\r\n\r\n @property\r\n def shape(self):\r\n return self._ivy_array.shape\r\n\r\n @property\r\n def ndim(self):\r\n return self.dim()\r\n\r\n # Setters #\r\n # --------#\r\n\r\n @ivy_array.setter\r\n def ivy_array(self, array):\r\n self._ivy_array = (\r\n ivy.array(array) if not isinstance(array, ivy.Array) else array\r\n )\r\n\r\n # Special Methods #\r\n # -------------------#\r\n\r\n def __getitem__(self, item):\r\n ivy_args = ivy.nested_map([self, item], _to_ivy_array)\r\n ret = ivy.get_item(*ivy_args)\r\n return paddle_frontend.Tensor(ret)\r\n\r\n def __setitem__(self, item, value):\r\n item, value = ivy.nested_map([item, value], _to_ivy_array)\r\n self.ivy_array[item] = value\r\n\r\n def __iter__(self):\r\n if self.ndim == 0:\r\n raise 
TypeError(\"iteration over a 0-d tensor not supported\")\r\n for i in range(self.shape[0]):\r\n yield self[i]\r\n\r\n # Instance Methods #\r\n # ---------------- #\r\n\r\n def reshape(self, *args, shape=None):\r\n if args and shape:\r\n raise TypeError(\"reshape() got multiple values for argument 'shape'\")\r\n if shape is not None:\r\n return paddle_frontend.reshape(self._ivy_array, shape)\r\n if args:\r\n if isinstance(args[0], (tuple, list)):\r\n shape = args[0]\r\n return paddle_frontend.reshape(self._ivy_array, shape)\r\n else:\r\n return paddle_frontend.reshape(self._ivy_array, args)\r\n return paddle_frontend.reshape(self._ivy_array)\r\n\r\n def dim(self):\r\n return self.ivy_array.ndim\r\n\r\n @with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def abs(self):\r\n return paddle_frontend.abs(self)\r\n\r\n @with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def ceil(self):\r\n return paddle_frontend.ceil(self)\r\n\r\n @with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\",)}, \"paddle\")\r\n def asinh(self, name=None):\r\n return ivy.asinh(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def asin(self, name=None):\r\n return ivy.asin(self._ivy_array)\r\n\r\n @with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def cosh(self, name=None):\r\n return ivy.cosh(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def log(self, name=None):\r\n return ivy.log(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def sin(self, name=None):\r\n return ivy.sin(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def sinh(self, name=None):\r\n return ivy.sinh(self._ivy_array)\r\n\r\n @with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def argmax(self, axis=None, keepdim=False, dtype=None, name=None):\r\n return ivy.argmax(self._ivy_array, axis=axis, keepdims=keepdim, dtype=dtype)\r\n\r\n @with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def cos(self, name=None):\r\n return ivy.cos(self._ivy_array)\r\n\r\n @with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def exp(self, name=None):\r\n return ivy.exp(self._ivy_array)\r\n\r\n @with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def log10(self, name=None):\r\n return ivy.log10(self._ivy_array)\r\n\r\n @with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def argsort(self, axis=-1, descending=False, name=None):\r\n return ivy.argsort(self._ivy_array, axis=axis, descending=descending)\r\n", "path": "ivy/functional/frontends/paddle/tensor/tensor.py"}]}
| 1,851 | 205 |
gh_patches_debug_30550
|
rasdani/github-patches
|
git_diff
|
localstack__localstack-6558
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bug: SQS performance regression with ASF
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Behavior
Hi, I'm testing the image `localstack/localstack:0.14.5`, and setting `LEGACY_EDGE_PROXY=1` makes the following cleanup code much faster (i.e. the new ASF gateway is the slow path):
`for queue in client.list_queues()['QueueUrls']: client.purge_queue(QueueUrl=queue)`
`LEGACY_EDGE_PROXY=1`: `56.2 ms ± 2.9 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)`
`LEGACY_EDGE_PROXY=0`: `258 ms ± 17.6 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)`
### Expected Behavior
I expected performance to improve or not to regress.
### How are you starting LocalStack?
With a docker-compose file
### Steps To Reproduce
# Benchmark setup:
Docker-compose file:
```
version: "3.9"
services:
localstack:
image: localstack/localstack:latest
environment:
- DEFAULT_REGION=eu-west-1
- SERVICES=s3,sns,sqs
- LEGACY_EDGE_PROXY=1 # or 0
ipc: host
```
```
# docker-compose rm -f localstack && docker-compose up localstack
```
```
import boto3
client = boto3.resource(
'sqs',
endpoint_url="http://localstack:4566/",
region_name='eu-west-1',
aws_secret_access_key="x",
aws_access_key_id="x",
use_ssl=False,
).meta.client
for i in range(10):
client.create_queue(QueueName=str(i))
queues = client.list_queues()['QueueUrls']
assert len(queues) == 10
```
```
In [3]: %timeit for queue in client.list_queues()['QueueUrls']: client.purge_queue(QueueUrl=queue)
```
### Environment
```markdown
- OS: Ubuntu 22.04 LTS, Linux 5.15.0-40-generic #43-Ubuntu SMP Wed Jun 15 12:54:21 UTC 2022 x86_64
- docker-compose 1.29.2
- Docker: 20.10.12, build 20.10.12-0ubuntu4
- LocalStack: 0.14.5
```
### Anything else?
Results don't improve over time, it's not a cold start issue.
https://github.com/localstack/localstack/issues/6398
--- END ISSUE ---
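A plain-Python version of the `%timeit` benchmark from the reproduction steps above can be useful outside IPython; it reuses the same client settings, and the hostname assumes the script runs on the compose network next to the `localstack` service:

```python
import time

import boto3

client = boto3.client(
    "sqs",
    endpoint_url="http://localstack:4566/",
    region_name="eu-west-1",
    aws_access_key_id="x",
    aws_secret_access_key="x",
    use_ssl=False,
)

start = time.perf_counter()
for queue in client.list_queues()["QueueUrls"]:
    client.purge_queue(QueueUrl=queue)
print(f"purge loop: {(time.perf_counter() - start) * 1000:.1f} ms")
```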
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `localstack/aws/app.py`
Content:
```
1 from localstack.aws import handlers
2 from localstack.aws.handlers.metric_handler import MetricHandler
3 from localstack.aws.handlers.service_plugin import ServiceLoader
4 from localstack.services.plugins import SERVICE_PLUGINS, ServiceManager, ServicePluginManager
5
6 from .gateway import Gateway
7 from .handlers.fallback import EmptyResponseHandler
8 from .handlers.service import ServiceRequestRouter
9
10
11 class LocalstackAwsGateway(Gateway):
12 def __init__(self, service_manager: ServiceManager = None) -> None:
13 super().__init__()
14
15 # basic server components
16 self.service_manager = service_manager or ServicePluginManager()
17 self.service_request_router = ServiceRequestRouter()
18 # lazy-loads services into the router
19 load_service = ServiceLoader(self.service_manager, self.service_request_router)
20
21 metric_collector = MetricHandler()
22 # the main request handler chain
23 self.request_handlers.extend(
24 [
25 handlers.push_request_context,
26 metric_collector.create_metric_handler_item,
27 handlers.parse_service_name, # enforce_cors and content_decoder depend on the service name
28 handlers.enforce_cors,
29 handlers.content_decoder,
30 handlers.serve_localstack_resources, # try to serve internal resources in /_localstack first
31 handlers.serve_default_listeners, # legacy proxy default listeners
32 handlers.serve_edge_router_rules,
33 # start aws handler chain
34 handlers.inject_auth_header_if_missing,
35 handlers.add_region_from_header,
36 handlers.add_account_id,
37 handlers.parse_service_request,
38 metric_collector.record_parsed_request,
39 handlers.serve_custom_service_request_handlers,
40 load_service, # once we have the service request we can make sure we load the service
41 self.service_request_router, # once we know the service is loaded we can route the request
42 # if the chain is still running, set an empty response
43 EmptyResponseHandler(404, b'{"message": "Not Found"}'),
44 ]
45 )
46
47 # exception handlers in the chain
48 self.exception_handlers.extend(
49 [
50 handlers.log_exception,
51 handlers.handle_service_exception,
52 handlers.handle_internal_failure,
53 ]
54 )
55
56 # response post-processing
57 self.response_handlers.extend(
58 [
59 handlers.parse_service_response,
60 handlers.run_custom_response_handlers,
61 handlers.add_cors_response_headers,
62 handlers.log_response,
63 handlers.count_service_request,
64 handlers.pop_request_context,
65 metric_collector.update_metric_collection,
66 ]
67 )
68
69
70 def main():
71 """
72 Serve the LocalstackGateway with the default configuration directly through hypercorn. This is mostly for
73 development purposes and documentation on how to serve the Gateway.
74 """
75 from .serving.hypercorn import serve
76
77 use_ssl = True
78 port = 4566
79
80 # serve the LocalStackAwsGateway in a dev app
81 from localstack.utils.bootstrap import setup_logging
82
83 setup_logging()
84
85 if use_ssl:
86 from localstack.services.generic_proxy import (
87 GenericProxy,
88 install_predefined_cert_if_available,
89 )
90
91 install_predefined_cert_if_available()
92 _, cert_file_name, key_file_name = GenericProxy.create_ssl_cert(serial_number=port)
93 ssl_creds = (cert_file_name, key_file_name)
94 else:
95 ssl_creds = None
96
97 gw = LocalstackAwsGateway(SERVICE_PLUGINS)
98
99 serve(gw, use_reloader=True, port=port, ssl_creds=ssl_creds)
100
101
102 if __name__ == "__main__":
103 main()
104
```
Path: `localstack/aws/handlers/legacy.py`
Content:
```
1 """ Handlers for compatibility with legacy edge proxy and the quart http framework."""
2
3 import logging
4 import re
5 from typing import Mapping
6
7 from requests import Response as RequestsResponse
8
9 from localstack.constants import HEADER_LOCALSTACK_EDGE_URL, HEADER_LOCALSTACK_REQUEST_URL
10 from localstack.http import Response
11 from localstack.http.request import restore_payload
12 from localstack.services.generic_proxy import ProxyListener, modify_and_forward
13
14 from ..api import RequestContext
15 from ..chain import Handler, HandlerChain
16 from .routes import RouterHandler
17
18 LOG = logging.getLogger(__name__)
19
20
21 def push_request_context(_chain: HandlerChain, context: RequestContext, _response: Response):
22 # hack for legacy compatibility. various parts of localstack access the global flask/quart/our own request
23 # context. since we're neither in a flask nor a quart context, we're pushing our own context object into their
24 # proxy objects, which is terrible, but works because mostly code just accesses "context.request", so we don't
25 # have to bother pushing a real quart/flask context.
26 import flask.globals
27 import quart.globals
28
29 from localstack.utils.aws import request_context
30
31 quart.globals._request_ctx_stack.push(context)
32 flask.globals._request_ctx_stack.push(context)
33 request_context.THREAD_LOCAL.request_context = context.request
34
35
36 def pop_request_context(_chain: HandlerChain, _context: RequestContext, _response: Response):
37 # hack for legacy compatibility
38 import flask.globals
39 import quart.globals
40
41 from localstack.utils.aws import request_context
42
43 quart.globals._request_ctx_stack.pop()
44 flask.globals._request_ctx_stack.pop()
45 request_context.THREAD_LOCAL.request_context = None
46
47
48 class EdgeRouterHandler(RouterHandler):
49 def __init__(self, respond_not_found=False) -> None:
50 from localstack.services.edge import ROUTER
51
52 super().__init__(ROUTER, respond_not_found)
53
54
55 class GenericProxyHandler(Handler):
56 """
57 This handler maps HandlerChain requests to the generic proxy ProxyListener interface `forward_request`.
58 """
59
60 def __call__(self, chain: HandlerChain, context: RequestContext, response: Response):
61 request = context.request
62
63 # a werkzeug Request consumes form/multipart data from the socket stream, so we need to restore the payload here
64 data = restore_payload(request)
65
66 # TODO: rethink whether this proxy handling is necessary
67 context.request.headers[HEADER_LOCALSTACK_REQUEST_URL] = context.request.base_url
68
69 result = self.forward_request(
70 context,
71 method=request.method,
72 path=request.full_path if request.query_string else request.path,
73 data=data,
74 headers=request.headers,
75 )
76
77 if type(result) == int:
78 chain.respond(status_code=result)
79 return
80
81 if isinstance(result, tuple):
82 # special case for Kinesis SubscribeToShard
83 if len(result) == 2:
84 response.status_code = 200
85 response.set_response(result[0])
86 response.headers.update(dict(result[1]))
87 chain.stop()
88 return
89
90 if isinstance(result, RequestsResponse):
91 response.status_code = result.status_code
92 response.set_response(result.content)
93 # make sure headers are set after the content, so potential content-length headers are overwritten
94 response.headers.update(dict(result.headers))
95
96 # make sure content-length is re-calculated correctly, unless it's a HEAD request
97 if request.method != "HEAD":
98 length = response.calculate_content_length()
99 if length is not None:
100 response.headers["Content-Length"] = str(length)
101 chain.stop()
102 return
103
104 raise ValueError("cannot create response for result %s" % result)
105
106 def forward_request(
107 self, context: RequestContext, method: str, path: str, data: bytes, headers: Mapping
108 ):
109 raise NotImplementedError
110
111
112 class LegacyPluginHandler(GenericProxyHandler):
113 """
114 This adapter exposes Services that are developed as ProxyListener as Handler.
115 """
116
117 def forward_request(
118 self, context: RequestContext, method: str, path: str, data: bytes, headers: Mapping
119 ):
120 from localstack.services.edge import do_forward_request
121
122 # TODO: rethink whether this proxy handling is necessary
123 request = context.request
124 orig_req_url = request.headers.pop(HEADER_LOCALSTACK_REQUEST_URL, "")
125 request.headers[HEADER_LOCALSTACK_EDGE_URL] = (
126 re.sub(r"^([^:]+://[^/]+).*", r"\1", orig_req_url) or request.host_url
127 )
128
129 return do_forward_request(
130 api=context.service.service_name,
131 method=method,
132 path=path,
133 data=data,
134 headers=headers,
135 port=None,
136 )
137
138
139 class _NoHandlerCalled(Exception):
140 pass
141
142
143 class _DummyProxyListener(ProxyListener):
144 def forward_request(self, method, path, data, headers):
145 raise _NoHandlerCalled
146
147
148 class DefaultListenerHandler(GenericProxyHandler):
149 """
150 Adapter that exposes the ProxyListener.DEFAULT_LISTENERS as a Handler.
151 """
152
153 def __call__(self, chain: HandlerChain, context: RequestContext, response: Response):
154 if not ProxyListener.DEFAULT_LISTENERS:
155 return
156
157 try:
158 super(DefaultListenerHandler, self).__call__(chain, context, response)
159 except _NoHandlerCalled:
160 # may be raised by the _DummyProxyListener, which is reached if no other listener is called,
161 # in which case we don't want to return a result or stop the chain.
162 return
163
164 def forward_request(
165 self, context: RequestContext, method: str, path: str, data: bytes, headers: Mapping
166 ):
167 request = context.request
168
169 return modify_and_forward(
170 method=method,
171 path=path,
172 data_bytes=data,
173 headers=headers,
174 forward_base_url=None,
175 listeners=[_DummyProxyListener()],
176 client_address=request.remote_addr,
177 server_address=request.host,
178 )
179
```
Path: `localstack/aws/handlers/__init__.py`
Content:
```
1 """ A set of common handlers to build an AWS server application."""
2
3 from .. import chain
4 from . import analytics, auth, codec, cors, fallback, internal, legacy, logging, region, service
5
6 enforce_cors = cors.CorsEnforcer()
7 add_cors_response_headers = cors.CorsResponseEnricher()
8 content_decoder = codec.ContentDecoder()
9 parse_service_name = service.ServiceNameParser()
10 parse_service_request = service.ServiceRequestParser()
11 add_account_id = auth.AccountIdEnricher()
12 inject_auth_header_if_missing = auth.MissingAuthHeaderInjector()
13 add_region_from_header = region.RegionContextEnricher()
14 log_exception = logging.ExceptionLogger()
15 log_response = logging.ResponseLogger()
16 count_service_request = analytics.ServiceRequestCounter()
17 handle_service_exception = service.ServiceExceptionSerializer()
18 handle_internal_failure = fallback.InternalFailureHandler()
19 serve_custom_service_request_handlers = chain.CompositeHandler()
20 serve_localstack_resources = internal.LocalstackResourceHandler()
21 run_custom_response_handlers = chain.CompositeResponseHandler()
22 parse_service_response = service.ServiceResponseParser()
23 # legacy compatibility handlers
24 serve_edge_router_rules = legacy.EdgeRouterHandler()
25 serve_default_listeners = legacy.DefaultListenerHandler()
26 pop_request_context = legacy.pop_request_context
27 push_request_context = legacy.push_request_context
28
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/localstack/aws/app.py b/localstack/aws/app.py
--- a/localstack/aws/app.py
+++ b/localstack/aws/app.py
@@ -57,6 +57,7 @@
self.response_handlers.extend(
[
handlers.parse_service_response,
+ handlers.set_close_connection_header,
handlers.run_custom_response_handlers,
handlers.add_cors_response_headers,
handlers.log_response,
diff --git a/localstack/aws/handlers/__init__.py b/localstack/aws/handlers/__init__.py
--- a/localstack/aws/handlers/__init__.py
+++ b/localstack/aws/handlers/__init__.py
@@ -23,5 +23,6 @@
# legacy compatibility handlers
serve_edge_router_rules = legacy.EdgeRouterHandler()
serve_default_listeners = legacy.DefaultListenerHandler()
+set_close_connection_header = legacy.set_close_connection_header
pop_request_context = legacy.pop_request_context
push_request_context = legacy.push_request_context
diff --git a/localstack/aws/handlers/legacy.py b/localstack/aws/handlers/legacy.py
--- a/localstack/aws/handlers/legacy.py
+++ b/localstack/aws/handlers/legacy.py
@@ -45,6 +45,18 @@
request_context.THREAD_LOCAL.request_context = None
+def set_close_connection_header(_chain: HandlerChain, context: RequestContext, response: Response):
+ """This is a hack to work around performance issues with h11 and boto. See
+ https://github.com/localstack/localstack/issues/6557"""
+ if conn := context.request.headers.get("Connection"):
+ if conn.lower() == "keep-alive":
+ # don't set Connection: close header if keep-alive is explicitly asked for
+ return
+
+ if "Connection" not in response.headers:
+ response.headers["Connection"] = "close"
+
+
class EdgeRouterHandler(RouterHandler):
def __init__(self, respond_not_found=False) -> None:
from localstack.services.edge import ROUTER
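A quick way to observe the effect of the patch is to check the response header on any service call; with the new handler the gateway replies with `Connection: close` unless the client explicitly asked for keep-alive, so urllib3 reopens connections instead of stalling on the h11 keep-alive path. This sketch assumes a patched LocalStack reachable on localhost:4566:

```python
import boto3

client = boto3.client(
    "sqs",
    endpoint_url="http://localhost:4566",
    region_name="eu-west-1",
    aws_access_key_id="x",
    aws_secret_access_key="x",
)

resp = client.list_queues()
# botocore exposes the raw (lower-cased) response headers here.
print(resp["ResponseMetadata"]["HTTPHeaders"].get("connection"))  # expected: "close"
```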
|
{"golden_diff": "diff --git a/localstack/aws/app.py b/localstack/aws/app.py\n--- a/localstack/aws/app.py\n+++ b/localstack/aws/app.py\n@@ -57,6 +57,7 @@\n self.response_handlers.extend(\n [\n handlers.parse_service_response,\n+ handlers.set_close_connection_header,\n handlers.run_custom_response_handlers,\n handlers.add_cors_response_headers,\n handlers.log_response,\ndiff --git a/localstack/aws/handlers/__init__.py b/localstack/aws/handlers/__init__.py\n--- a/localstack/aws/handlers/__init__.py\n+++ b/localstack/aws/handlers/__init__.py\n@@ -23,5 +23,6 @@\n # legacy compatibility handlers\n serve_edge_router_rules = legacy.EdgeRouterHandler()\n serve_default_listeners = legacy.DefaultListenerHandler()\n+set_close_connection_header = legacy.set_close_connection_header\n pop_request_context = legacy.pop_request_context\n push_request_context = legacy.push_request_context\ndiff --git a/localstack/aws/handlers/legacy.py b/localstack/aws/handlers/legacy.py\n--- a/localstack/aws/handlers/legacy.py\n+++ b/localstack/aws/handlers/legacy.py\n@@ -45,6 +45,18 @@\n request_context.THREAD_LOCAL.request_context = None\n \n \n+def set_close_connection_header(_chain: HandlerChain, context: RequestContext, response: Response):\n+ \"\"\"This is a hack to work around performance issues with h11 and boto. See\n+ https://github.com/localstack/localstack/issues/6557\"\"\"\n+ if conn := context.request.headers.get(\"Connection\"):\n+ if conn.lower() == \"keep-alive\":\n+ # don't set Connection: close header if keep-alive is explicitly asked for\n+ return\n+\n+ if \"Connection\" not in response.headers:\n+ response.headers[\"Connection\"] = \"close\"\n+\n+\n class EdgeRouterHandler(RouterHandler):\n def __init__(self, respond_not_found=False) -> None:\n from localstack.services.edge import ROUTER\n", "issue": "bug: SQS performance regression with ASF\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues\n\n### Current Behavior\n\nHi, I'm testing the image `localstack/localstack:0.14.5`, and setting `LEGACY_EDGE_PROXY=1` makes the following cleanup code much slower:\r\n\r\n`for queue in client.list_queues()['QueueUrls']: client.purge_queue(QueueUrl=queue)`\r\n\r\n`LEGACY_EDGE_PROXY=1`: `56.2 ms \u00b1 2.9 ms per loop (mean \u00b1 std. dev. of 7 runs, 10 loops each)`\r\n\r\n`LEGACY_EDGE_PROXY=0`: `258 ms \u00b1 17.6 ms per loop (mean \u00b1 std. dev. 
of 7 runs, 1 loop each)`\r\n\n\n### Expected Behavior\n\nI expected performance to improve or not to regress.\n\n### How are you starting LocalStack?\n\nWith a docker-compose file\n\n### Steps To Reproduce\n\n# Benchmark setup:\r\n\r\nDocker-compose file:\r\n```\r\nversion: \"3.9\"\r\nservices:\r\n localstack:\r\n image: localstack/localstack:latest\r\n environment:\r\n - DEFAULT_REGION=eu-west-1\r\n - SERVICES=s3,sns,sqs\r\n - LEGACY_EDGE_PROXY=1 # or 0\r\n ipc: host\r\n```\r\n\r\n```\r\n# docker-compose rm -f localstack && docker-compose up localstack\r\n```\r\n\r\n```\r\nimport boto3\r\n\r\nclient = boto3.resource(\r\n 'sqs',\r\n endpoint_url=\"http://localstack:4566/\",\r\n region_name='eu-west-1',\r\n aws_secret_access_key=\"x\",\r\n aws_access_key_id=\"x\",\r\n use_ssl=False,\r\n).meta.client\r\n\r\nfor i in range(10):\r\n client.create_queue(QueueName=str(i))\r\n\r\nqueues = client.list_queues()['QueueUrls']\r\nassert len(queues) == 10\r\n```\r\n\r\n```\r\nIn [3]: %timeit for queue in client.list_queues()['QueueUrls']: client.purge_queue(QueueUrl=queue)\r\n```\n\n### Environment\n\n```markdown\n- OS: Ubuntu 22.04 LTS, Linux 5.15.0-40-generic #43-Ubuntu SMP Wed Jun 15 12:54:21 UTC 2022 x86_64 \r\n- docker-compose 1.29.2\r\n- Docker: 20.10.12, build 20.10.12-0ubuntu4\r\n- LocalStack: 0.14.5\n```\n\n\n### Anything else?\n\nResults don't improve over time, it's not a cold start issue.\r\n\r\nhttps://github.com/localstack/localstack/issues/6398\n", "before_files": [{"content": "from localstack.aws import handlers\nfrom localstack.aws.handlers.metric_handler import MetricHandler\nfrom localstack.aws.handlers.service_plugin import ServiceLoader\nfrom localstack.services.plugins import SERVICE_PLUGINS, ServiceManager, ServicePluginManager\n\nfrom .gateway import Gateway\nfrom .handlers.fallback import EmptyResponseHandler\nfrom .handlers.service import ServiceRequestRouter\n\n\nclass LocalstackAwsGateway(Gateway):\n def __init__(self, service_manager: ServiceManager = None) -> None:\n super().__init__()\n\n # basic server components\n self.service_manager = service_manager or ServicePluginManager()\n self.service_request_router = ServiceRequestRouter()\n # lazy-loads services into the router\n load_service = ServiceLoader(self.service_manager, self.service_request_router)\n\n metric_collector = MetricHandler()\n # the main request handler chain\n self.request_handlers.extend(\n [\n handlers.push_request_context,\n metric_collector.create_metric_handler_item,\n handlers.parse_service_name, # enforce_cors and content_decoder depend on the service name\n handlers.enforce_cors,\n handlers.content_decoder,\n handlers.serve_localstack_resources, # try to serve internal resources in /_localstack first\n handlers.serve_default_listeners, # legacy proxy default listeners\n handlers.serve_edge_router_rules,\n # start aws handler chain\n handlers.inject_auth_header_if_missing,\n handlers.add_region_from_header,\n handlers.add_account_id,\n handlers.parse_service_request,\n metric_collector.record_parsed_request,\n handlers.serve_custom_service_request_handlers,\n load_service, # once we have the service request we can make sure we load the service\n self.service_request_router, # once we know the service is loaded we can route the request\n # if the chain is still running, set an empty response\n EmptyResponseHandler(404, b'{\"message\": \"Not Found\"}'),\n ]\n )\n\n # exception handlers in the chain\n self.exception_handlers.extend(\n [\n handlers.log_exception,\n handlers.handle_service_exception,\n 
handlers.handle_internal_failure,\n ]\n )\n\n # response post-processing\n self.response_handlers.extend(\n [\n handlers.parse_service_response,\n handlers.run_custom_response_handlers,\n handlers.add_cors_response_headers,\n handlers.log_response,\n handlers.count_service_request,\n handlers.pop_request_context,\n metric_collector.update_metric_collection,\n ]\n )\n\n\ndef main():\n \"\"\"\n Serve the LocalstackGateway with the default configuration directly through hypercorn. This is mostly for\n development purposes and documentation on how to serve the Gateway.\n \"\"\"\n from .serving.hypercorn import serve\n\n use_ssl = True\n port = 4566\n\n # serve the LocalStackAwsGateway in a dev app\n from localstack.utils.bootstrap import setup_logging\n\n setup_logging()\n\n if use_ssl:\n from localstack.services.generic_proxy import (\n GenericProxy,\n install_predefined_cert_if_available,\n )\n\n install_predefined_cert_if_available()\n _, cert_file_name, key_file_name = GenericProxy.create_ssl_cert(serial_number=port)\n ssl_creds = (cert_file_name, key_file_name)\n else:\n ssl_creds = None\n\n gw = LocalstackAwsGateway(SERVICE_PLUGINS)\n\n serve(gw, use_reloader=True, port=port, ssl_creds=ssl_creds)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "localstack/aws/app.py"}, {"content": "\"\"\" Handlers for compatibility with legacy edge proxy and the quart http framework.\"\"\"\n\nimport logging\nimport re\nfrom typing import Mapping\n\nfrom requests import Response as RequestsResponse\n\nfrom localstack.constants import HEADER_LOCALSTACK_EDGE_URL, HEADER_LOCALSTACK_REQUEST_URL\nfrom localstack.http import Response\nfrom localstack.http.request import restore_payload\nfrom localstack.services.generic_proxy import ProxyListener, modify_and_forward\n\nfrom ..api import RequestContext\nfrom ..chain import Handler, HandlerChain\nfrom .routes import RouterHandler\n\nLOG = logging.getLogger(__name__)\n\n\ndef push_request_context(_chain: HandlerChain, context: RequestContext, _response: Response):\n # hack for legacy compatibility. various parts of localstack access the global flask/quart/our own request\n # context. 
since we're neither in a flask nor a quart context, we're pushing our own context object into their\n # proxy objects, which is terrible, but works because mostly code just accesses \"context.request\", so we don't\n # have to bother pushing a real quart/flask context.\n import flask.globals\n import quart.globals\n\n from localstack.utils.aws import request_context\n\n quart.globals._request_ctx_stack.push(context)\n flask.globals._request_ctx_stack.push(context)\n request_context.THREAD_LOCAL.request_context = context.request\n\n\ndef pop_request_context(_chain: HandlerChain, _context: RequestContext, _response: Response):\n # hack for legacy compatibility\n import flask.globals\n import quart.globals\n\n from localstack.utils.aws import request_context\n\n quart.globals._request_ctx_stack.pop()\n flask.globals._request_ctx_stack.pop()\n request_context.THREAD_LOCAL.request_context = None\n\n\nclass EdgeRouterHandler(RouterHandler):\n def __init__(self, respond_not_found=False) -> None:\n from localstack.services.edge import ROUTER\n\n super().__init__(ROUTER, respond_not_found)\n\n\nclass GenericProxyHandler(Handler):\n \"\"\"\n This handler maps HandlerChain requests to the generic proxy ProxyListener interface `forward_request`.\n \"\"\"\n\n def __call__(self, chain: HandlerChain, context: RequestContext, response: Response):\n request = context.request\n\n # a werkzeug Request consumes form/multipart data from the socket stream, so we need to restore the payload here\n data = restore_payload(request)\n\n # TODO: rethink whether this proxy handling is necessary\n context.request.headers[HEADER_LOCALSTACK_REQUEST_URL] = context.request.base_url\n\n result = self.forward_request(\n context,\n method=request.method,\n path=request.full_path if request.query_string else request.path,\n data=data,\n headers=request.headers,\n )\n\n if type(result) == int:\n chain.respond(status_code=result)\n return\n\n if isinstance(result, tuple):\n # special case for Kinesis SubscribeToShard\n if len(result) == 2:\n response.status_code = 200\n response.set_response(result[0])\n response.headers.update(dict(result[1]))\n chain.stop()\n return\n\n if isinstance(result, RequestsResponse):\n response.status_code = result.status_code\n response.set_response(result.content)\n # make sure headers are set after the content, so potential content-length headers are overwritten\n response.headers.update(dict(result.headers))\n\n # make sure content-length is re-calculated correctly, unless it's a HEAD request\n if request.method != \"HEAD\":\n length = response.calculate_content_length()\n if length is not None:\n response.headers[\"Content-Length\"] = str(length)\n chain.stop()\n return\n\n raise ValueError(\"cannot create response for result %s\" % result)\n\n def forward_request(\n self, context: RequestContext, method: str, path: str, data: bytes, headers: Mapping\n ):\n raise NotImplementedError\n\n\nclass LegacyPluginHandler(GenericProxyHandler):\n \"\"\"\n This adapter exposes Services that are developed as ProxyListener as Handler.\n \"\"\"\n\n def forward_request(\n self, context: RequestContext, method: str, path: str, data: bytes, headers: Mapping\n ):\n from localstack.services.edge import do_forward_request\n\n # TODO: rethink whether this proxy handling is necessary\n request = context.request\n orig_req_url = request.headers.pop(HEADER_LOCALSTACK_REQUEST_URL, \"\")\n request.headers[HEADER_LOCALSTACK_EDGE_URL] = (\n re.sub(r\"^([^:]+://[^/]+).*\", r\"\\1\", orig_req_url) or request.host_url\n )\n\n 
return do_forward_request(\n api=context.service.service_name,\n method=method,\n path=path,\n data=data,\n headers=headers,\n port=None,\n )\n\n\nclass _NoHandlerCalled(Exception):\n pass\n\n\nclass _DummyProxyListener(ProxyListener):\n def forward_request(self, method, path, data, headers):\n raise _NoHandlerCalled\n\n\nclass DefaultListenerHandler(GenericProxyHandler):\n \"\"\"\n Adapter that exposes the ProxyListener.DEFAULT_LISTENERS as a Handler.\n \"\"\"\n\n def __call__(self, chain: HandlerChain, context: RequestContext, response: Response):\n if not ProxyListener.DEFAULT_LISTENERS:\n return\n\n try:\n super(DefaultListenerHandler, self).__call__(chain, context, response)\n except _NoHandlerCalled:\n # may be raised by the _DummyProxyListener, which is reached if no other listener is called,\n # in which case we don't want to return a result or stop the chain.\n return\n\n def forward_request(\n self, context: RequestContext, method: str, path: str, data: bytes, headers: Mapping\n ):\n request = context.request\n\n return modify_and_forward(\n method=method,\n path=path,\n data_bytes=data,\n headers=headers,\n forward_base_url=None,\n listeners=[_DummyProxyListener()],\n client_address=request.remote_addr,\n server_address=request.host,\n )\n", "path": "localstack/aws/handlers/legacy.py"}, {"content": "\"\"\" A set of common handlers to build an AWS server application.\"\"\"\n\nfrom .. import chain\nfrom . import analytics, auth, codec, cors, fallback, internal, legacy, logging, region, service\n\nenforce_cors = cors.CorsEnforcer()\nadd_cors_response_headers = cors.CorsResponseEnricher()\ncontent_decoder = codec.ContentDecoder()\nparse_service_name = service.ServiceNameParser()\nparse_service_request = service.ServiceRequestParser()\nadd_account_id = auth.AccountIdEnricher()\ninject_auth_header_if_missing = auth.MissingAuthHeaderInjector()\nadd_region_from_header = region.RegionContextEnricher()\nlog_exception = logging.ExceptionLogger()\nlog_response = logging.ResponseLogger()\ncount_service_request = analytics.ServiceRequestCounter()\nhandle_service_exception = service.ServiceExceptionSerializer()\nhandle_internal_failure = fallback.InternalFailureHandler()\nserve_custom_service_request_handlers = chain.CompositeHandler()\nserve_localstack_resources = internal.LocalstackResourceHandler()\nrun_custom_response_handlers = chain.CompositeResponseHandler()\nparse_service_response = service.ServiceResponseParser()\n# legacy compatibility handlers\nserve_edge_router_rules = legacy.EdgeRouterHandler()\nserve_default_listeners = legacy.DefaultListenerHandler()\npop_request_context = legacy.pop_request_context\npush_request_context = legacy.push_request_context\n", "path": "localstack/aws/handlers/__init__.py"}], "after_files": [{"content": "from localstack.aws import handlers\nfrom localstack.aws.handlers.metric_handler import MetricHandler\nfrom localstack.aws.handlers.service_plugin import ServiceLoader\nfrom localstack.services.plugins import SERVICE_PLUGINS, ServiceManager, ServicePluginManager\n\nfrom .gateway import Gateway\nfrom .handlers.fallback import EmptyResponseHandler\nfrom .handlers.service import ServiceRequestRouter\n\n\nclass LocalstackAwsGateway(Gateway):\n def __init__(self, service_manager: ServiceManager = None) -> None:\n super().__init__()\n\n # basic server components\n self.service_manager = service_manager or ServicePluginManager()\n self.service_request_router = ServiceRequestRouter()\n # lazy-loads services into the router\n load_service = 
ServiceLoader(self.service_manager, self.service_request_router)\n\n metric_collector = MetricHandler()\n # the main request handler chain\n self.request_handlers.extend(\n [\n handlers.push_request_context,\n metric_collector.create_metric_handler_item,\n handlers.parse_service_name, # enforce_cors and content_decoder depend on the service name\n handlers.enforce_cors,\n handlers.content_decoder,\n handlers.serve_localstack_resources, # try to serve internal resources in /_localstack first\n handlers.serve_default_listeners, # legacy proxy default listeners\n handlers.serve_edge_router_rules,\n # start aws handler chain\n handlers.inject_auth_header_if_missing,\n handlers.add_region_from_header,\n handlers.add_account_id,\n handlers.parse_service_request,\n metric_collector.record_parsed_request,\n handlers.serve_custom_service_request_handlers,\n load_service, # once we have the service request we can make sure we load the service\n self.service_request_router, # once we know the service is loaded we can route the request\n # if the chain is still running, set an empty response\n EmptyResponseHandler(404, b'{\"message\": \"Not Found\"}'),\n ]\n )\n\n # exception handlers in the chain\n self.exception_handlers.extend(\n [\n handlers.log_exception,\n handlers.handle_service_exception,\n handlers.handle_internal_failure,\n ]\n )\n\n # response post-processing\n self.response_handlers.extend(\n [\n handlers.parse_service_response,\n handlers.set_close_connection_header,\n handlers.run_custom_response_handlers,\n handlers.add_cors_response_headers,\n handlers.log_response,\n handlers.count_service_request,\n handlers.pop_request_context,\n metric_collector.update_metric_collection,\n ]\n )\n\n\ndef main():\n \"\"\"\n Serve the LocalstackGateway with the default configuration directly through hypercorn. This is mostly for\n development purposes and documentation on how to serve the Gateway.\n \"\"\"\n from .serving.hypercorn import serve\n\n use_ssl = True\n port = 4566\n\n # serve the LocalStackAwsGateway in a dev app\n from localstack.utils.bootstrap import setup_logging\n\n setup_logging()\n\n if use_ssl:\n from localstack.services.generic_proxy import (\n GenericProxy,\n install_predefined_cert_if_available,\n )\n\n install_predefined_cert_if_available()\n _, cert_file_name, key_file_name = GenericProxy.create_ssl_cert(serial_number=port)\n ssl_creds = (cert_file_name, key_file_name)\n else:\n ssl_creds = None\n\n gw = LocalstackAwsGateway(SERVICE_PLUGINS)\n\n serve(gw, use_reloader=True, port=port, ssl_creds=ssl_creds)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "localstack/aws/app.py"}, {"content": "\"\"\" Handlers for compatibility with legacy edge proxy and the quart http framework.\"\"\"\n\nimport logging\nimport re\nfrom typing import Mapping\n\nfrom requests import Response as RequestsResponse\n\nfrom localstack.constants import HEADER_LOCALSTACK_EDGE_URL, HEADER_LOCALSTACK_REQUEST_URL\nfrom localstack.http import Response\nfrom localstack.http.request import restore_payload\nfrom localstack.services.generic_proxy import ProxyListener, modify_and_forward\n\nfrom ..api import RequestContext\nfrom ..chain import Handler, HandlerChain\nfrom .routes import RouterHandler\n\nLOG = logging.getLogger(__name__)\n\n\ndef push_request_context(_chain: HandlerChain, context: RequestContext, _response: Response):\n # hack for legacy compatibility. various parts of localstack access the global flask/quart/our own request\n # context. 
since we're neither in a flask nor a quart context, we're pushing our own context object into their\n # proxy objects, which is terrible, but works because mostly code just accesses \"context.request\", so we don't\n # have to bother pushing a real quart/flask context.\n import flask.globals\n import quart.globals\n\n from localstack.utils.aws import request_context\n\n quart.globals._request_ctx_stack.push(context)\n flask.globals._request_ctx_stack.push(context)\n request_context.THREAD_LOCAL.request_context = context.request\n\n\ndef pop_request_context(_chain: HandlerChain, _context: RequestContext, _response: Response):\n # hack for legacy compatibility\n import flask.globals\n import quart.globals\n\n from localstack.utils.aws import request_context\n\n quart.globals._request_ctx_stack.pop()\n flask.globals._request_ctx_stack.pop()\n request_context.THREAD_LOCAL.request_context = None\n\n\ndef set_close_connection_header(_chain: HandlerChain, context: RequestContext, response: Response):\n \"\"\"This is a hack to work around performance issues with h11 and boto. See\n https://github.com/localstack/localstack/issues/6557\"\"\"\n if conn := context.request.headers.get(\"Connection\"):\n if conn.lower() == \"keep-alive\":\n # don't set Connection: close header if keep-alive is explicitly asked for\n return\n\n if \"Connection\" not in response.headers:\n response.headers[\"Connection\"] = \"close\"\n\n\nclass EdgeRouterHandler(RouterHandler):\n def __init__(self, respond_not_found=False) -> None:\n from localstack.services.edge import ROUTER\n\n super().__init__(ROUTER, respond_not_found)\n\n\nclass GenericProxyHandler(Handler):\n \"\"\"\n This handler maps HandlerChain requests to the generic proxy ProxyListener interface `forward_request`.\n \"\"\"\n\n def __call__(self, chain: HandlerChain, context: RequestContext, response: Response):\n request = context.request\n\n # a werkzeug Request consumes form/multipart data from the socket stream, so we need to restore the payload here\n data = restore_payload(request)\n\n # TODO: rethink whether this proxy handling is necessary\n context.request.headers[HEADER_LOCALSTACK_REQUEST_URL] = context.request.base_url\n\n result = self.forward_request(\n context,\n method=request.method,\n path=request.full_path if request.query_string else request.path,\n data=data,\n headers=request.headers,\n )\n\n if type(result) == int:\n chain.respond(status_code=result)\n return\n\n if isinstance(result, tuple):\n # special case for Kinesis SubscribeToShard\n if len(result) == 2:\n response.status_code = 200\n response.set_response(result[0])\n response.headers.update(dict(result[1]))\n chain.stop()\n return\n\n if isinstance(result, RequestsResponse):\n response.status_code = result.status_code\n response.set_response(result.content)\n # make sure headers are set after the content, so potential content-length headers are overwritten\n response.headers.update(dict(result.headers))\n\n # make sure content-length is re-calculated correctly, unless it's a HEAD request\n if request.method != \"HEAD\":\n length = response.calculate_content_length()\n if length is not None:\n response.headers[\"Content-Length\"] = str(length)\n chain.stop()\n return\n\n raise ValueError(\"cannot create response for result %s\" % result)\n\n def forward_request(\n self, context: RequestContext, method: str, path: str, data: bytes, headers: Mapping\n ):\n raise NotImplementedError\n\n\nclass LegacyPluginHandler(GenericProxyHandler):\n \"\"\"\n This adapter exposes Services that are 
developed as ProxyListener as Handler.\n \"\"\"\n\n def forward_request(\n self, context: RequestContext, method: str, path: str, data: bytes, headers: Mapping\n ):\n from localstack.services.edge import do_forward_request\n\n # TODO: rethink whether this proxy handling is necessary\n request = context.request\n orig_req_url = request.headers.pop(HEADER_LOCALSTACK_REQUEST_URL, \"\")\n request.headers[HEADER_LOCALSTACK_EDGE_URL] = (\n re.sub(r\"^([^:]+://[^/]+).*\", r\"\\1\", orig_req_url) or request.host_url\n )\n\n return do_forward_request(\n api=context.service.service_name,\n method=method,\n path=path,\n data=data,\n headers=headers,\n port=None,\n )\n\n\nclass _NoHandlerCalled(Exception):\n pass\n\n\nclass _DummyProxyListener(ProxyListener):\n def forward_request(self, method, path, data, headers):\n raise _NoHandlerCalled\n\n\nclass DefaultListenerHandler(GenericProxyHandler):\n \"\"\"\n Adapter that exposes the ProxyListener.DEFAULT_LISTENERS as a Handler.\n \"\"\"\n\n def __call__(self, chain: HandlerChain, context: RequestContext, response: Response):\n if not ProxyListener.DEFAULT_LISTENERS:\n return\n\n try:\n super(DefaultListenerHandler, self).__call__(chain, context, response)\n except _NoHandlerCalled:\n # may be raised by the _DummyProxyListener, which is reached if no other listener is called,\n # in which case we don't want to return a result or stop the chain.\n return\n\n def forward_request(\n self, context: RequestContext, method: str, path: str, data: bytes, headers: Mapping\n ):\n request = context.request\n\n return modify_and_forward(\n method=method,\n path=path,\n data_bytes=data,\n headers=headers,\n forward_base_url=None,\n listeners=[_DummyProxyListener()],\n client_address=request.remote_addr,\n server_address=request.host,\n )\n", "path": "localstack/aws/handlers/legacy.py"}, {"content": "\"\"\" A set of common handlers to build an AWS server application.\"\"\"\n\nfrom .. import chain\nfrom . import analytics, auth, codec, cors, fallback, internal, legacy, logging, region, service\n\nenforce_cors = cors.CorsEnforcer()\nadd_cors_response_headers = cors.CorsResponseEnricher()\ncontent_decoder = codec.ContentDecoder()\nparse_service_name = service.ServiceNameParser()\nparse_service_request = service.ServiceRequestParser()\nadd_account_id = auth.AccountIdEnricher()\ninject_auth_header_if_missing = auth.MissingAuthHeaderInjector()\nadd_region_from_header = region.RegionContextEnricher()\nlog_exception = logging.ExceptionLogger()\nlog_response = logging.ResponseLogger()\ncount_service_request = analytics.ServiceRequestCounter()\nhandle_service_exception = service.ServiceExceptionSerializer()\nhandle_internal_failure = fallback.InternalFailureHandler()\nserve_custom_service_request_handlers = chain.CompositeHandler()\nserve_localstack_resources = internal.LocalstackResourceHandler()\nrun_custom_response_handlers = chain.CompositeResponseHandler()\nparse_service_response = service.ServiceResponseParser()\n# legacy compatibility handlers\nserve_edge_router_rules = legacy.EdgeRouterHandler()\nserve_default_listeners = legacy.DefaultListenerHandler()\nset_close_connection_header = legacy.set_close_connection_header\npop_request_context = legacy.pop_request_context\npush_request_context = legacy.push_request_context\n", "path": "localstack/aws/handlers/__init__.py"}]}
| 3,837 | 442 |
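The record above ends with a LocalStack patch that registers a `set_close_connection_header` response handler in the gateway's handler chain (visible in the escaped `after_files` excerpt). As a rough illustration of that handler-chain pattern, here is a minimal sketch; the `RequestContext` and `Response` stand-ins below are simplified placeholders, not LocalStack's real classes:

```python
# Sketch only: simplified stand-ins for LocalStack's RequestContext/Response.
class Response:
    def __init__(self):
        self.headers = {}


class RequestContext:
    def __init__(self, request_headers):
        self.request_headers = request_headers


def set_close_connection_header(_chain, context, response):
    # Mirrors the handler in the patch: respect an explicit keep-alive,
    # otherwise ask the client to close the connection after the response.
    conn = context.request_headers.get("Connection")
    if conn and conn.lower() == "keep-alive":
        return
    if "Connection" not in response.headers:
        response.headers["Connection"] = "close"
```

In the actual patch the handler is exported from `localstack.aws.handlers` and appended to the gateway's `response_handlers` list alongside the logging and CORS handlers.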
gh_patches_debug_19863
|
rasdani/github-patches
|
git_diff
|
quantumlib__Cirq-138
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Should Circuit#append be chainable?
It seems like a common pattern to want to do
circuit = Circuit().append(o1).append([o2, o3])
so should append return the circuit to allow chaining?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cirq/circuits/circuit.py`
Content:
```
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """The circuit data structure for the sequenced phase."""
16
17 import itertools
18
19 from typing import Iterable, List, Optional, Set
20
21 from cirq import ops
22 from cirq.circuits.insert_strategy import InsertStrategy
23 from cirq.circuits.moment import Moment
24 from cirq.ops import QubitId
25
26
27 class Circuit(object):
28 """A mutable list of groups of operations to apply to some qubits."""
29
30 def __init__(self, moments: Iterable[Moment] = ()):
31 """Initializes a circuit.
32
33 Args:
34 moments: The initial list of moments defining the circuit.
35 """
36 self.moments = list(moments)
37
38 def __eq__(self, other):
39 if not isinstance(other, type(self)):
40 return NotImplemented
41 return self.moments == other.moments
42
43 def __ne__(self, other):
44 return not self == other
45
46 __hash__ = None
47
48 def _first_moment_operating_on(self, qubits: Iterable[ops.QubitId],
49 indices: Iterable[int]) -> Optional[int]:
50 qubits = frozenset(qubits)
51 for m in indices:
52 if self._has_op_at(m, qubits):
53 return m
54 return None
55
56 def next_moment_operating_on(self,
57 qubits: Iterable[ops.QubitId],
58 start_moment_index: int = 0,
59 max_distance: int = None) -> Optional[int]:
60 """Finds the index of the next moment that touches the given qubits.
61
62 Args:
63 qubits: We're looking for operations affecting any of these qubits.
64 start_moment_index: The starting point of the search.
65 max_distance: The number of moments (starting from the start index
66 and moving forward) to check. Defaults to no limit.
67
68 Returns:
69 None if there is no matching moment, otherwise the index of the
70 earliest matching moment.
71
72 Raises:
73 ValueError: negative max_distance.
74 """
75 if max_distance is None:
76 max_distance = float('inf')
77 elif max_distance < 0:
78 raise ValueError('Negative max_distance: {}'.format(max_distance))
79
80 # Don't bother searching indices past the end of the list.
81 max_distance = int(
82 min(max_distance, len(self.moments) - start_moment_index))
83
84 return self._first_moment_operating_on(
85 qubits,
86 range(start_moment_index, start_moment_index + max_distance))
87
88 def prev_moment_operating_on(
89 self,
90 qubits: List[ops.QubitId],
91 end_moment_index: Optional[int] = None,
92 max_distance: Optional[int] = None) -> Optional[int]:
93 """Finds the index of the next moment that touches the given qubits.
94
95 Args:
96 qubits: We're looking for operations affecting any of these qubits.
97 end_moment_index: The moment index just after the starting point of
98 the reverse search. Defaults to the length of the list of
99 moments.
100 max_distance: The number of moments (starting just before from the
101 end index and moving backward) to check. Defaults to no limit.
102
103 Returns:
104 None if there is no matching moment, otherwise the index of the
105 latest matching moment.
106
107 Raises:
108 ValueError: negative max_distance.
109 """
110 if max_distance is None:
111 max_distance = float('inf')
112 elif max_distance < 0:
113 raise ValueError('Negative max_distance: {}'.format(max_distance))
114
115 if end_moment_index is None:
116 end_moment_index = len(self.moments)
117
118 # Don't bother searching indices past the start of the list.
119 max_distance = min(end_moment_index, max_distance)
120
121 # Don't bother searching indices past the end of the list.
122 if end_moment_index > len(self.moments):
123 d = end_moment_index - len(self.moments)
124 end_moment_index -= d
125 max_distance -= d
126 if max_distance <= 0:
127 return None
128
129 return self._first_moment_operating_on(qubits,
130 (end_moment_index - k - 1
131 for k in range(max_distance)))
132
133 def operation_at(self, qubit: ops.QubitId,
134 moment_index: int) -> Optional[ops.Operation]:
135 """Finds the operation on a qubit within a moment, if any.
136
137 Args:
138 qubit: The qubit to check for an operation on.
139 moment_index: The index of the moment to check for an operation
140 within. Allowed to be beyond the end of the circuit.
141
142 Returns:
143 None if there is no operation on the qubit at the given moment, or
144 else the operation.
145 """
146 if not 0 <= moment_index < len(self.moments):
147 return None
148 for op in self.moments[moment_index].operations:
149 if qubit in op.qubits:
150 return op
151 return None
152
153 def clear_operations_touching(self, qubits: Iterable[ops.QubitId],
154 moment_indices: Iterable[int]):
155 """Clears operations that are touching given qubits at given moments.
156
157 Args:
158 qubits: The qubits to check for operations on.
159 moment_indices: The indices of moments to check for operations
160 within.
161 """
162 qubits = frozenset(qubits)
163 for k in moment_indices:
164 if 0 <= k < len(self.moments):
165 self.moments[k] = self.moments[k].without_operations_touching(
166 qubits)
167
168 def _pick_or_create_inserted_op_moment_index(
169 self, splitter_index: int, op: ops.Operation,
170 strategy: InsertStrategy) -> int:
171 """Determines and prepares where an insertion will occur.
172
173 Args:
174 splitter_index: The index to insert at.
175 op: The operation that will be inserted.
176 strategy: The insertion strategy.
177
178 Returns:
179 The index of the (possibly new) moment where the insertion should
180 occur.
181
182 Raises:
183 ValueError: Unrecognized append strategy.
184 """
185
186 if (strategy is InsertStrategy.NEW or
187 strategy is InsertStrategy.NEW_THEN_INLINE):
188 self.moments.insert(splitter_index, Moment())
189 return splitter_index
190
191 if strategy is InsertStrategy.INLINE:
192 if (not self._has_op_at(splitter_index - 1, op.qubits) and
193 0 <= splitter_index - 1 < len(self.moments)):
194 return splitter_index - 1
195
196 return self._pick_or_create_inserted_op_moment_index(
197 splitter_index, op, InsertStrategy.NEW)
198
199 if strategy is InsertStrategy.EARLIEST:
200 if not self._has_op_at(splitter_index, op.qubits):
201 p = self.prev_moment_operating_on(op.qubits, splitter_index)
202 return p + 1 if p is not None else 0
203
204 return self._pick_or_create_inserted_op_moment_index(
205 splitter_index, op, InsertStrategy.INLINE)
206
207 raise ValueError('Unrecognized append strategy: {}'.format(strategy))
208
209 def _has_op_at(self, moment_index, qubits):
210 return (0 <= moment_index < len(self.moments) and
211 self.moments[moment_index].operates_on(qubits))
212
213 def insert(
214 self,
215 index: int,
216 operation_tree: ops.OP_TREE,
217 strategy: InsertStrategy = InsertStrategy.NEW_THEN_INLINE) -> int:
218 """Inserts operations into the middle of the circuit.
219
220 Args:
221 index: The index to insert all of the operations at.
222 operation_tree: An operation or tree of operations.
223 strategy: How to pick/create the moment to put operations into.
224
225 Returns:
226 The insertion index that will place operations just after the
227 operations that were inserted by this method.
228
229 Raises:
230 IndexError: Bad insertion index.
231 ValueError: Bad insertion strategy.
232 """
233 if not 0 <= index <= len(self.moments):
234 raise IndexError('Insert index out of range: {}'.format(index))
235
236 k = index
237 for op in ops.flatten_op_tree(operation_tree):
238 p = self._pick_or_create_inserted_op_moment_index(k, op, strategy)
239 while p >= len(self.moments):
240 self.moments.append(Moment())
241 self.moments[p] = self.moments[p].with_operation(op)
242 k = max(k, p + 1)
243 if strategy is InsertStrategy.NEW_THEN_INLINE:
244 strategy = InsertStrategy.INLINE
245 return k
246
247 def append(
248 self,
249 operation_tree: ops.OP_TREE,
250 strategy: InsertStrategy = InsertStrategy.NEW_THEN_INLINE):
251 """Appends operations onto the end of the circuit.
252
253 Args:
254 operation_tree: An operation or tree of operations.
255 strategy: How to pick/create the moment to put operations into.
256 """
257 self.insert(len(self.moments), operation_tree, strategy)
258
259 def qubits(self) -> Set[QubitId]:
260 """Returns the qubits acted upon by Operations in this circuit."""
261 return frozenset(q for m in self.moments for q in m.qubits)
262
263 def __repr__(self):
264 moment_lines = ('\n ' + repr(moment) for moment in self.moments)
265 return 'Circuit([{}])'.format(','.join(moment_lines))
266
267 def __str__(self):
268 moment_lines = ('\n ' + str(moment) for moment in self.moments)
269 return 'Circuit [{}\n]'.format(''.join(moment_lines))
270
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/cirq/circuits/circuit.py b/cirq/circuits/circuit.py
--- a/cirq/circuits/circuit.py
+++ b/cirq/circuits/circuit.py
@@ -14,8 +14,6 @@
"""The circuit data structure for the sequenced phase."""
-import itertools
-
from typing import Iterable, List, Optional, Set
from cirq import ops
@@ -35,6 +33,23 @@
"""
self.moments = list(moments)
+ @staticmethod
+ def from_ops(*operations: ops.OP_TREE,
+ strategy: InsertStrategy = InsertStrategy.NEW_THEN_INLINE
+ ) -> 'Circuit':
+ """Creates an empty circuit and appends the given operations.
+
+ Args:
+ operations: The operations to append to the new circuit.
+ strategy: How to append the operations.
+
+ Returns:
+ The constructed circuit containing the operations.
+ """
+ result = Circuit()
+ result.append(operations, strategy)
+ return result
+
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
|
{"golden_diff": "diff --git a/cirq/circuits/circuit.py b/cirq/circuits/circuit.py\n--- a/cirq/circuits/circuit.py\n+++ b/cirq/circuits/circuit.py\n@@ -14,8 +14,6 @@\n \n \"\"\"The circuit data structure for the sequenced phase.\"\"\"\n \n-import itertools\n-\n from typing import Iterable, List, Optional, Set\n \n from cirq import ops\n@@ -35,6 +33,23 @@\n \"\"\"\n self.moments = list(moments)\n \n+ @staticmethod\n+ def from_ops(*operations: ops.OP_TREE,\n+ strategy: InsertStrategy = InsertStrategy.NEW_THEN_INLINE\n+ ) -> 'Circuit':\n+ \"\"\"Creates an empty circuit and appends the given operations.\n+\n+ Args:\n+ operations: The operations to append to the new circuit.\n+ strategy: How to append the operations.\n+\n+ Returns:\n+ The constructed circuit containing the operations.\n+ \"\"\"\n+ result = Circuit()\n+ result.append(operations, strategy)\n+ return result\n+\n def __eq__(self, other):\n if not isinstance(other, type(self)):\n return NotImplemented\n", "issue": "Should Circuit#append be chainable?\nIt seems like a common pattern to want to do\r\ncircuit = Circuit().append(o1).append([o2, o3])\r\n?\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"The circuit data structure for the sequenced phase.\"\"\"\n\nimport itertools\n\nfrom typing import Iterable, List, Optional, Set\n\nfrom cirq import ops\nfrom cirq.circuits.insert_strategy import InsertStrategy\nfrom cirq.circuits.moment import Moment\nfrom cirq.ops import QubitId\n\n\nclass Circuit(object):\n \"\"\"A mutable list of groups of operations to apply to some qubits.\"\"\"\n\n def __init__(self, moments: Iterable[Moment] = ()):\n \"\"\"Initializes a circuit.\n\n Args:\n moments: The initial list of moments defining the circuit.\n \"\"\"\n self.moments = list(moments)\n\n def __eq__(self, other):\n if not isinstance(other, type(self)):\n return NotImplemented\n return self.moments == other.moments\n\n def __ne__(self, other):\n return not self == other\n\n __hash__ = None\n\n def _first_moment_operating_on(self, qubits: Iterable[ops.QubitId],\n indices: Iterable[int]) -> Optional[int]:\n qubits = frozenset(qubits)\n for m in indices:\n if self._has_op_at(m, qubits):\n return m\n return None\n\n def next_moment_operating_on(self,\n qubits: Iterable[ops.QubitId],\n start_moment_index: int = 0,\n max_distance: int = None) -> Optional[int]:\n \"\"\"Finds the index of the next moment that touches the given qubits.\n\n Args:\n qubits: We're looking for operations affecting any of these qubits.\n start_moment_index: The starting point of the search.\n max_distance: The number of moments (starting from the start index\n and moving forward) to check. 
Defaults to no limit.\n\n Returns:\n None if there is no matching moment, otherwise the index of the\n earliest matching moment.\n\n Raises:\n ValueError: negative max_distance.\n \"\"\"\n if max_distance is None:\n max_distance = float('inf')\n elif max_distance < 0:\n raise ValueError('Negative max_distance: {}'.format(max_distance))\n\n # Don't bother searching indices past the end of the list.\n max_distance = int(\n min(max_distance, len(self.moments) - start_moment_index))\n\n return self._first_moment_operating_on(\n qubits,\n range(start_moment_index, start_moment_index + max_distance))\n\n def prev_moment_operating_on(\n self,\n qubits: List[ops.QubitId],\n end_moment_index: Optional[int] = None,\n max_distance: Optional[int] = None) -> Optional[int]:\n \"\"\"Finds the index of the next moment that touches the given qubits.\n\n Args:\n qubits: We're looking for operations affecting any of these qubits.\n end_moment_index: The moment index just after the starting point of\n the reverse search. Defaults to the length of the list of\n moments.\n max_distance: The number of moments (starting just before from the\n end index and moving backward) to check. Defaults to no limit.\n\n Returns:\n None if there is no matching moment, otherwise the index of the\n latest matching moment.\n\n Raises:\n ValueError: negative max_distance.\n \"\"\"\n if max_distance is None:\n max_distance = float('inf')\n elif max_distance < 0:\n raise ValueError('Negative max_distance: {}'.format(max_distance))\n\n if end_moment_index is None:\n end_moment_index = len(self.moments)\n\n # Don't bother searching indices past the start of the list.\n max_distance = min(end_moment_index, max_distance)\n\n # Don't bother searching indices past the end of the list.\n if end_moment_index > len(self.moments):\n d = end_moment_index - len(self.moments)\n end_moment_index -= d\n max_distance -= d\n if max_distance <= 0:\n return None\n\n return self._first_moment_operating_on(qubits,\n (end_moment_index - k - 1\n for k in range(max_distance)))\n\n def operation_at(self, qubit: ops.QubitId,\n moment_index: int) -> Optional[ops.Operation]:\n \"\"\"Finds the operation on a qubit within a moment, if any.\n\n Args:\n qubit: The qubit to check for an operation on.\n moment_index: The index of the moment to check for an operation\n within. 
Allowed to be beyond the end of the circuit.\n\n Returns:\n None if there is no operation on the qubit at the given moment, or\n else the operation.\n \"\"\"\n if not 0 <= moment_index < len(self.moments):\n return None\n for op in self.moments[moment_index].operations:\n if qubit in op.qubits:\n return op\n return None\n\n def clear_operations_touching(self, qubits: Iterable[ops.QubitId],\n moment_indices: Iterable[int]):\n \"\"\"Clears operations that are touching given qubits at given moments.\n\n Args:\n qubits: The qubits to check for operations on.\n moment_indices: The indices of moments to check for operations\n within.\n \"\"\"\n qubits = frozenset(qubits)\n for k in moment_indices:\n if 0 <= k < len(self.moments):\n self.moments[k] = self.moments[k].without_operations_touching(\n qubits)\n\n def _pick_or_create_inserted_op_moment_index(\n self, splitter_index: int, op: ops.Operation,\n strategy: InsertStrategy) -> int:\n \"\"\"Determines and prepares where an insertion will occur.\n\n Args:\n splitter_index: The index to insert at.\n op: The operation that will be inserted.\n strategy: The insertion strategy.\n\n Returns:\n The index of the (possibly new) moment where the insertion should\n occur.\n\n Raises:\n ValueError: Unrecognized append strategy.\n \"\"\"\n\n if (strategy is InsertStrategy.NEW or\n strategy is InsertStrategy.NEW_THEN_INLINE):\n self.moments.insert(splitter_index, Moment())\n return splitter_index\n\n if strategy is InsertStrategy.INLINE:\n if (not self._has_op_at(splitter_index - 1, op.qubits) and\n 0 <= splitter_index - 1 < len(self.moments)):\n return splitter_index - 1\n\n return self._pick_or_create_inserted_op_moment_index(\n splitter_index, op, InsertStrategy.NEW)\n\n if strategy is InsertStrategy.EARLIEST:\n if not self._has_op_at(splitter_index, op.qubits):\n p = self.prev_moment_operating_on(op.qubits, splitter_index)\n return p + 1 if p is not None else 0\n\n return self._pick_or_create_inserted_op_moment_index(\n splitter_index, op, InsertStrategy.INLINE)\n\n raise ValueError('Unrecognized append strategy: {}'.format(strategy))\n\n def _has_op_at(self, moment_index, qubits):\n return (0 <= moment_index < len(self.moments) and\n self.moments[moment_index].operates_on(qubits))\n\n def insert(\n self,\n index: int,\n operation_tree: ops.OP_TREE,\n strategy: InsertStrategy = InsertStrategy.NEW_THEN_INLINE) -> int:\n \"\"\"Inserts operations into the middle of the circuit.\n\n Args:\n index: The index to insert all of the operations at.\n operation_tree: An operation or tree of operations.\n strategy: How to pick/create the moment to put operations into.\n\n Returns:\n The insertion index that will place operations just after the\n operations that were inserted by this method.\n\n Raises:\n IndexError: Bad insertion index.\n ValueError: Bad insertion strategy.\n \"\"\"\n if not 0 <= index <= len(self.moments):\n raise IndexError('Insert index out of range: {}'.format(index))\n\n k = index\n for op in ops.flatten_op_tree(operation_tree):\n p = self._pick_or_create_inserted_op_moment_index(k, op, strategy)\n while p >= len(self.moments):\n self.moments.append(Moment())\n self.moments[p] = self.moments[p].with_operation(op)\n k = max(k, p + 1)\n if strategy is InsertStrategy.NEW_THEN_INLINE:\n strategy = InsertStrategy.INLINE\n return k\n\n def append(\n self,\n operation_tree: ops.OP_TREE,\n strategy: InsertStrategy = InsertStrategy.NEW_THEN_INLINE):\n \"\"\"Appends operations onto the end of the circuit.\n\n Args:\n operation_tree: An operation or tree 
of operations.\n strategy: How to pick/create the moment to put operations into.\n \"\"\"\n self.insert(len(self.moments), operation_tree, strategy)\n\n def qubits(self) -> Set[QubitId]:\n \"\"\"Returns the qubits acted upon by Operations in this circuit.\"\"\"\n return frozenset(q for m in self.moments for q in m.qubits)\n\n def __repr__(self):\n moment_lines = ('\\n ' + repr(moment) for moment in self.moments)\n return 'Circuit([{}])'.format(','.join(moment_lines))\n\n def __str__(self):\n moment_lines = ('\\n ' + str(moment) for moment in self.moments)\n return 'Circuit [{}\\n]'.format(''.join(moment_lines))\n", "path": "cirq/circuits/circuit.py"}], "after_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"The circuit data structure for the sequenced phase.\"\"\"\n\nfrom typing import Iterable, List, Optional, Set\n\nfrom cirq import ops\nfrom cirq.circuits.insert_strategy import InsertStrategy\nfrom cirq.circuits.moment import Moment\nfrom cirq.ops import QubitId\n\n\nclass Circuit(object):\n \"\"\"A mutable list of groups of operations to apply to some qubits.\"\"\"\n\n def __init__(self, moments: Iterable[Moment] = ()):\n \"\"\"Initializes a circuit.\n\n Args:\n moments: The initial list of moments defining the circuit.\n \"\"\"\n self.moments = list(moments)\n\n @staticmethod\n def from_ops(*operations: ops.OP_TREE,\n strategy: InsertStrategy = InsertStrategy.NEW_THEN_INLINE\n ) -> 'Circuit':\n \"\"\"Creates an empty circuit and appends the given operations.\n\n Args:\n operations: The operations to append to the new circuit.\n strategy: How to append the operations.\n\n Returns:\n The constructed circuit containing the operations.\n \"\"\"\n result = Circuit()\n result.append(operations, strategy)\n return result\n\n def __eq__(self, other):\n if not isinstance(other, type(self)):\n return NotImplemented\n return self.moments == other.moments\n\n def __ne__(self, other):\n return not self == other\n\n __hash__ = None\n\n def _first_moment_operating_on(self, qubits: Iterable[ops.QubitId],\n indices: Iterable[int]) -> Optional[int]:\n qubits = frozenset(qubits)\n for m in indices:\n if self._has_op_at(m, qubits):\n return m\n return None\n\n def next_moment_operating_on(self,\n qubits: Iterable[ops.QubitId],\n start_moment_index: int = 0,\n max_distance: int = None) -> Optional[int]:\n \"\"\"Finds the index of the next moment that touches the given qubits.\n\n Args:\n qubits: We're looking for operations affecting any of these qubits.\n start_moment_index: The starting point of the search.\n max_distance: The number of moments (starting from the start index\n and moving forward) to check. 
Defaults to no limit.\n\n Returns:\n None if there is no matching moment, otherwise the index of the\n earliest matching moment.\n\n Raises:\n ValueError: negative max_distance.\n \"\"\"\n if max_distance is None:\n max_distance = float('inf')\n elif max_distance < 0:\n raise ValueError('Negative max_distance: {}'.format(max_distance))\n\n # Don't bother searching indices past the end of the list.\n max_distance = int(\n min(max_distance, len(self.moments) - start_moment_index))\n\n return self._first_moment_operating_on(\n qubits,\n range(start_moment_index, start_moment_index + max_distance))\n\n def prev_moment_operating_on(\n self,\n qubits: List[ops.QubitId],\n end_moment_index: Optional[int] = None,\n max_distance: Optional[int] = None) -> Optional[int]:\n \"\"\"Finds the index of the next moment that touches the given qubits.\n\n Args:\n qubits: We're looking for operations affecting any of these qubits.\n end_moment_index: The moment index just after the starting point of\n the reverse search. Defaults to the length of the list of\n moments.\n max_distance: The number of moments (starting just before from the\n end index and moving backward) to check. Defaults to no limit.\n\n Returns:\n None if there is no matching moment, otherwise the index of the\n latest matching moment.\n\n Raises:\n ValueError: negative max_distance.\n \"\"\"\n if max_distance is None:\n max_distance = float('inf')\n elif max_distance < 0:\n raise ValueError('Negative max_distance: {}'.format(max_distance))\n\n if end_moment_index is None:\n end_moment_index = len(self.moments)\n\n # Don't bother searching indices past the start of the list.\n max_distance = min(end_moment_index, max_distance)\n\n # Don't bother searching indices past the end of the list.\n if end_moment_index > len(self.moments):\n d = end_moment_index - len(self.moments)\n end_moment_index -= d\n max_distance -= d\n if max_distance <= 0:\n return None\n\n return self._first_moment_operating_on(qubits,\n (end_moment_index - k - 1\n for k in range(max_distance)))\n\n def operation_at(self, qubit: ops.QubitId,\n moment_index: int) -> Optional[ops.Operation]:\n \"\"\"Finds the operation on a qubit within a moment, if any.\n\n Args:\n qubit: The qubit to check for an operation on.\n moment_index: The index of the moment to check for an operation\n within. 
Allowed to be beyond the end of the circuit.\n\n Returns:\n None if there is no operation on the qubit at the given moment, or\n else the operation.\n \"\"\"\n if not 0 <= moment_index < len(self.moments):\n return None\n for op in self.moments[moment_index].operations:\n if qubit in op.qubits:\n return op\n return None\n\n def clear_operations_touching(self, qubits: Iterable[ops.QubitId],\n moment_indices: Iterable[int]):\n \"\"\"Clears operations that are touching given qubits at given moments.\n\n Args:\n qubits: The qubits to check for operations on.\n moment_indices: The indices of moments to check for operations\n within.\n \"\"\"\n qubits = frozenset(qubits)\n for k in moment_indices:\n if 0 <= k < len(self.moments):\n self.moments[k] = self.moments[k].without_operations_touching(\n qubits)\n\n def _pick_or_create_inserted_op_moment_index(\n self, splitter_index: int, op: ops.Operation,\n strategy: InsertStrategy) -> int:\n \"\"\"Determines and prepares where an insertion will occur.\n\n Args:\n splitter_index: The index to insert at.\n op: The operation that will be inserted.\n strategy: The insertion strategy.\n\n Returns:\n The index of the (possibly new) moment where the insertion should\n occur.\n\n Raises:\n ValueError: Unrecognized append strategy.\n \"\"\"\n\n if (strategy is InsertStrategy.NEW or\n strategy is InsertStrategy.NEW_THEN_INLINE):\n self.moments.insert(splitter_index, Moment())\n return splitter_index\n\n if strategy is InsertStrategy.INLINE:\n if (not self._has_op_at(splitter_index - 1, op.qubits) and\n 0 <= splitter_index - 1 < len(self.moments)):\n return splitter_index - 1\n\n return self._pick_or_create_inserted_op_moment_index(\n splitter_index, op, InsertStrategy.NEW)\n\n if strategy is InsertStrategy.EARLIEST:\n if not self._has_op_at(splitter_index, op.qubits):\n p = self.prev_moment_operating_on(op.qubits, splitter_index)\n return p + 1 if p is not None else 0\n\n return self._pick_or_create_inserted_op_moment_index(\n splitter_index, op, InsertStrategy.INLINE)\n\n raise ValueError('Unrecognized append strategy: {}'.format(strategy))\n\n def _has_op_at(self, moment_index, qubits):\n return (0 <= moment_index < len(self.moments) and\n self.moments[moment_index].operates_on(qubits))\n\n def insert(\n self,\n index: int,\n operation_tree: ops.OP_TREE,\n strategy: InsertStrategy = InsertStrategy.NEW_THEN_INLINE) -> int:\n \"\"\"Inserts operations into the middle of the circuit.\n\n Args:\n index: The index to insert all of the operations at.\n operation_tree: An operation or tree of operations.\n strategy: How to pick/create the moment to put operations into.\n\n Returns:\n The insertion index that will place operations just after the\n operations that were inserted by this method.\n\n Raises:\n IndexError: Bad insertion index.\n ValueError: Bad insertion strategy.\n \"\"\"\n if not 0 <= index <= len(self.moments):\n raise IndexError('Insert index out of range: {}'.format(index))\n\n k = index\n for op in ops.flatten_op_tree(operation_tree):\n p = self._pick_or_create_inserted_op_moment_index(k, op, strategy)\n while p >= len(self.moments):\n self.moments.append(Moment())\n self.moments[p] = self.moments[p].with_operation(op)\n k = max(k, p + 1)\n if strategy is InsertStrategy.NEW_THEN_INLINE:\n strategy = InsertStrategy.INLINE\n return k\n\n def append(\n self,\n operation_tree: ops.OP_TREE,\n strategy: InsertStrategy = InsertStrategy.NEW_THEN_INLINE):\n \"\"\"Appends operations onto the end of the circuit.\n\n Args:\n operation_tree: An operation or tree 
of operations.\n strategy: How to pick/create the moment to put operations into.\n \"\"\"\n self.insert(len(self.moments), operation_tree, strategy)\n\n def qubits(self) -> Set[QubitId]:\n \"\"\"Returns the qubits acted upon by Operations in this circuit.\"\"\"\n return frozenset(q for m in self.moments for q in m.qubits)\n\n def __repr__(self):\n moment_lines = ('\\n ' + repr(moment) for moment in self.moments)\n return 'Circuit([{}])'.format(','.join(moment_lines))\n\n def __str__(self):\n moment_lines = ('\\n ' + str(moment) for moment in self.moments)\n return 'Circuit [{}\\n]'.format(''.join(moment_lines))\n", "path": "cirq/circuits/circuit.py"}]}
| 3,214 | 258 |
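For reference, the golden diff above answers the chainability question by adding a `Circuit.from_ops` constructor rather than making `append` return `self`. A small usage sketch follows; `op1`, `op2`, and `op3` are placeholders for any operations accepted by `Circuit.append` (an OP_TREE), not specific gates from the record:

```python
# Sketch of the API added by the golden diff; op1/op2/op3 stand for any
# operations accepted by Circuit.append.
from cirq.circuits.circuit import Circuit


def build_circuit_before(op1, op2, op3):
    # Without the patch, the circuit has to be built by repeated mutation.
    circuit = Circuit()
    circuit.append(op1)
    circuit.append([op2, op3])
    return circuit


def build_circuit_after(op1, op2, op3):
    # With the patch, the operations can be passed in a single expression.
    return Circuit.from_ops(op1, op2, op3)
```

Note that `append` itself still returns `None` in the patch, so the chained form quoted in the issue is not supported directly.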
gh_patches_debug_13777
|
rasdani/github-patches
|
git_diff
|
docker__docker-py-2865
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
exec_run: filedescriptor out of range in select() (python3)
The following code works with Python 2, but throws an error on Python 3:
```
import docker
files = [open('/tmp/test_' + str(i), 'w+b') for i in range(0, 1024)]
client = docker.from_env()
cnt = client.containers.run("ubuntu:18.04", command="sleep 10", detach=True, auto_remove=True)
cnt.exec_run("ls -ls")
```
Output:
```
 File "var/dockerpy_bug.py", line 6, in <module>
cnt.exec_run("ls -ls")
File "/usr/lib/python3/dist-packages/docker/models/containers.py", line 185, in exec_run
resp['Id'], detach=detach, tty=tty, stream=stream, socket=socket
File "/usr/lib/python3/dist-packages/docker/utils/decorators.py", line 19, in wrapped
return f(self, resource_id, *args, **kwargs)
File "/usr/lib/python3/dist-packages/docker/api/exec_api.py", line 165, in exec_start
return self._read_from_socket(res, stream, tty)
File "/usr/lib/python3/dist-packages/docker/api/client.py", line 377, in _read_from_socket
return six.binary_type().join(gen)
File "/usr/lib/python3/dist-packages/docker/utils/socket.py", line 75, in frames_iter
n = next_frame_size(socket)
File "/usr/lib/python3/dist-packages/docker/utils/socket.py", line 62, in next_frame_size
data = read_exactly(socket, 8)
File "/usr/lib/python3/dist-packages/docker/utils/socket.py", line 47, in read_exactly
next_data = read(socket, n - len(data))
File "/usr/lib/python3/dist-packages/docker/utils/socket.py", line 27, in read
select.select([socket], [], [])
ValueError: filedescriptor out of range in select()
```
If the socket has a file descriptor >= 1024 (FD_SETSIZE), select() will fail with this ValueError.
Stack Overflow suggests using poll() in place of select(), as it's not bound by 1024 fd limit.
Environment :
ubuntu: 18.04
docker: 18.09.3
docker API: 1.39
python: 3.6.7
docker-py: 3.7.0
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docker/utils/socket.py`
Content:
```
1 import errno
2 import os
3 import select
4 import socket as pysocket
5 import struct
6
7 try:
8 from ..transport import NpipeSocket
9 except ImportError:
10 NpipeSocket = type(None)
11
12
13 STDOUT = 1
14 STDERR = 2
15
16
17 class SocketError(Exception):
18 pass
19
20
21 # NpipeSockets have their own error types
22 # pywintypes.error: (109, 'ReadFile', 'The pipe has been ended.')
23 NPIPE_ENDED = 109
24
25
26 def read(socket, n=4096):
27 """
28 Reads at most n bytes from socket
29 """
30
31 recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)
32
33 if not isinstance(socket, NpipeSocket):
34 select.select([socket], [], [])
35
36 try:
37 if hasattr(socket, 'recv'):
38 return socket.recv(n)
39 if isinstance(socket, getattr(pysocket, 'SocketIO')):
40 return socket.read(n)
41 return os.read(socket.fileno(), n)
42 except OSError as e:
43 if e.errno not in recoverable_errors:
44 raise
45 except Exception as e:
46 is_pipe_ended = (isinstance(socket, NpipeSocket) and
47 len(e.args) > 0 and
48 e.args[0] == NPIPE_ENDED)
49 if is_pipe_ended:
50 # npipes don't support duplex sockets, so we interpret
51 # a PIPE_ENDED error as a close operation (0-length read).
52 return ''
53 raise
54
55
56 def read_exactly(socket, n):
57 """
58 Reads exactly n bytes from socket
59 Raises SocketError if there isn't enough data
60 """
61 data = bytes()
62 while len(data) < n:
63 next_data = read(socket, n - len(data))
64 if not next_data:
65 raise SocketError("Unexpected EOF")
66 data += next_data
67 return data
68
69
70 def next_frame_header(socket):
71 """
72 Returns the stream and size of the next frame of data waiting to be read
73 from socket, according to the protocol defined here:
74
75 https://docs.docker.com/engine/api/v1.24/#attach-to-a-container
76 """
77 try:
78 data = read_exactly(socket, 8)
79 except SocketError:
80 return (-1, -1)
81
82 stream, actual = struct.unpack('>BxxxL', data)
83 return (stream, actual)
84
85
86 def frames_iter(socket, tty):
87 """
88 Return a generator of frames read from socket. A frame is a tuple where
89 the first item is the stream number and the second item is a chunk of data.
90
91 If the tty setting is enabled, the streams are multiplexed into the stdout
92 stream.
93 """
94 if tty:
95 return ((STDOUT, frame) for frame in frames_iter_tty(socket))
96 else:
97 return frames_iter_no_tty(socket)
98
99
100 def frames_iter_no_tty(socket):
101 """
102 Returns a generator of data read from the socket when the tty setting is
103 not enabled.
104 """
105 while True:
106 (stream, n) = next_frame_header(socket)
107 if n < 0:
108 break
109 while n > 0:
110 result = read(socket, n)
111 if result is None:
112 continue
113 data_length = len(result)
114 if data_length == 0:
115 # We have reached EOF
116 return
117 n -= data_length
118 yield (stream, result)
119
120
121 def frames_iter_tty(socket):
122 """
123 Return a generator of data read from the socket when the tty setting is
124 enabled.
125 """
126 while True:
127 result = read(socket)
128 if len(result) == 0:
129 # We have reached EOF
130 return
131 yield result
132
133
134 def consume_socket_output(frames, demux=False):
135 """
136 Iterate through frames read from the socket and return the result.
137
138 Args:
139
140 demux (bool):
141 If False, stdout and stderr are multiplexed, and the result is the
142 concatenation of all the frames. If True, the streams are
143 demultiplexed, and the result is a 2-tuple where each item is the
144 concatenation of frames belonging to the same stream.
145 """
146 if demux is False:
147 # If the streams are multiplexed, the generator returns strings, that
148 # we just need to concatenate.
149 return bytes().join(frames)
150
151 # If the streams are demultiplexed, the generator yields tuples
152 # (stdout, stderr)
153 out = [None, None]
154 for frame in frames:
155 # It is guaranteed that for each frame, one and only one stream
156 # is not None.
157 assert frame != (None, None)
158 if frame[0] is not None:
159 if out[0] is None:
160 out[0] = frame[0]
161 else:
162 out[0] += frame[0]
163 else:
164 if out[1] is None:
165 out[1] = frame[1]
166 else:
167 out[1] += frame[1]
168 return tuple(out)
169
170
171 def demux_adaptor(stream_id, data):
172 """
173 Utility to demultiplex stdout and stderr when reading frames from the
174 socket.
175 """
176 if stream_id == STDOUT:
177 return (data, None)
178 elif stream_id == STDERR:
179 return (None, data)
180 else:
181 raise ValueError(f'{stream_id} is not a valid stream')
182
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docker/utils/socket.py b/docker/utils/socket.py
--- a/docker/utils/socket.py
+++ b/docker/utils/socket.py
@@ -3,6 +3,7 @@
import select
import socket as pysocket
import struct
+import sys
try:
from ..transport import NpipeSocket
@@ -31,7 +32,13 @@
recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)
if not isinstance(socket, NpipeSocket):
- select.select([socket], [], [])
+ if sys.platform == 'win32':
+ # Limited to 1024
+ select.select([socket], [], [])
+ else:
+ poll = select.poll()
+ poll.register(socket)
+ poll.poll()
try:
if hasattr(socket, 'recv'):
|
{"golden_diff": "diff --git a/docker/utils/socket.py b/docker/utils/socket.py\n--- a/docker/utils/socket.py\n+++ b/docker/utils/socket.py\n@@ -3,6 +3,7 @@\n import select\n import socket as pysocket\n import struct\n+import sys\n \n try:\n from ..transport import NpipeSocket\n@@ -31,7 +32,13 @@\n recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)\n \n if not isinstance(socket, NpipeSocket):\n- select.select([socket], [], [])\n+ if sys.platform == 'win32':\n+ # Limited to 1024\n+ select.select([socket], [], [])\n+ else:\n+ poll = select.poll()\n+ poll.register(socket)\n+ poll.poll()\n \n try:\n if hasattr(socket, 'recv'):\n", "issue": "exec_run: filedescriptor out of range in select() (python3)\nThe following code works with python2, but throws error on python3:\r\n```\r\nimport docker\r\n\r\nfiles = [open('/tmp/test_' + str(i), 'w+b') for i in range(0, 1024)]\r\nclient = docker.from_env()\r\ncnt = client.containers.run(\"ubuntu:18.04\", command=\"sleep 10\", detach=True, auto_remove=True)\r\ncnt.exec_run(\"ls -ls\")\r\n```\r\nOutput: \r\n``` File \"var/dockerpy_bug.py\", line 6, in <module>\r\n cnt.exec_run(\"ls -ls\")\r\n File \"/usr/lib/python3/dist-packages/docker/models/containers.py\", line 185, in exec_run\r\n resp['Id'], detach=detach, tty=tty, stream=stream, socket=socket\r\n File \"/usr/lib/python3/dist-packages/docker/utils/decorators.py\", line 19, in wrapped\r\n return f(self, resource_id, *args, **kwargs)\r\n File \"/usr/lib/python3/dist-packages/docker/api/exec_api.py\", line 165, in exec_start\r\n return self._read_from_socket(res, stream, tty)\r\n File \"/usr/lib/python3/dist-packages/docker/api/client.py\", line 377, in _read_from_socket\r\n return six.binary_type().join(gen)\r\n File \"/usr/lib/python3/dist-packages/docker/utils/socket.py\", line 75, in frames_iter\r\n n = next_frame_size(socket)\r\n File \"/usr/lib/python3/dist-packages/docker/utils/socket.py\", line 62, in next_frame_size\r\n data = read_exactly(socket, 8)\r\n File \"/usr/lib/python3/dist-packages/docker/utils/socket.py\", line 47, in read_exactly\r\n next_data = read(socket, n - len(data))\r\n File \"/usr/lib/python3/dist-packages/docker/utils/socket.py\", line 27, in read\r\n select.select([socket], [], [])\r\nValueError: filedescriptor out of range in select()\r\n```\r\nif socket has fd > 1024, select will fail\r\nStack Overflow suggests using poll() in place of select(), as it's not bound by 1024 fd limit.\r\n\r\nEnvironment :\r\nubuntu: 18.04\r\ndocker: 18.09.3\r\ndocker API: 1.39\r\npython: 3.6.7\r\ndocker-py: 3.7.0\n", "before_files": [{"content": "import errno\nimport os\nimport select\nimport socket as pysocket\nimport struct\n\ntry:\n from ..transport import NpipeSocket\nexcept ImportError:\n NpipeSocket = type(None)\n\n\nSTDOUT = 1\nSTDERR = 2\n\n\nclass SocketError(Exception):\n pass\n\n\n# NpipeSockets have their own error types\n# pywintypes.error: (109, 'ReadFile', 'The pipe has been ended.')\nNPIPE_ENDED = 109\n\n\ndef read(socket, n=4096):\n \"\"\"\n Reads at most n bytes from socket\n \"\"\"\n\n recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)\n\n if not isinstance(socket, NpipeSocket):\n select.select([socket], [], [])\n\n try:\n if hasattr(socket, 'recv'):\n return socket.recv(n)\n if isinstance(socket, getattr(pysocket, 'SocketIO')):\n return socket.read(n)\n return os.read(socket.fileno(), n)\n except OSError as e:\n if e.errno not in recoverable_errors:\n raise\n except Exception as e:\n is_pipe_ended = (isinstance(socket, NpipeSocket) and\n 
len(e.args) > 0 and\n e.args[0] == NPIPE_ENDED)\n if is_pipe_ended:\n # npipes don't support duplex sockets, so we interpret\n # a PIPE_ENDED error as a close operation (0-length read).\n return ''\n raise\n\n\ndef read_exactly(socket, n):\n \"\"\"\n Reads exactly n bytes from socket\n Raises SocketError if there isn't enough data\n \"\"\"\n data = bytes()\n while len(data) < n:\n next_data = read(socket, n - len(data))\n if not next_data:\n raise SocketError(\"Unexpected EOF\")\n data += next_data\n return data\n\n\ndef next_frame_header(socket):\n \"\"\"\n Returns the stream and size of the next frame of data waiting to be read\n from socket, according to the protocol defined here:\n\n https://docs.docker.com/engine/api/v1.24/#attach-to-a-container\n \"\"\"\n try:\n data = read_exactly(socket, 8)\n except SocketError:\n return (-1, -1)\n\n stream, actual = struct.unpack('>BxxxL', data)\n return (stream, actual)\n\n\ndef frames_iter(socket, tty):\n \"\"\"\n Return a generator of frames read from socket. A frame is a tuple where\n the first item is the stream number and the second item is a chunk of data.\n\n If the tty setting is enabled, the streams are multiplexed into the stdout\n stream.\n \"\"\"\n if tty:\n return ((STDOUT, frame) for frame in frames_iter_tty(socket))\n else:\n return frames_iter_no_tty(socket)\n\n\ndef frames_iter_no_tty(socket):\n \"\"\"\n Returns a generator of data read from the socket when the tty setting is\n not enabled.\n \"\"\"\n while True:\n (stream, n) = next_frame_header(socket)\n if n < 0:\n break\n while n > 0:\n result = read(socket, n)\n if result is None:\n continue\n data_length = len(result)\n if data_length == 0:\n # We have reached EOF\n return\n n -= data_length\n yield (stream, result)\n\n\ndef frames_iter_tty(socket):\n \"\"\"\n Return a generator of data read from the socket when the tty setting is\n enabled.\n \"\"\"\n while True:\n result = read(socket)\n if len(result) == 0:\n # We have reached EOF\n return\n yield result\n\n\ndef consume_socket_output(frames, demux=False):\n \"\"\"\n Iterate through frames read from the socket and return the result.\n\n Args:\n\n demux (bool):\n If False, stdout and stderr are multiplexed, and the result is the\n concatenation of all the frames. 
If True, the streams are\n demultiplexed, and the result is a 2-tuple where each item is the\n concatenation of frames belonging to the same stream.\n \"\"\"\n if demux is False:\n # If the streams are multiplexed, the generator returns strings, that\n # we just need to concatenate.\n return bytes().join(frames)\n\n # If the streams are demultiplexed, the generator yields tuples\n # (stdout, stderr)\n out = [None, None]\n for frame in frames:\n # It is guaranteed that for each frame, one and only one stream\n # is not None.\n assert frame != (None, None)\n if frame[0] is not None:\n if out[0] is None:\n out[0] = frame[0]\n else:\n out[0] += frame[0]\n else:\n if out[1] is None:\n out[1] = frame[1]\n else:\n out[1] += frame[1]\n return tuple(out)\n\n\ndef demux_adaptor(stream_id, data):\n \"\"\"\n Utility to demultiplex stdout and stderr when reading frames from the\n socket.\n \"\"\"\n if stream_id == STDOUT:\n return (data, None)\n elif stream_id == STDERR:\n return (None, data)\n else:\n raise ValueError(f'{stream_id} is not a valid stream')\n", "path": "docker/utils/socket.py"}], "after_files": [{"content": "import errno\nimport os\nimport select\nimport socket as pysocket\nimport struct\nimport sys\n\ntry:\n from ..transport import NpipeSocket\nexcept ImportError:\n NpipeSocket = type(None)\n\n\nSTDOUT = 1\nSTDERR = 2\n\n\nclass SocketError(Exception):\n pass\n\n\n# NpipeSockets have their own error types\n# pywintypes.error: (109, 'ReadFile', 'The pipe has been ended.')\nNPIPE_ENDED = 109\n\n\ndef read(socket, n=4096):\n \"\"\"\n Reads at most n bytes from socket\n \"\"\"\n\n recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)\n\n if not isinstance(socket, NpipeSocket):\n if sys.platform == 'win32':\n # Limited to 1024\n select.select([socket], [], [])\n else:\n poll = select.poll()\n poll.register(socket)\n poll.poll()\n\n try:\n if hasattr(socket, 'recv'):\n return socket.recv(n)\n if isinstance(socket, getattr(pysocket, 'SocketIO')):\n return socket.read(n)\n return os.read(socket.fileno(), n)\n except OSError as e:\n if e.errno not in recoverable_errors:\n raise\n except Exception as e:\n is_pipe_ended = (isinstance(socket, NpipeSocket) and\n len(e.args) > 0 and\n e.args[0] == NPIPE_ENDED)\n if is_pipe_ended:\n # npipes don't support duplex sockets, so we interpret\n # a PIPE_ENDED error as a close operation (0-length read).\n return ''\n raise\n\n\ndef read_exactly(socket, n):\n \"\"\"\n Reads exactly n bytes from socket\n Raises SocketError if there isn't enough data\n \"\"\"\n data = bytes()\n while len(data) < n:\n next_data = read(socket, n - len(data))\n if not next_data:\n raise SocketError(\"Unexpected EOF\")\n data += next_data\n return data\n\n\ndef next_frame_header(socket):\n \"\"\"\n Returns the stream and size of the next frame of data waiting to be read\n from socket, according to the protocol defined here:\n\n https://docs.docker.com/engine/api/v1.24/#attach-to-a-container\n \"\"\"\n try:\n data = read_exactly(socket, 8)\n except SocketError:\n return (-1, -1)\n\n stream, actual = struct.unpack('>BxxxL', data)\n return (stream, actual)\n\n\ndef frames_iter(socket, tty):\n \"\"\"\n Return a generator of frames read from socket. 
A frame is a tuple where\n the first item is the stream number and the second item is a chunk of data.\n\n If the tty setting is enabled, the streams are multiplexed into the stdout\n stream.\n \"\"\"\n if tty:\n return ((STDOUT, frame) for frame in frames_iter_tty(socket))\n else:\n return frames_iter_no_tty(socket)\n\n\ndef frames_iter_no_tty(socket):\n \"\"\"\n Returns a generator of data read from the socket when the tty setting is\n not enabled.\n \"\"\"\n while True:\n (stream, n) = next_frame_header(socket)\n if n < 0:\n break\n while n > 0:\n result = read(socket, n)\n if result is None:\n continue\n data_length = len(result)\n if data_length == 0:\n # We have reached EOF\n return\n n -= data_length\n yield (stream, result)\n\n\ndef frames_iter_tty(socket):\n \"\"\"\n Return a generator of data read from the socket when the tty setting is\n enabled.\n \"\"\"\n while True:\n result = read(socket)\n if len(result) == 0:\n # We have reached EOF\n return\n yield result\n\n\ndef consume_socket_output(frames, demux=False):\n \"\"\"\n Iterate through frames read from the socket and return the result.\n\n Args:\n\n demux (bool):\n If False, stdout and stderr are multiplexed, and the result is the\n concatenation of all the frames. If True, the streams are\n demultiplexed, and the result is a 2-tuple where each item is the\n concatenation of frames belonging to the same stream.\n \"\"\"\n if demux is False:\n # If the streams are multiplexed, the generator returns strings, that\n # we just need to concatenate.\n return bytes().join(frames)\n\n # If the streams are demultiplexed, the generator yields tuples\n # (stdout, stderr)\n out = [None, None]\n for frame in frames:\n # It is guaranteed that for each frame, one and only one stream\n # is not None.\n assert frame != (None, None)\n if frame[0] is not None:\n if out[0] is None:\n out[0] = frame[0]\n else:\n out[0] += frame[0]\n else:\n if out[1] is None:\n out[1] = frame[1]\n else:\n out[1] += frame[1]\n return tuple(out)\n\n\ndef demux_adaptor(stream_id, data):\n \"\"\"\n Utility to demultiplex stdout and stderr when reading frames from the\n socket.\n \"\"\"\n if stream_id == STDOUT:\n return (data, None)\n elif stream_id == STDERR:\n return (None, data)\n else:\n raise ValueError(f'{stream_id} is not a valid stream')\n", "path": "docker/utils/socket.py"}]}
| 2,416 | 186 |
gh_patches_debug_33201
|
rasdani/github-patches
|
git_diff
|
pex-tool__pex-128
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
It's possible to build a pex with multiple versions of the same package
``` bash
$ git clone https://github.com/sphinx-doc/sphinx && cd sphinx
$ pex . -v
snowballstemmer 1.2.0
MarkupSafe 0.23
Jinja2 2.8
six 1.9.0
Babel 2.0
Sphinx 1.3.1
pytz 2015.4
alabaster 0.7.6
Pygments 2.0.2
sphinx-rtd-theme 0.1.8
Sphinx 1.4a0.dev20150811
docutils 0.12
```
Note the two copies of sphinx pulled into the environment. This messes with things like PEX_SCRIPT, which expects to see just one version of an entry point.
Support PathResource that comes from egg
I was seeing failures when creating an entry point to a script that was included in an egg. I'm not super familiar with the pex internals, but it seemed to only happen when the pex was actually executing (i.e., it found the script during the build process but not during execution).
My guess is that the eggs get converted to a PathResource at some point, and since a PathResource is treated the same as a WheelResource, it fails on paths that came from eggs because the scripts directory is in a different location.
My fix was to try both finding scripts from an egg location and from a wheel location when a PathResource is found. I'm happy to add/update tests if you can point me to the relevant ones.
--- END ISSUE ---
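The fallback the reporter describes — checking both the wheel-style and the egg-style scripts directory when a PathResource is encountered — can be sketched roughly as below. The directory names are illustrative assumptions, not pex's actual internal layout.

```python
import os

def find_script(dist_location, script_name):
    """Look for a console script first where a wheel install would put it,
    then where an egg keeps it (EGG-INFO/scripts). Paths are illustrative."""
    candidates = (
        os.path.join(dist_location, "bin", script_name),
        os.path.join(dist_location, "EGG-INFO", "scripts", script_name),
    )
    for candidate in candidates:
        if os.path.isfile(candidate):
            return candidate
    return None
```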
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/variables.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 # Due to the PEX_ properties, disable checkstyle.
5 # checkstyle: noqa
6
7 import os
8 from contextlib import contextmanager
9
10 from .common import die
11
12 __all__ = ('ENV', 'Variables')
13
14
15 class Variables(object):
16 """Environment variables supported by the PEX runtime."""
17
18 @classmethod
19 def process_pydoc(cls, pydoc):
20 if pydoc is None:
21 return 'Unknown', 'Unknown'
22 pydoc = pydoc.splitlines()
23 variable_type = pydoc[0]
24 variable_text = ' '.join(filter(None, (line.strip() for line in pydoc[2:])))
25 return variable_type, variable_text
26
27 @classmethod
28 def iter_help(cls):
29 for variable_name, value in sorted(cls.__dict__.items()):
30 if not variable_name.startswith('PEX_'):
31 continue
32 variable_type, variable_text = cls.process_pydoc(getattr(value, '__doc__'))
33 yield variable_name, variable_type, variable_text
34
35 def __init__(self, environ=None, use_defaults=True):
36 self._environ = environ.copy() if environ is not None else os.environ
37 self._use_defaults = use_defaults
38
39 def copy(self):
40 return self._environ.copy()
41
42 def delete(self, variable):
43 self._environ.pop(variable, None)
44
45 def set(self, variable, value):
46 self._environ[variable] = str(value)
47
48 def _defaulted(self, default):
49 return default if self._use_defaults else None
50
51 def _get_bool(self, variable, default=False):
52 value = self._environ.get(variable)
53 if value is not None:
54 if value.lower() in ('0', 'false'):
55 return False
56 elif value.lower() in ('1', 'true'):
57 return True
58 else:
59 die('Invalid value for %s, must be 0/1/false/true, got %r' % (variable, value))
60 else:
61 return self._defaulted(default)
62
63 def _get_string(self, variable, default=None):
64 return self._environ.get(variable, self._defaulted(default))
65
66 def _get_path(self, variable, default=None):
67 value = self._get_string(variable, default=default)
68 if value is not None:
69 return os.path.realpath(os.path.expanduser(value))
70
71 def _get_int(self, variable, default=None):
72 try:
73 return int(self._environ[variable])
74 except ValueError:
75 die('Invalid value for %s, must be an integer, got %r' % (variable, self._environ[variable]))
76 except KeyError:
77 return self._defaulted(default)
78
79 def strip_defaults(self):
80 """Returns a copy of these variables but with defaults stripped.
81
82 Any variables not explicitly set in the environment will have a value of `None`.
83 """
84 return Variables(environ=self.copy(), use_defaults=False)
85
86 @contextmanager
87 def patch(self, **kw):
88 """Update the environment for the duration of a context."""
89 old_environ = self._environ
90 self._environ = self._environ.copy()
91 self._environ.update(kw)
92 yield
93 self._environ = old_environ
94
95 @property
96 def PEX_ALWAYS_CACHE(self):
97 """Boolean
98
99 Always write PEX dependencies to disk prior to invoking regardless whether or not the
100 dependencies are zip-safe. For certain dependencies that are very large such as numpy, this
101 can reduce the RAM necessary to launch the PEX. The data will be written into $PEX_ROOT,
102 which by default is $HOME/.pex. Default: false.
103 """
104 return self._get_bool('PEX_ALWAYS_CACHE', default=False)
105
106 @property
107 def PEX_COVERAGE(self):
108 """Boolean
109
110 Enable coverage reporting for this PEX file. This requires that the "coverage" module is
111 available in the PEX environment. Default: false.
112 """
113 return self._get_bool('PEX_COVERAGE', default=False)
114
115 @property
116 def PEX_COVERAGE_FILENAME(self):
117 """Filename
118
119 Write the coverage data to the specified filename. If PEX_COVERAGE_FILENAME is not specified
120 but PEX_COVERAGE is, coverage information will be printed to stdout and not saved.
121 """
122 return self._get_path('PEX_COVERAGE_FILENAME', default=None)
123
124 @property
125 def PEX_FORCE_LOCAL(self):
126 """Boolean
127
128 Force this PEX to be not-zip-safe. This forces all code and dependencies to be written into
129 $PEX_ROOT prior to invocation. This is an option for applications with static assets that
130 refer to paths relative to __file__ instead of using pkgutil/pkg_resources. Default: false.
131 """
132 return self._get_bool('PEX_FORCE_LOCAL', default=False)
133
134 @property
135 def PEX_IGNORE_ERRORS(self):
136 """Boolean
137
138 Ignore any errors resolving dependencies when invoking the PEX file. This can be useful if you
139 know that a particular failing dependency is not necessary to run the application. Default:
140 false.
141 """
142 return self._get_bool('PEX_IGNORE_ERRORS', default=False)
143
144 @property
145 def PEX_INHERIT_PATH(self):
146 """Boolean
147
148 Allow inheriting packages from site-packages. By default, PEX scrubs any packages and
149 namespace packages from sys.path prior to invoking the application. This is generally not
150 advised, but can be used in situations when certain dependencies do not conform to standard
151 packaging practices and thus cannot be bundled into PEX files. Default: false.
152 """
153 return self._get_bool('PEX_INHERIT_PATH', default=False)
154
155 @property
156 def PEX_INTERPRETER(self):
157 """Boolean
158
159 Drop into a REPL instead of invoking the predefined entry point of this PEX. This can be
160 useful for inspecting the PEX environment interactively. It can also be used to treat the PEX
161 file as an interpreter in order to execute other scripts in the context of the PEX file, e.g.
162 "PEX_INTERPRETER=1 ./app.pex my_script.py". Equivalent to setting PEX_MODULE to empty.
163 Default: false.
164 """
165 return self._get_bool('PEX_INTERPRETER', default=False)
166
167 @property
168 def PEX_MODULE(self):
169 """String
170
171 Override the entry point into the PEX file. Can either be a module, e.g. 'SimpleHTTPServer',
172 or a specific entry point in module:symbol form, e.g. "myapp.bin:main".
173 """
174 return self._get_string('PEX_MODULE', default=None)
175
176 @property
177 def PEX_PROFILE(self):
178 """Boolean
179
180 Enable application profiling. If specified and PEX_PROFILE_FILENAME is not specified, PEX will
181 print profiling information to stdout.
182 """
183 return self._get_path('PEX_PROFILE', default=None)
184
185 @property
186 def PEX_PROFILE_FILENAME(self):
187 """Filename
188
189 Profile the application and dump a profile into the specified filename in the standard
190 "profile" module format.
191 """
192 return self._get_path('PEX_PROFILE_FILENAME', default=None)
193
194 @property
195 def PEX_PROFILE_SORT(self):
196 """String
197
198 Toggle the profile sorting algorithm used to print out profile columns. Default:
199 'cumulative'.
200 """
201 return self._get_string('PEX_PROFILE_SORT', default='cumulative')
202
203 @property
204 def PEX_PYTHON(self):
205 """String
206
207 Override the Python interpreter used to invoke this PEX. Can be either an absolute path to an
208 interpreter or a base name e.g. "python3.3". If a base name is provided, the $PATH will be
209 searched for an appropriate match.
210 """
211 return self._get_string('PEX_PYTHON', default=None)
212
213 @property
214 def PEX_ROOT(self):
215 """Directory
216
217 The directory location for PEX to cache any dependencies and code. PEX must write
218 not-zip-safe eggs and all wheels to disk in order to activate them. Default: ~/.pex
219 """
220 return self._get_path('PEX_ROOT', default=None)
221
222 @property
223 def PEX_PATH(self):
224 """A set of one or more PEX files
225
226 Merge the packages from other PEX files into the current environment. This allows you to
227 do things such as create a PEX file containing the "coverage" module or create PEX files
228 containing plugin entry points to be consumed by a main application. Paths should be
229 specified in the same manner as $PATH, e.g. PEX_PATH=/path/to/pex1.pex:/path/to/pex2.pex
230 and so forth.
231 """
232 return self._get_string('PEX_PATH', default='')
233
234 @property
235 def PEX_SCRIPT(self):
236 """String
237
238 The script name within the PEX environment to execute. This must either be an entry point as
239 defined in a distribution's console_scripts, or a script as defined in a distribution's
240 scripts section. While Python supports any script including shell scripts, PEX only supports
241 invocation of Python scripts in this fashion.
242 """
243 return self._get_string('PEX_SCRIPT', default=None)
244
245 @property
246 def PEX_TEARDOWN_VERBOSE(self):
247 """Boolean
248
249 Enable verbosity for when the interpreter shuts down. This is mostly only useful for
250 debugging PEX itself. Default: false.
251 """
252 return self._get_bool('PEX_TEARDOWN_VERBOSE', default=False)
253
254 @property
255 def PEX_VERBOSE(self):
256 """Integer
257
258 Set the verbosity level of PEX debug logging. The higher the number, the more logging, with 0
259 being disabled. This environment variable can be extremely useful in debugging PEX
260 environment issues. Default: 0
261 """
262 return self._get_int('PEX_VERBOSE', default=0)
263
264 # TODO(wickman) Remove and push into --flags. #94
265 @property
266 def PEX_HTTP_RETRIES(self):
267 """Integer
268
269 The number of HTTP retries when performing dependency resolution when building a PEX file.
270 Default: 5.
271 """
272 return self._get_int('PEX_HTTP_RETRIES', default=5)
273
274
275 # Global singleton environment
276 ENV = Variables()
277
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pex/variables.py b/pex/variables.py
--- a/pex/variables.py
+++ b/pex/variables.py
@@ -5,6 +5,7 @@
# checkstyle: noqa
import os
+import sys
from contextlib import contextmanager
from .common import die
@@ -32,9 +33,13 @@
variable_type, variable_text = cls.process_pydoc(getattr(value, '__doc__'))
yield variable_name, variable_type, variable_text
- def __init__(self, environ=None, use_defaults=True):
- self._environ = environ.copy() if environ is not None else os.environ
+ def __init__(self, environ=None, rc='~/.pexrc', use_defaults=True):
self._use_defaults = use_defaults
+ self._environ = environ.copy() if environ else os.environ
+ if not self.PEX_IGNORE_RCFILES:
+ rc_values = self._from_rc(rc).copy()
+ rc_values.update(self._environ)
+ self._environ = rc_values
def copy(self):
return self._environ.copy()
@@ -45,6 +50,22 @@
def set(self, variable, value):
self._environ[variable] = str(value)
+ def _from_rc(self, rc):
+ ret_vars = {}
+ for filename in [rc, os.path.join(os.path.dirname(sys.argv[0]), '.pexrc')]:
+ try:
+ with open(os.path.expanduser(filename)) as fh:
+ rc_items = map(self._get_kv, fh)
+ ret_vars.update(dict(filter(None, rc_items)))
+ except IOError:
+ continue
+ return ret_vars
+
+ def _get_kv(self, variable):
+ kv = variable.strip().split('=')
+ if len(list(filter(None, kv))) == 2:
+ return kv
+
def _defaulted(self, default):
return default if self._use_defaults else None
@@ -271,6 +292,14 @@
"""
return self._get_int('PEX_HTTP_RETRIES', default=5)
+ @property
+ def PEX_IGNORE_RCFILES(self):
+ """Boolean
+
+ Explicitly disable the reading/parsing of pexrc files (~/.pexrc). Default: false.
+ """
+ return self._get_bool('PEX_IGNORE_RCFILES', default=False)
+
# Global singleton environment
ENV = Variables()
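
The patch above makes Variables read simple KEY=VALUE lines from ~/.pexrc (or a .pexrc next to the invoked script), with real environment variables still taking precedence. A standalone sketch of that merge logic, mirroring the diff rather than pex's public API:

```python
import os

def parse_pexrc(path="~/.pexrc"):
    """Collect KEY=VALUE lines; lines that do not split into exactly one
    non-empty key and one non-empty value are ignored, as in the diff."""
    values = {}
    try:
        with open(os.path.expanduser(path)) as fh:
            for line in fh:
                kv = line.strip().split("=")
                if len([part for part in kv if part]) == 2:
                    values[kv[0]] = kv[1]
    except IOError:
        pass
    return values

# rc-file values act as defaults; the live environment still wins.
merged = parse_pexrc()
merged.update(os.environ)
```

So a ~/.pexrc containing, say, `PEX_VERBOSE=1` would apply to every pex invocation unless PEX_VERBOSE is already set in the environment; note that PEX_IGNORE_RCFILES itself has to come from the real environment, since it is consulted before any rc file is read.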
|
{"golden_diff": "diff --git a/pex/variables.py b/pex/variables.py\n--- a/pex/variables.py\n+++ b/pex/variables.py\n@@ -5,6 +5,7 @@\n # checkstyle: noqa\n \n import os\n+import sys\n from contextlib import contextmanager\n \n from .common import die\n@@ -32,9 +33,13 @@\n variable_type, variable_text = cls.process_pydoc(getattr(value, '__doc__'))\n yield variable_name, variable_type, variable_text\n \n- def __init__(self, environ=None, use_defaults=True):\n- self._environ = environ.copy() if environ is not None else os.environ\n+ def __init__(self, environ=None, rc='~/.pexrc', use_defaults=True):\n self._use_defaults = use_defaults\n+ self._environ = environ.copy() if environ else os.environ\n+ if not self.PEX_IGNORE_RCFILES:\n+ rc_values = self._from_rc(rc).copy()\n+ rc_values.update(self._environ)\n+ self._environ = rc_values\n \n def copy(self):\n return self._environ.copy()\n@@ -45,6 +50,22 @@\n def set(self, variable, value):\n self._environ[variable] = str(value)\n \n+ def _from_rc(self, rc):\n+ ret_vars = {}\n+ for filename in [rc, os.path.join(os.path.dirname(sys.argv[0]), '.pexrc')]:\n+ try:\n+ with open(os.path.expanduser(filename)) as fh:\n+ rc_items = map(self._get_kv, fh)\n+ ret_vars.update(dict(filter(None, rc_items)))\n+ except IOError:\n+ continue\n+ return ret_vars\n+\n+ def _get_kv(self, variable):\n+ kv = variable.strip().split('=')\n+ if len(list(filter(None, kv))) == 2:\n+ return kv\n+\n def _defaulted(self, default):\n return default if self._use_defaults else None\n \n@@ -271,6 +292,14 @@\n \"\"\"\n return self._get_int('PEX_HTTP_RETRIES', default=5)\n \n+ @property\n+ def PEX_IGNORE_RCFILES(self):\n+ \"\"\"Boolean\n+\n+ Explicitly disable the reading/parsing of pexrc files (~/.pexrc). Default: false.\n+ \"\"\"\n+ return self._get_bool('PEX_IGNORE_RCFILES', default=False)\n+\n \n # Global singleton environment\n ENV = Variables()\n", "issue": "It's possible to build a pex with multiple versions of the same package\n``` bash\n\n$ git clone https://github.com/sphinx-doc/sphinx && cd sphinx\n$ pex . -v\n snowballstemmer 1.2.0\n MarkupSafe 0.23\n Jinja2 2.8\n six 1.9.0\n Babel 2.0\n Sphinx 1.3.1\n pytz 2015.4\n alabaster 0.7.6\n Pygments 2.0.2\n sphinx-rtd-theme 0.1.8\n Sphinx 1.4a0.dev20150811\n docutils 0.12\n```\n\nNote the two copies of sphinx pulled into the environment. This messes with things like PEX_SCRIPT which expects to see just one version of an entry point.\n\nSupport PathResource that comes from egg\nI was seeing failures when creating an entry point to a script that was included in an egg. I'm not super familiar with the pex internals, but it seemed to only happen when the pex was actually executing (IE it found the script during the build process but not during execution).\n\nMy guess is that the eggs get converted to a PathResource at some point and since the PathResource is treated the same as a WheelResource it fails on paths that came from eggs because the scripts directory is in a different location.\n\nMy fix was to try both finding scripts from an egg location and from a wheel location when a PathResource is found. 
I'm happy to add/update tests if you can point me to the relevant ones.\n\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n# Due to the PEX_ properties, disable checkstyle.\n# checkstyle: noqa\n\nimport os\nfrom contextlib import contextmanager\n\nfrom .common import die\n\n__all__ = ('ENV', 'Variables')\n\n\nclass Variables(object):\n \"\"\"Environment variables supported by the PEX runtime.\"\"\"\n\n @classmethod\n def process_pydoc(cls, pydoc):\n if pydoc is None:\n return 'Unknown', 'Unknown'\n pydoc = pydoc.splitlines()\n variable_type = pydoc[0]\n variable_text = ' '.join(filter(None, (line.strip() for line in pydoc[2:])))\n return variable_type, variable_text\n\n @classmethod\n def iter_help(cls):\n for variable_name, value in sorted(cls.__dict__.items()):\n if not variable_name.startswith('PEX_'):\n continue\n variable_type, variable_text = cls.process_pydoc(getattr(value, '__doc__'))\n yield variable_name, variable_type, variable_text\n\n def __init__(self, environ=None, use_defaults=True):\n self._environ = environ.copy() if environ is not None else os.environ\n self._use_defaults = use_defaults\n\n def copy(self):\n return self._environ.copy()\n\n def delete(self, variable):\n self._environ.pop(variable, None)\n\n def set(self, variable, value):\n self._environ[variable] = str(value)\n\n def _defaulted(self, default):\n return default if self._use_defaults else None\n\n def _get_bool(self, variable, default=False):\n value = self._environ.get(variable)\n if value is not None:\n if value.lower() in ('0', 'false'):\n return False\n elif value.lower() in ('1', 'true'):\n return True\n else:\n die('Invalid value for %s, must be 0/1/false/true, got %r' % (variable, value))\n else:\n return self._defaulted(default)\n\n def _get_string(self, variable, default=None):\n return self._environ.get(variable, self._defaulted(default))\n\n def _get_path(self, variable, default=None):\n value = self._get_string(variable, default=default)\n if value is not None:\n return os.path.realpath(os.path.expanduser(value))\n\n def _get_int(self, variable, default=None):\n try:\n return int(self._environ[variable])\n except ValueError:\n die('Invalid value for %s, must be an integer, got %r' % (variable, self._environ[variable]))\n except KeyError:\n return self._defaulted(default)\n\n def strip_defaults(self):\n \"\"\"Returns a copy of these variables but with defaults stripped.\n\n Any variables not explicitly set in the environment will have a value of `None`.\n \"\"\"\n return Variables(environ=self.copy(), use_defaults=False)\n\n @contextmanager\n def patch(self, **kw):\n \"\"\"Update the environment for the duration of a context.\"\"\"\n old_environ = self._environ\n self._environ = self._environ.copy()\n self._environ.update(kw)\n yield\n self._environ = old_environ\n\n @property\n def PEX_ALWAYS_CACHE(self):\n \"\"\"Boolean\n\n Always write PEX dependencies to disk prior to invoking regardless whether or not the\n dependencies are zip-safe. For certain dependencies that are very large such as numpy, this\n can reduce the RAM necessary to launch the PEX. The data will be written into $PEX_ROOT,\n which by default is $HOME/.pex. Default: false.\n \"\"\"\n return self._get_bool('PEX_ALWAYS_CACHE', default=False)\n\n @property\n def PEX_COVERAGE(self):\n \"\"\"Boolean\n\n Enable coverage reporting for this PEX file. 
This requires that the \"coverage\" module is\n available in the PEX environment. Default: false.\n \"\"\"\n return self._get_bool('PEX_COVERAGE', default=False)\n\n @property\n def PEX_COVERAGE_FILENAME(self):\n \"\"\"Filename\n\n Write the coverage data to the specified filename. If PEX_COVERAGE_FILENAME is not specified\n but PEX_COVERAGE is, coverage information will be printed to stdout and not saved.\n \"\"\"\n return self._get_path('PEX_COVERAGE_FILENAME', default=None)\n\n @property\n def PEX_FORCE_LOCAL(self):\n \"\"\"Boolean\n\n Force this PEX to be not-zip-safe. This forces all code and dependencies to be written into\n $PEX_ROOT prior to invocation. This is an option for applications with static assets that\n refer to paths relative to __file__ instead of using pkgutil/pkg_resources. Default: false.\n \"\"\"\n return self._get_bool('PEX_FORCE_LOCAL', default=False)\n\n @property\n def PEX_IGNORE_ERRORS(self):\n \"\"\"Boolean\n\n Ignore any errors resolving dependencies when invoking the PEX file. This can be useful if you\n know that a particular failing dependency is not necessary to run the application. Default:\n false.\n \"\"\"\n return self._get_bool('PEX_IGNORE_ERRORS', default=False)\n\n @property\n def PEX_INHERIT_PATH(self):\n \"\"\"Boolean\n\n Allow inheriting packages from site-packages. By default, PEX scrubs any packages and\n namespace packages from sys.path prior to invoking the application. This is generally not\n advised, but can be used in situations when certain dependencies do not conform to standard\n packaging practices and thus cannot be bundled into PEX files. Default: false.\n \"\"\"\n return self._get_bool('PEX_INHERIT_PATH', default=False)\n\n @property\n def PEX_INTERPRETER(self):\n \"\"\"Boolean\n\n Drop into a REPL instead of invoking the predefined entry point of this PEX. This can be\n useful for inspecting the PEX environment interactively. It can also be used to treat the PEX\n file as an interpreter in order to execute other scripts in the context of the PEX file, e.g.\n \"PEX_INTERPRETER=1 ./app.pex my_script.py\". Equivalent to setting PEX_MODULE to empty.\n Default: false.\n \"\"\"\n return self._get_bool('PEX_INTERPRETER', default=False)\n\n @property\n def PEX_MODULE(self):\n \"\"\"String\n\n Override the entry point into the PEX file. Can either be a module, e.g. 'SimpleHTTPServer',\n or a specific entry point in module:symbol form, e.g. \"myapp.bin:main\".\n \"\"\"\n return self._get_string('PEX_MODULE', default=None)\n\n @property\n def PEX_PROFILE(self):\n \"\"\"Boolean\n\n Enable application profiling. If specified and PEX_PROFILE_FILENAME is not specified, PEX will\n print profiling information to stdout.\n \"\"\"\n return self._get_path('PEX_PROFILE', default=None)\n\n @property\n def PEX_PROFILE_FILENAME(self):\n \"\"\"Filename\n\n Profile the application and dump a profile into the specified filename in the standard\n \"profile\" module format.\n \"\"\"\n return self._get_path('PEX_PROFILE_FILENAME', default=None)\n\n @property\n def PEX_PROFILE_SORT(self):\n \"\"\"String\n\n Toggle the profile sorting algorithm used to print out profile columns. Default:\n 'cumulative'.\n \"\"\"\n return self._get_string('PEX_PROFILE_SORT', default='cumulative')\n\n @property\n def PEX_PYTHON(self):\n \"\"\"String\n\n Override the Python interpreter used to invoke this PEX. Can be either an absolute path to an\n interpreter or a base name e.g. \"python3.3\". 
If a base name is provided, the $PATH will be\n searched for an appropriate match.\n \"\"\"\n return self._get_string('PEX_PYTHON', default=None)\n\n @property\n def PEX_ROOT(self):\n \"\"\"Directory\n\n The directory location for PEX to cache any dependencies and code. PEX must write\n not-zip-safe eggs and all wheels to disk in order to activate them. Default: ~/.pex\n \"\"\"\n return self._get_path('PEX_ROOT', default=None)\n\n @property\n def PEX_PATH(self):\n \"\"\"A set of one or more PEX files\n\n Merge the packages from other PEX files into the current environment. This allows you to\n do things such as create a PEX file containing the \"coverage\" module or create PEX files\n containing plugin entry points to be consumed by a main application. Paths should be\n specified in the same manner as $PATH, e.g. PEX_PATH=/path/to/pex1.pex:/path/to/pex2.pex\n and so forth.\n \"\"\"\n return self._get_string('PEX_PATH', default='')\n\n @property\n def PEX_SCRIPT(self):\n \"\"\"String\n\n The script name within the PEX environment to execute. This must either be an entry point as\n defined in a distribution's console_scripts, or a script as defined in a distribution's\n scripts section. While Python supports any script including shell scripts, PEX only supports\n invocation of Python scripts in this fashion.\n \"\"\"\n return self._get_string('PEX_SCRIPT', default=None)\n\n @property\n def PEX_TEARDOWN_VERBOSE(self):\n \"\"\"Boolean\n\n Enable verbosity for when the interpreter shuts down. This is mostly only useful for\n debugging PEX itself. Default: false.\n \"\"\"\n return self._get_bool('PEX_TEARDOWN_VERBOSE', default=False)\n\n @property\n def PEX_VERBOSE(self):\n \"\"\"Integer\n\n Set the verbosity level of PEX debug logging. The higher the number, the more logging, with 0\n being disabled. This environment variable can be extremely useful in debugging PEX\n environment issues. Default: 0\n \"\"\"\n return self._get_int('PEX_VERBOSE', default=0)\n\n # TODO(wickman) Remove and push into --flags. 
#94\n @property\n def PEX_HTTP_RETRIES(self):\n \"\"\"Integer\n\n The number of HTTP retries when performing dependency resolution when building a PEX file.\n Default: 5.\n \"\"\"\n return self._get_int('PEX_HTTP_RETRIES', default=5)\n\n\n# Global singleton environment\nENV = Variables()\n", "path": "pex/variables.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n# Due to the PEX_ properties, disable checkstyle.\n# checkstyle: noqa\n\nimport os\nimport sys\nfrom contextlib import contextmanager\n\nfrom .common import die\n\n__all__ = ('ENV', 'Variables')\n\n\nclass Variables(object):\n \"\"\"Environment variables supported by the PEX runtime.\"\"\"\n\n @classmethod\n def process_pydoc(cls, pydoc):\n if pydoc is None:\n return 'Unknown', 'Unknown'\n pydoc = pydoc.splitlines()\n variable_type = pydoc[0]\n variable_text = ' '.join(filter(None, (line.strip() for line in pydoc[2:])))\n return variable_type, variable_text\n\n @classmethod\n def iter_help(cls):\n for variable_name, value in sorted(cls.__dict__.items()):\n if not variable_name.startswith('PEX_'):\n continue\n variable_type, variable_text = cls.process_pydoc(getattr(value, '__doc__'))\n yield variable_name, variable_type, variable_text\n\n def __init__(self, environ=None, rc='~/.pexrc', use_defaults=True):\n self._use_defaults = use_defaults\n self._environ = environ.copy() if environ else os.environ\n if not self.PEX_IGNORE_RCFILES:\n rc_values = self._from_rc(rc).copy()\n rc_values.update(self._environ)\n self._environ = rc_values\n\n def copy(self):\n return self._environ.copy()\n\n def delete(self, variable):\n self._environ.pop(variable, None)\n\n def set(self, variable, value):\n self._environ[variable] = str(value)\n\n def _from_rc(self, rc):\n ret_vars = {}\n for filename in [rc, os.path.join(os.path.dirname(sys.argv[0]), '.pexrc')]:\n try:\n with open(os.path.expanduser(filename)) as fh:\n rc_items = map(self._get_kv, fh)\n ret_vars.update(dict(filter(None, rc_items)))\n except IOError:\n continue\n return ret_vars\n\n def _get_kv(self, variable):\n kv = variable.strip().split('=')\n if len(list(filter(None, kv))) == 2:\n return kv\n\n def _defaulted(self, default):\n return default if self._use_defaults else None\n\n def _get_bool(self, variable, default=False):\n value = self._environ.get(variable)\n if value is not None:\n if value.lower() in ('0', 'false'):\n return False\n elif value.lower() in ('1', 'true'):\n return True\n else:\n die('Invalid value for %s, must be 0/1/false/true, got %r' % (variable, value))\n else:\n return self._defaulted(default)\n\n def _get_string(self, variable, default=None):\n return self._environ.get(variable, self._defaulted(default))\n\n def _get_path(self, variable, default=None):\n value = self._get_string(variable, default=default)\n if value is not None:\n return os.path.realpath(os.path.expanduser(value))\n\n def _get_int(self, variable, default=None):\n try:\n return int(self._environ[variable])\n except ValueError:\n die('Invalid value for %s, must be an integer, got %r' % (variable, self._environ[variable]))\n except KeyError:\n return self._defaulted(default)\n\n def strip_defaults(self):\n \"\"\"Returns a copy of these variables but with defaults stripped.\n\n Any variables not explicitly set in the environment will have a value of `None`.\n \"\"\"\n return Variables(environ=self.copy(), use_defaults=False)\n\n @contextmanager\n def patch(self, **kw):\n 
\"\"\"Update the environment for the duration of a context.\"\"\"\n old_environ = self._environ\n self._environ = self._environ.copy()\n self._environ.update(kw)\n yield\n self._environ = old_environ\n\n @property\n def PEX_ALWAYS_CACHE(self):\n \"\"\"Boolean\n\n Always write PEX dependencies to disk prior to invoking regardless whether or not the\n dependencies are zip-safe. For certain dependencies that are very large such as numpy, this\n can reduce the RAM necessary to launch the PEX. The data will be written into $PEX_ROOT,\n which by default is $HOME/.pex. Default: false.\n \"\"\"\n return self._get_bool('PEX_ALWAYS_CACHE', default=False)\n\n @property\n def PEX_COVERAGE(self):\n \"\"\"Boolean\n\n Enable coverage reporting for this PEX file. This requires that the \"coverage\" module is\n available in the PEX environment. Default: false.\n \"\"\"\n return self._get_bool('PEX_COVERAGE', default=False)\n\n @property\n def PEX_COVERAGE_FILENAME(self):\n \"\"\"Filename\n\n Write the coverage data to the specified filename. If PEX_COVERAGE_FILENAME is not specified\n but PEX_COVERAGE is, coverage information will be printed to stdout and not saved.\n \"\"\"\n return self._get_path('PEX_COVERAGE_FILENAME', default=None)\n\n @property\n def PEX_FORCE_LOCAL(self):\n \"\"\"Boolean\n\n Force this PEX to be not-zip-safe. This forces all code and dependencies to be written into\n $PEX_ROOT prior to invocation. This is an option for applications with static assets that\n refer to paths relative to __file__ instead of using pkgutil/pkg_resources. Default: false.\n \"\"\"\n return self._get_bool('PEX_FORCE_LOCAL', default=False)\n\n @property\n def PEX_IGNORE_ERRORS(self):\n \"\"\"Boolean\n\n Ignore any errors resolving dependencies when invoking the PEX file. This can be useful if you\n know that a particular failing dependency is not necessary to run the application. Default:\n false.\n \"\"\"\n return self._get_bool('PEX_IGNORE_ERRORS', default=False)\n\n @property\n def PEX_INHERIT_PATH(self):\n \"\"\"Boolean\n\n Allow inheriting packages from site-packages. By default, PEX scrubs any packages and\n namespace packages from sys.path prior to invoking the application. This is generally not\n advised, but can be used in situations when certain dependencies do not conform to standard\n packaging practices and thus cannot be bundled into PEX files. Default: false.\n \"\"\"\n return self._get_bool('PEX_INHERIT_PATH', default=False)\n\n @property\n def PEX_INTERPRETER(self):\n \"\"\"Boolean\n\n Drop into a REPL instead of invoking the predefined entry point of this PEX. This can be\n useful for inspecting the PEX environment interactively. It can also be used to treat the PEX\n file as an interpreter in order to execute other scripts in the context of the PEX file, e.g.\n \"PEX_INTERPRETER=1 ./app.pex my_script.py\". Equivalent to setting PEX_MODULE to empty.\n Default: false.\n \"\"\"\n return self._get_bool('PEX_INTERPRETER', default=False)\n\n @property\n def PEX_MODULE(self):\n \"\"\"String\n\n Override the entry point into the PEX file. Can either be a module, e.g. 'SimpleHTTPServer',\n or a specific entry point in module:symbol form, e.g. \"myapp.bin:main\".\n \"\"\"\n return self._get_string('PEX_MODULE', default=None)\n\n @property\n def PEX_PROFILE(self):\n \"\"\"Boolean\n\n Enable application profiling. 
If specified and PEX_PROFILE_FILENAME is not specified, PEX will\n print profiling information to stdout.\n \"\"\"\n return self._get_path('PEX_PROFILE', default=None)\n\n @property\n def PEX_PROFILE_FILENAME(self):\n \"\"\"Filename\n\n Profile the application and dump a profile into the specified filename in the standard\n \"profile\" module format.\n \"\"\"\n return self._get_path('PEX_PROFILE_FILENAME', default=None)\n\n @property\n def PEX_PROFILE_SORT(self):\n \"\"\"String\n\n Toggle the profile sorting algorithm used to print out profile columns. Default:\n 'cumulative'.\n \"\"\"\n return self._get_string('PEX_PROFILE_SORT', default='cumulative')\n\n @property\n def PEX_PYTHON(self):\n \"\"\"String\n\n Override the Python interpreter used to invoke this PEX. Can be either an absolute path to an\n interpreter or a base name e.g. \"python3.3\". If a base name is provided, the $PATH will be\n searched for an appropriate match.\n \"\"\"\n return self._get_string('PEX_PYTHON', default=None)\n\n @property\n def PEX_ROOT(self):\n \"\"\"Directory\n\n The directory location for PEX to cache any dependencies and code. PEX must write\n not-zip-safe eggs and all wheels to disk in order to activate them. Default: ~/.pex\n \"\"\"\n return self._get_path('PEX_ROOT', default=None)\n\n @property\n def PEX_PATH(self):\n \"\"\"A set of one or more PEX files\n\n Merge the packages from other PEX files into the current environment. This allows you to\n do things such as create a PEX file containing the \"coverage\" module or create PEX files\n containing plugin entry points to be consumed by a main application. Paths should be\n specified in the same manner as $PATH, e.g. PEX_PATH=/path/to/pex1.pex:/path/to/pex2.pex\n and so forth.\n \"\"\"\n return self._get_string('PEX_PATH', default='')\n\n @property\n def PEX_SCRIPT(self):\n \"\"\"String\n\n The script name within the PEX environment to execute. This must either be an entry point as\n defined in a distribution's console_scripts, or a script as defined in a distribution's\n scripts section. While Python supports any script including shell scripts, PEX only supports\n invocation of Python scripts in this fashion.\n \"\"\"\n return self._get_string('PEX_SCRIPT', default=None)\n\n @property\n def PEX_TEARDOWN_VERBOSE(self):\n \"\"\"Boolean\n\n Enable verbosity for when the interpreter shuts down. This is mostly only useful for\n debugging PEX itself. Default: false.\n \"\"\"\n return self._get_bool('PEX_TEARDOWN_VERBOSE', default=False)\n\n @property\n def PEX_VERBOSE(self):\n \"\"\"Integer\n\n Set the verbosity level of PEX debug logging. The higher the number, the more logging, with 0\n being disabled. This environment variable can be extremely useful in debugging PEX\n environment issues. Default: 0\n \"\"\"\n return self._get_int('PEX_VERBOSE', default=0)\n\n # TODO(wickman) Remove and push into --flags. #94\n @property\n def PEX_HTTP_RETRIES(self):\n \"\"\"Integer\n\n The number of HTTP retries when performing dependency resolution when building a PEX file.\n Default: 5.\n \"\"\"\n return self._get_int('PEX_HTTP_RETRIES', default=5)\n\n @property\n def PEX_IGNORE_RCFILES(self):\n \"\"\"Boolean\n\n Explicitly disable the reading/parsing of pexrc files (~/.pexrc). Default: false.\n \"\"\"\n return self._get_bool('PEX_IGNORE_RCFILES', default=False)\n\n\n# Global singleton environment\nENV = Variables()\n", "path": "pex/variables.py"}]}
| 3,675 | 555 |
gh_patches_debug_35961
|
rasdani/github-patches
|
git_diff
|
ESMCI__cime-2518
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
notification options not preserved on resubmit
... got emails regarding a job I submitted via case.submit; the case had RESUBMIT=1, and I didn't get emails from the resubmission.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/lib/CIME/case/case_submit.py`
Content:
```
1 #!/usr/bin/env python
2
3 """
4 case.submit - Submit a cesm workflow to the queueing system or run it
5 if there is no queueing system. A cesm workflow may include multiple
6 jobs.
7 submit, check_case and check_da_settings are members of class Case in file case.py
8 """
9 import socket
10 from CIME.XML.standard_module_setup import *
11 from CIME.utils import expect, run_and_log_case_status, verbatim_success_msg
12 from CIME.locked_files import unlock_file, lock_file
13 from CIME.test_status import *
14
15 logger = logging.getLogger(__name__)
16
17 def _submit(case, job=None, no_batch=False, prereq=None, resubmit=False,
18 skip_pnl=False, mail_user=None, mail_type=None, batch_args=None):
19 if job is None:
20 job = case.get_primary_job()
21
22 rundir = case.get_value("RUNDIR")
23 if job != "case.test":
24 continue_run = case.get_value("CONTINUE_RUN")
25 expect(os.path.isdir(rundir) or not continue_run,
26 " CONTINUE_RUN is true but RUNDIR {} does not exist".format(rundir))
27
28 # if case.submit is called with the no_batch flag then we assume that this
29 # flag will stay in effect for the duration of the RESUBMITs
30 env_batch = case.get_env("batch")
31 if resubmit:
32 if env_batch.get_batch_system_type() == "none":
33 no_batch = True
34
35 # This is a resubmission, do not reinitialize test values
36 if job == "case.test":
37 case.set_value("IS_FIRST_RUN", False)
38
39 resub = case.get_value("RESUBMIT")
40 logger.info("Submitting job '{}', resubmit={:d}".format(job, resub))
41 case.set_value("RESUBMIT", resub-1)
42 if case.get_value("RESUBMIT_SETS_CONTINUE_RUN"):
43 case.set_value("CONTINUE_RUN", True)
44
45 else:
46 if job == "case.test":
47 case.set_value("IS_FIRST_RUN", True)
48
49 if no_batch:
50 batch_system = "none"
51 else:
52 batch_system = env_batch.get_batch_system_type()
53
54 case.set_value("BATCH_SYSTEM", batch_system)
55
56 env_batch_has_changed = False
57 try:
58 case.check_lockedfile(os.path.basename(env_batch.filename))
59 except SystemExit:
60 env_batch_has_changed = True
61
62 if env_batch.get_batch_system_type() != "none" and env_batch_has_changed:
63 # May need to regen batch files if user made batch setting changes (e.g. walltime, queue, etc)
64 logger.warning(\
65 """
66 env_batch.xml appears to have changed, regenerating batch scripts
67 manual edits to these file will be lost!
68 """)
69 env_batch.make_all_batch_files(case)
70
71 unlock_file(os.path.basename(env_batch.filename))
72 lock_file(os.path.basename(env_batch.filename))
73
74 if job == case.get_primary_job():
75 case.check_case()
76 case.check_DA_settings()
77 if case.get_value("MACH") == "mira":
78 with open(".original_host", "w") as fd:
79 fd.write( socket.gethostname())
80
81 #Load Modules
82 case.load_env()
83
84 case.flush()
85
86 logger.warning("submit_jobs {}".format(job))
87 job_ids = case.submit_jobs(no_batch=no_batch, job=job, skip_pnl=skip_pnl,
88 prereq=prereq, mail_user=mail_user,
89 mail_type=mail_type, batch_args=batch_args)
90
91 xml_jobids = []
92 for jobname, jobid in job_ids.items():
93 logger.info("Submitted job {} with id {}".format(jobname, jobid))
94 if jobid:
95 xml_jobids.append("{}:{}".format(jobname, jobid))
96
97 xml_jobid_text = ", ".join(xml_jobids)
98 if xml_jobid_text:
99 case.set_value("JOB_IDS", xml_jobid_text)
100
101 return xml_jobid_text
102
103 def submit(self, job=None, no_batch=False, prereq=None, resubmit=False,
104 skip_pnl=False, mail_user=None, mail_type=None, batch_args=None):
105 if self.get_value("TEST"):
106 caseroot = self.get_value("CASEROOT")
107 casebaseid = self.get_value("CASEBASEID")
108 # This should take care of the race condition where the submitted job
109 # begins immediately and tries to set RUN phase. We proactively assume
110 # a passed SUBMIT phase. If this state is already PASS, don't set it again
111 # because then we'll lose RUN phase info if it's there. This info is important
112 # for system_tests_common to know if it needs to reinitialize the test or not.
113 with TestStatus(test_dir=caseroot, test_name=casebaseid) as ts:
114 phase_status = ts.get_status(SUBMIT_PHASE)
115 if phase_status != TEST_PASS_STATUS:
116 ts.set_status(SUBMIT_PHASE, TEST_PASS_STATUS)
117
118 try:
119 functor = lambda: _submit(self, job=job, no_batch=no_batch, prereq=prereq,
120 resubmit=resubmit, skip_pnl=skip_pnl,
121 mail_user=mail_user, mail_type=mail_type,
122 batch_args=batch_args)
123 run_and_log_case_status(functor, "case.submit", caseroot=self.get_value("CASEROOT"),
124 custom_success_msg_functor=verbatim_success_msg)
125 except:
126 # If something failed in the batch system, make sure to mark
127 # the test as failed if we are running a test.
128 if self.get_value("TEST"):
129 with TestStatus(test_dir=caseroot, test_name=casebaseid) as ts:
130 ts.set_status(SUBMIT_PHASE, TEST_FAIL_STATUS)
131
132 raise
133
134 def check_case(self):
135 self.check_lockedfiles()
136 self.create_namelists() # Must be called before check_all_input_data
137 logger.info("Checking that inputdata is available as part of case submission")
138 self.check_all_input_data()
139
140 expect(self.get_value("BUILD_COMPLETE"), "Build complete is "
141 "not True please rebuild the model by calling case.build")
142 logger.info("Check case OK")
143
144 def check_DA_settings(self):
145 script = self.get_value("DATA_ASSIMILATION_SCRIPT")
146 cycles = self.get_value("DATA_ASSIMILATION_CYCLES")
147 if len(script) > 0 and os.path.isfile(script) and cycles > 0:
148 logger.info("Data Assimilation enabled using script {} with {:d} cycles".format(script,cycles))
149
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scripts/lib/CIME/case/case_submit.py b/scripts/lib/CIME/case/case_submit.py
--- a/scripts/lib/CIME/case/case_submit.py
+++ b/scripts/lib/CIME/case/case_submit.py
@@ -7,6 +7,7 @@
submit, check_case and check_da_settings are members of class Case in file case.py
"""
import socket
+from six.moves import configparser
from CIME.XML.standard_module_setup import *
from CIME.utils import expect, run_and_log_case_status, verbatim_success_msg
from CIME.locked_files import unlock_file, lock_file
@@ -115,12 +116,28 @@
if phase_status != TEST_PASS_STATUS:
ts.set_status(SUBMIT_PHASE, TEST_PASS_STATUS)
+ # If this is a resubmit check the hidden file .submit_options for
+ # any submit options used on the original submit and use them again
+ caseroot = self.get_value("CASEROOT")
+ submit_options = os.path.join(caseroot, ".submit_options")
+ if resubmit and os.path.exists(submit_options):
+ config = configparser.SafeConfigParser()
+ config.read(submit_options)
+ if not skip_pnl and config.has_option('SubmitOptions','skip_pnl'):
+ skip_pnl = config.getboolean('SubmitOptions', 'skip_pnl')
+ if mail_user is None and config.has_option('SubmitOptions', 'mail_user'):
+ mail_user = config.get('SubmitOptions', 'mail_user')
+ if mail_type is None and config.has_option('SubmitOptions', 'mail_type'):
+ mail_type = config.get('SubmitOptions', 'mail_type').split(',')
+ if batch_args is None and config.has_option('SubmitOptions', 'batch_args'):
+ batch_args = config.get('SubmitOptions', 'batch_args')
+
try:
functor = lambda: _submit(self, job=job, no_batch=no_batch, prereq=prereq,
resubmit=resubmit, skip_pnl=skip_pnl,
mail_user=mail_user, mail_type=mail_type,
batch_args=batch_args)
- run_and_log_case_status(functor, "case.submit", caseroot=self.get_value("CASEROOT"),
+ run_and_log_case_status(functor, "case.submit", caseroot=caseroot,
custom_success_msg_functor=verbatim_success_msg)
except:
# If something failed in the batch system, make sure to mark
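
The reading side above expects a hidden $CASEROOT/.submit_options INI file with a [SubmitOptions] section (presumably written when the case is first submitted; that half of the change is not shown here). A hypothetical example of the file and of how the patch consumes it — the option values are made up for illustration:

```python
import configparser  # the patch itself goes through six.moves for Python 2 compatibility

sample = """\
[SubmitOptions]
skip_pnl = False
mail_user = user@example.com
mail_type = begin,end,fail
batch_args = --account myproject
"""

with open(".submit_options", "w") as fh:
    fh.write(sample)

config = configparser.ConfigParser()
config.read(".submit_options")
if config.has_option("SubmitOptions", "mail_type"):
    # Stored as a comma-separated string; split back into the list form
    # that case.submit passes along to the batch system.
    print(config.get("SubmitOptions", "mail_type").split(","))
```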
|
{"golden_diff": "diff --git a/scripts/lib/CIME/case/case_submit.py b/scripts/lib/CIME/case/case_submit.py\n--- a/scripts/lib/CIME/case/case_submit.py\n+++ b/scripts/lib/CIME/case/case_submit.py\n@@ -7,6 +7,7 @@\n submit, check_case and check_da_settings are members of class Case in file case.py\n \"\"\"\n import socket\n+from six.moves import configparser\n from CIME.XML.standard_module_setup import *\n from CIME.utils import expect, run_and_log_case_status, verbatim_success_msg\n from CIME.locked_files import unlock_file, lock_file\n@@ -115,12 +116,28 @@\n if phase_status != TEST_PASS_STATUS:\n ts.set_status(SUBMIT_PHASE, TEST_PASS_STATUS)\n \n+ # If this is a resubmit check the hidden file .submit_options for\n+ # any submit options used on the original submit and use them again\n+ caseroot = self.get_value(\"CASEROOT\")\n+ submit_options = os.path.join(caseroot, \".submit_options\")\n+ if resubmit and os.path.exists(submit_options):\n+ config = configparser.SafeConfigParser()\n+ config.read(submit_options)\n+ if not skip_pnl and config.has_option('SubmitOptions','skip_pnl'):\n+ skip_pnl = config.getboolean('SubmitOptions', 'skip_pnl')\n+ if mail_user is None and config.has_option('SubmitOptions', 'mail_user'):\n+ mail_user = config.get('SubmitOptions', 'mail_user')\n+ if mail_type is None and config.has_option('SubmitOptions', 'mail_type'):\n+ mail_type = config.get('SubmitOptions', 'mail_type').split(',')\n+ if batch_args is None and config.has_option('SubmitOptions', 'batch_args'):\n+ batch_args = config.get('SubmitOptions', 'batch_args')\n+\n try:\n functor = lambda: _submit(self, job=job, no_batch=no_batch, prereq=prereq,\n resubmit=resubmit, skip_pnl=skip_pnl,\n mail_user=mail_user, mail_type=mail_type,\n batch_args=batch_args)\n- run_and_log_case_status(functor, \"case.submit\", caseroot=self.get_value(\"CASEROOT\"),\n+ run_and_log_case_status(functor, \"case.submit\", caseroot=caseroot,\n custom_success_msg_functor=verbatim_success_msg)\n except:\n # If something failed in the batch system, make sure to mark\n", "issue": "notification options not preserved on resubmit\n... got emails regarding a job I submitted via case.submit, had RESUBMIT=1, and I didn't get emails from the resubmission. \n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\ncase.submit - Submit a cesm workflow to the queueing system or run it\nif there is no queueing system. 
A cesm workflow may include multiple\njobs.\nsubmit, check_case and check_da_settings are members of class Case in file case.py\n\"\"\"\nimport socket\nfrom CIME.XML.standard_module_setup import *\nfrom CIME.utils import expect, run_and_log_case_status, verbatim_success_msg\nfrom CIME.locked_files import unlock_file, lock_file\nfrom CIME.test_status import *\n\nlogger = logging.getLogger(__name__)\n\ndef _submit(case, job=None, no_batch=False, prereq=None, resubmit=False,\n skip_pnl=False, mail_user=None, mail_type=None, batch_args=None):\n if job is None:\n job = case.get_primary_job()\n\n rundir = case.get_value(\"RUNDIR\")\n if job != \"case.test\":\n continue_run = case.get_value(\"CONTINUE_RUN\")\n expect(os.path.isdir(rundir) or not continue_run,\n \" CONTINUE_RUN is true but RUNDIR {} does not exist\".format(rundir))\n\n # if case.submit is called with the no_batch flag then we assume that this\n # flag will stay in effect for the duration of the RESUBMITs\n env_batch = case.get_env(\"batch\")\n if resubmit:\n if env_batch.get_batch_system_type() == \"none\":\n no_batch = True\n\n # This is a resubmission, do not reinitialize test values\n if job == \"case.test\":\n case.set_value(\"IS_FIRST_RUN\", False)\n\n resub = case.get_value(\"RESUBMIT\")\n logger.info(\"Submitting job '{}', resubmit={:d}\".format(job, resub))\n case.set_value(\"RESUBMIT\", resub-1)\n if case.get_value(\"RESUBMIT_SETS_CONTINUE_RUN\"):\n case.set_value(\"CONTINUE_RUN\", True)\n\n else:\n if job == \"case.test\":\n case.set_value(\"IS_FIRST_RUN\", True)\n\n if no_batch:\n batch_system = \"none\"\n else:\n batch_system = env_batch.get_batch_system_type()\n\n case.set_value(\"BATCH_SYSTEM\", batch_system)\n\n env_batch_has_changed = False\n try:\n case.check_lockedfile(os.path.basename(env_batch.filename))\n except SystemExit:\n env_batch_has_changed = True\n\n if env_batch.get_batch_system_type() != \"none\" and env_batch_has_changed:\n # May need to regen batch files if user made batch setting changes (e.g. walltime, queue, etc)\n logger.warning(\\\n\"\"\"\nenv_batch.xml appears to have changed, regenerating batch scripts\nmanual edits to these file will be lost!\n\"\"\")\n env_batch.make_all_batch_files(case)\n\n unlock_file(os.path.basename(env_batch.filename))\n lock_file(os.path.basename(env_batch.filename))\n\n if job == case.get_primary_job():\n case.check_case()\n case.check_DA_settings()\n if case.get_value(\"MACH\") == \"mira\":\n with open(\".original_host\", \"w\") as fd:\n fd.write( socket.gethostname())\n\n #Load Modules\n case.load_env()\n\n case.flush()\n\n logger.warning(\"submit_jobs {}\".format(job))\n job_ids = case.submit_jobs(no_batch=no_batch, job=job, skip_pnl=skip_pnl,\n prereq=prereq, mail_user=mail_user,\n mail_type=mail_type, batch_args=batch_args)\n\n xml_jobids = []\n for jobname, jobid in job_ids.items():\n logger.info(\"Submitted job {} with id {}\".format(jobname, jobid))\n if jobid:\n xml_jobids.append(\"{}:{}\".format(jobname, jobid))\n\n xml_jobid_text = \", \".join(xml_jobids)\n if xml_jobid_text:\n case.set_value(\"JOB_IDS\", xml_jobid_text)\n\n return xml_jobid_text\n\ndef submit(self, job=None, no_batch=False, prereq=None, resubmit=False,\n skip_pnl=False, mail_user=None, mail_type=None, batch_args=None):\n if self.get_value(\"TEST\"):\n caseroot = self.get_value(\"CASEROOT\")\n casebaseid = self.get_value(\"CASEBASEID\")\n # This should take care of the race condition where the submitted job\n # begins immediately and tries to set RUN phase. 
We proactively assume\n # a passed SUBMIT phase. If this state is already PASS, don't set it again\n # because then we'll lose RUN phase info if it's there. This info is important\n # for system_tests_common to know if it needs to reinitialize the test or not.\n with TestStatus(test_dir=caseroot, test_name=casebaseid) as ts:\n phase_status = ts.get_status(SUBMIT_PHASE)\n if phase_status != TEST_PASS_STATUS:\n ts.set_status(SUBMIT_PHASE, TEST_PASS_STATUS)\n\n try:\n functor = lambda: _submit(self, job=job, no_batch=no_batch, prereq=prereq,\n resubmit=resubmit, skip_pnl=skip_pnl,\n mail_user=mail_user, mail_type=mail_type,\n batch_args=batch_args)\n run_and_log_case_status(functor, \"case.submit\", caseroot=self.get_value(\"CASEROOT\"),\n custom_success_msg_functor=verbatim_success_msg)\n except:\n # If something failed in the batch system, make sure to mark\n # the test as failed if we are running a test.\n if self.get_value(\"TEST\"):\n with TestStatus(test_dir=caseroot, test_name=casebaseid) as ts:\n ts.set_status(SUBMIT_PHASE, TEST_FAIL_STATUS)\n\n raise\n\ndef check_case(self):\n self.check_lockedfiles()\n self.create_namelists() # Must be called before check_all_input_data\n logger.info(\"Checking that inputdata is available as part of case submission\")\n self.check_all_input_data()\n\n expect(self.get_value(\"BUILD_COMPLETE\"), \"Build complete is \"\n \"not True please rebuild the model by calling case.build\")\n logger.info(\"Check case OK\")\n\ndef check_DA_settings(self):\n script = self.get_value(\"DATA_ASSIMILATION_SCRIPT\")\n cycles = self.get_value(\"DATA_ASSIMILATION_CYCLES\")\n if len(script) > 0 and os.path.isfile(script) and cycles > 0:\n logger.info(\"Data Assimilation enabled using script {} with {:d} cycles\".format(script,cycles))\n", "path": "scripts/lib/CIME/case/case_submit.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\ncase.submit - Submit a cesm workflow to the queueing system or run it\nif there is no queueing system. 
A cesm workflow may include multiple\njobs.\nsubmit, check_case and check_da_settings are members of class Case in file case.py\n\"\"\"\nimport socket\nfrom six.moves import configparser\nfrom CIME.XML.standard_module_setup import *\nfrom CIME.utils import expect, run_and_log_case_status, verbatim_success_msg\nfrom CIME.locked_files import unlock_file, lock_file\nfrom CIME.test_status import *\n\nlogger = logging.getLogger(__name__)\n\ndef _submit(case, job=None, no_batch=False, prereq=None, resubmit=False,\n skip_pnl=False, mail_user=None, mail_type=None, batch_args=None):\n if job is None:\n job = case.get_primary_job()\n\n rundir = case.get_value(\"RUNDIR\")\n if job != \"case.test\":\n continue_run = case.get_value(\"CONTINUE_RUN\")\n expect(os.path.isdir(rundir) or not continue_run,\n \" CONTINUE_RUN is true but RUNDIR {} does not exist\".format(rundir))\n\n # if case.submit is called with the no_batch flag then we assume that this\n # flag will stay in effect for the duration of the RESUBMITs\n env_batch = case.get_env(\"batch\")\n if resubmit:\n if env_batch.get_batch_system_type() == \"none\":\n no_batch = True\n\n # This is a resubmission, do not reinitialize test values\n if job == \"case.test\":\n case.set_value(\"IS_FIRST_RUN\", False)\n\n resub = case.get_value(\"RESUBMIT\")\n logger.info(\"Submitting job '{}', resubmit={:d}\".format(job, resub))\n case.set_value(\"RESUBMIT\", resub-1)\n if case.get_value(\"RESUBMIT_SETS_CONTINUE_RUN\"):\n case.set_value(\"CONTINUE_RUN\", True)\n\n else:\n if job == \"case.test\":\n case.set_value(\"IS_FIRST_RUN\", True)\n\n if no_batch:\n batch_system = \"none\"\n else:\n batch_system = env_batch.get_batch_system_type()\n\n case.set_value(\"BATCH_SYSTEM\", batch_system)\n\n env_batch_has_changed = False\n try:\n case.check_lockedfile(os.path.basename(env_batch.filename))\n except SystemExit:\n env_batch_has_changed = True\n\n if env_batch.get_batch_system_type() != \"none\" and env_batch_has_changed:\n # May need to regen batch files if user made batch setting changes (e.g. 
walltime, queue, etc)\n logger.warning(\\\n\"\"\"\nenv_batch.xml appears to have changed, regenerating batch scripts\nmanual edits to these file will be lost!\n\"\"\")\n env_batch.make_all_batch_files(case)\n\n unlock_file(os.path.basename(env_batch.filename))\n lock_file(os.path.basename(env_batch.filename))\n\n if job == case.get_primary_job():\n case.check_case()\n case.check_DA_settings()\n if case.get_value(\"MACH\") == \"mira\":\n with open(\".original_host\", \"w\") as fd:\n fd.write( socket.gethostname())\n\n #Load Modules\n case.load_env()\n\n case.flush()\n\n logger.warning(\"submit_jobs {}\".format(job))\n job_ids = case.submit_jobs(no_batch=no_batch, job=job, skip_pnl=skip_pnl,\n prereq=prereq, mail_user=mail_user,\n mail_type=mail_type, batch_args=batch_args)\n\n xml_jobids = []\n for jobname, jobid in job_ids.items():\n logger.info(\"Submitted job {} with id {}\".format(jobname, jobid))\n if jobid:\n xml_jobids.append(\"{}:{}\".format(jobname, jobid))\n\n xml_jobid_text = \", \".join(xml_jobids)\n if xml_jobid_text:\n case.set_value(\"JOB_IDS\", xml_jobid_text)\n\n return xml_jobid_text\n\ndef submit(self, job=None, no_batch=False, prereq=None, resubmit=False,\n skip_pnl=False, mail_user=None, mail_type=None, batch_args=None):\n if self.get_value(\"TEST\"):\n caseroot = self.get_value(\"CASEROOT\")\n casebaseid = self.get_value(\"CASEBASEID\")\n # This should take care of the race condition where the submitted job\n # begins immediately and tries to set RUN phase. We proactively assume\n # a passed SUBMIT phase. If this state is already PASS, don't set it again\n # because then we'll lose RUN phase info if it's there. This info is important\n # for system_tests_common to know if it needs to reinitialize the test or not.\n with TestStatus(test_dir=caseroot, test_name=casebaseid) as ts:\n phase_status = ts.get_status(SUBMIT_PHASE)\n if phase_status != TEST_PASS_STATUS:\n ts.set_status(SUBMIT_PHASE, TEST_PASS_STATUS)\n\n # If this is a resubmit check the hidden file .submit_options for\n # any submit options used on the original submit and use them again\n caseroot = self.get_value(\"CASEROOT\")\n submit_options = os.path.join(caseroot, \".submit_options\")\n if resubmit and os.path.exists(submit_options):\n config = configparser.SafeConfigParser()\n config.read(submit_options)\n if not skip_pnl and config.has_option('SubmitOptions','skip_pnl'):\n skip_pnl = config.getboolean('SubmitOptions', 'skip_pnl')\n if mail_user is None and config.has_option('SubmitOptions', 'mail_user'):\n mail_user = config.get('SubmitOptions', 'mail_user')\n if mail_type is None and config.has_option('SubmitOptions', 'mail_type'):\n mail_type = config.get('SubmitOptions', 'mail_type').split(',')\n if batch_args is None and config.has_option('SubmitOptions', 'batch_args'):\n batch_args = config.get('SubmitOptions', 'batch_args')\n\n try:\n functor = lambda: _submit(self, job=job, no_batch=no_batch, prereq=prereq,\n resubmit=resubmit, skip_pnl=skip_pnl,\n mail_user=mail_user, mail_type=mail_type,\n batch_args=batch_args)\n run_and_log_case_status(functor, \"case.submit\", caseroot=caseroot,\n custom_success_msg_functor=verbatim_success_msg)\n except:\n # If something failed in the batch system, make sure to mark\n # the test as failed if we are running a test.\n if self.get_value(\"TEST\"):\n with TestStatus(test_dir=caseroot, test_name=casebaseid) as ts:\n ts.set_status(SUBMIT_PHASE, TEST_FAIL_STATUS)\n\n raise\n\ndef check_case(self):\n self.check_lockedfiles()\n self.create_namelists() # Must be 
called before check_all_input_data\n logger.info(\"Checking that inputdata is available as part of case submission\")\n self.check_all_input_data()\n\n expect(self.get_value(\"BUILD_COMPLETE\"), \"Build complete is \"\n \"not True please rebuild the model by calling case.build\")\n logger.info(\"Check case OK\")\n\ndef check_DA_settings(self):\n script = self.get_value(\"DATA_ASSIMILATION_SCRIPT\")\n cycles = self.get_value(\"DATA_ASSIMILATION_CYCLES\")\n if len(script) > 0 and os.path.isfile(script) and cycles > 0:\n logger.info(\"Data Assimilation enabled using script {} with {:d} cycles\".format(script,cycles))\n", "path": "scripts/lib/CIME/case/case_submit.py"}]}
| 2,067 | 549 |
gh_patches_debug_2809
|
rasdani/github-patches
|
git_diff
|
paperless-ngx__paperless-ngx-6608
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Server Error 500 when granting History All
### Description
I just tried to create a user group, granting History All to that group. Pressing save in the dialog results in an Internal Server Error.
I am running a slightly customized ARM64 image, which includes zxing-cpp. I don't believe this is related to the issue.
### Steps to reproduce
1. Go To "Users & Groups"
2. Create a new group
3. Grant "History All" to that Group
4. Press Save
### Webserver logs
```bash
[2024-05-07 13:25:27,270] [ERROR] [django.request] Internal Server Error: /api/groups/34/
Traceback (most recent call last):
File "/usr/local/lib/python3.11/site-packages/asgiref/sync.py", line 518, in thread_handler
raise exc_info[1]
File "/usr/local/lib/python3.11/site-packages/django/core/handlers/exception.py", line 42, in inner
response = await get_response(request)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/asgiref/sync.py", line 518, in thread_handler
raise exc_info[1]
File "/usr/local/lib/python3.11/site-packages/django/core/handlers/base.py", line 253, in _get_response_async
response = await wrapped_callback(
^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/asgiref/sync.py", line 468, in __call__
ret = await asyncio.shield(exec_coro)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/asgiref/current_thread_executor.py", line 40, in run
result = self.fn(*self.args, **self.kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/asgiref/sync.py", line 522, in thread_handler
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/django/views/decorators/csrf.py", line 56, in wrapper_view
return view_func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/rest_framework/viewsets.py", line 125, in view
return self.dispatch(request, *args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/rest_framework/views.py", line 509, in dispatch
response = self.handle_exception(exc)
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/rest_framework/views.py", line 469, in handle_exception
self.raise_uncaught_exception(exc)
File "/usr/local/lib/python3.11/site-packages/rest_framework/views.py", line 480, in raise_uncaught_exception
raise exc
File "/usr/local/lib/python3.11/site-packages/rest_framework/views.py", line 506, in dispatch
response = handler(request, *args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/rest_framework/mixins.py", line 67, in update
serializer.is_valid(raise_exception=True)
File "/usr/local/lib/python3.11/site-packages/rest_framework/serializers.py", line 227, in is_valid
self._validated_data = self.run_validation(self.initial_data)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/rest_framework/serializers.py", line 426, in run_validation
value = self.to_internal_value(data)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/rest_framework/serializers.py", line 483, in to_internal_value
validated_value = field.run_validation(primitive_value)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/rest_framework/fields.py", line 547, in run_validation
value = self.to_internal_value(data)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/rest_framework/relations.py", line 528, in to_internal_value
return [
^
File "/usr/local/lib/python3.11/site-packages/rest_framework/relations.py", line 529, in <listcomp>
self.child_relation.to_internal_value(item)
File "/usr/local/lib/python3.11/site-packages/rest_framework/relations.py", line 462, in to_internal_value
return queryset.get(**{self.slug_field: data})
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/django/db/models/query.py", line 640, in get
raise self.model.MultipleObjectsReturned(
django.contrib.auth.models.Permission.MultipleObjectsReturned: get() returned more than one Permission -- it returned 2!
[2024-05-07 13:25:27,425] [ERROR] [django.request] Internal Server Error: /api/groups/34/
Traceback (most recent call last):
File "/usr/local/lib/python3.11/site-packages/asgiref/sync.py", line 518, in thread_handler
raise exc_info[1]
File "/usr/local/lib/python3.11/site-packages/django/core/handlers/exception.py", line 42, in inner
response = await get_response(request)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/asgiref/sync.py", line 518, in thread_handler
raise exc_info[1]
File "/usr/local/lib/python3.11/site-packages/django/core/handlers/base.py", line 253, in _get_response_async
response = await wrapped_callback(
^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/asgiref/sync.py", line 468, in __call__
ret = await asyncio.shield(exec_coro)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/asgiref/current_thread_executor.py", line 40, in run
result = self.fn(*self.args, **self.kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/asgiref/sync.py", line 522, in thread_handler
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/django/views/decorators/csrf.py", line 56, in wrapper_view
return view_func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/rest_framework/viewsets.py", line 125, in view
return self.dispatch(request, *args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/rest_framework/views.py", line 509, in dispatch
response = self.handle_exception(exc)
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/rest_framework/views.py", line 469, in handle_exception
self.raise_uncaught_exception(exc)
File "/usr/local/lib/python3.11/site-packages/rest_framework/views.py", line 480, in raise_uncaught_exception
raise exc
File "/usr/local/lib/python3.11/site-packages/rest_framework/views.py", line 506, in dispatch
response = handler(request, *args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/rest_framework/mixins.py", line 67, in update
serializer.is_valid(raise_exception=True)
File "/usr/local/lib/python3.11/site-packages/rest_framework/serializers.py", line 227, in is_valid
self._validated_data = self.run_validation(self.initial_data)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/rest_framework/serializers.py", line 426, in run_validation
value = self.to_internal_value(data)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/rest_framework/serializers.py", line 483, in to_internal_value
validated_value = field.run_validation(primitive_value)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/rest_framework/fields.py", line 547, in run_validation
value = self.to_internal_value(data)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/rest_framework/relations.py", line 528, in to_internal_value
return [
^
File "/usr/local/lib/python3.11/site-packages/rest_framework/relations.py", line 529, in <listcomp>
self.child_relation.to_internal_value(item)
File "/usr/local/lib/python3.11/site-packages/rest_framework/relations.py", line 462, in to_internal_value
return queryset.get(**{self.slug_field: data})
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/django/db/models/query.py", line 640, in get
raise self.model.MultipleObjectsReturned(
django.contrib.auth.models.Permission.MultipleObjectsReturned: get() returned more than one Permission -- it returned 2!
```
### Browser logs
_No response_
### Paperless-ngx version
2.8.1
### Host OS
Kubernetes
### Installation method
Other (please describe above)
### Browser
Arc
### Configuration changes
_No response_
### Other
_No response_
### Please confirm the following
- [X] I believe this issue is a bug that affects all users of Paperless-ngx, not something specific to my installation.
- [X] I have already searched for relevant existing issues and discussions before opening this report.
- [X] I have updated the title field above with a concise description.
--- END ISSUE ---
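The traceback shows `SlugRelatedField.to_internal_value` resolving each permission by `codename` alone, so any codename that exists under more than one content type makes `queryset.get(codename=...)` ambiguous. A minimal sketch of the collision, and of the queryset narrowing that avoids it, assuming a configured paperless-ngx Django shell and using `view_logentry` purely as an illustrative duplicate codename:

```python
# Illustrative sketch only; run inside a configured Django shell (python manage.py shell).
# Assumes "view_logentry" is a codename shared by two content types on the affected install.
from django.contrib.auth.models import Permission

dupes = Permission.objects.filter(codename="view_logentry")
print(dupes.count())  # 2 on an affected install, so .get(codename=...) raises MultipleObjectsReturned

# Narrowing the queryset the same way UserSerializer.user_permissions already does
# leaves a single match per codename, so SlugRelatedField can resolve it again:
narrowed = Permission.objects.exclude(content_type__app_label="admin")
print(narrowed.filter(codename="view_logentry").count())  # expected: 1
```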
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/paperless/serialisers.py`
Content:
```
1 import logging
2
3 from allauth.socialaccount.models import SocialAccount
4 from django.contrib.auth.models import Group
5 from django.contrib.auth.models import Permission
6 from django.contrib.auth.models import User
7 from rest_framework import serializers
8
9 from paperless.models import ApplicationConfiguration
10
11 logger = logging.getLogger("paperless.settings")
12
13
14 class ObfuscatedUserPasswordField(serializers.Field):
15 """
16 Sends *** string instead of password in the clear
17 """
18
19 def to_representation(self, value):
20 return "**********" if len(value) > 0 else ""
21
22 def to_internal_value(self, data):
23 return data
24
25
26 class UserSerializer(serializers.ModelSerializer):
27 password = ObfuscatedUserPasswordField(required=False)
28 user_permissions = serializers.SlugRelatedField(
29 many=True,
30 queryset=Permission.objects.exclude(content_type__app_label="admin"),
31 slug_field="codename",
32 required=False,
33 )
34 inherited_permissions = serializers.SerializerMethodField()
35
36 class Meta:
37 model = User
38 fields = (
39 "id",
40 "username",
41 "email",
42 "password",
43 "first_name",
44 "last_name",
45 "date_joined",
46 "is_staff",
47 "is_active",
48 "is_superuser",
49 "groups",
50 "user_permissions",
51 "inherited_permissions",
52 )
53
54 def get_inherited_permissions(self, obj):
55 return obj.get_group_permissions()
56
57 def update(self, instance, validated_data):
58 if "password" in validated_data:
59 if len(validated_data.get("password").replace("*", "")) > 0:
60 instance.set_password(validated_data.get("password"))
61 instance.save()
62 validated_data.pop("password")
63 super().update(instance, validated_data)
64 return instance
65
66 def create(self, validated_data):
67 groups = None
68 if "groups" in validated_data:
69 groups = validated_data.pop("groups")
70 user_permissions = None
71 if "user_permissions" in validated_data:
72 user_permissions = validated_data.pop("user_permissions")
73 password = None
74 if (
75 "password" in validated_data
76 and len(validated_data.get("password").replace("*", "")) > 0
77 ):
78 password = validated_data.pop("password")
79 user = User.objects.create(**validated_data)
80 # set groups
81 if groups:
82 user.groups.set(groups)
83 # set permissions
84 if user_permissions:
85 user.user_permissions.set(user_permissions)
86 # set password
87 if password:
88 user.set_password(password)
89 user.save()
90 return user
91
92
93 class GroupSerializer(serializers.ModelSerializer):
94 permissions = serializers.SlugRelatedField(
95 many=True,
96 queryset=Permission.objects.all(),
97 slug_field="codename",
98 )
99
100 class Meta:
101 model = Group
102 fields = (
103 "id",
104 "name",
105 "permissions",
106 )
107
108
109 class SocialAccountSerializer(serializers.ModelSerializer):
110 name = serializers.SerializerMethodField()
111
112 class Meta:
113 model = SocialAccount
114 fields = (
115 "id",
116 "provider",
117 "name",
118 )
119
120 def get_name(self, obj):
121 return obj.get_provider_account().to_str()
122
123
124 class ProfileSerializer(serializers.ModelSerializer):
125 email = serializers.EmailField(allow_null=False)
126 password = ObfuscatedUserPasswordField(required=False, allow_null=False)
127 auth_token = serializers.SlugRelatedField(read_only=True, slug_field="key")
128 social_accounts = SocialAccountSerializer(
129 many=True,
130 read_only=True,
131 source="socialaccount_set",
132 )
133
134 class Meta:
135 model = User
136 fields = (
137 "email",
138 "password",
139 "first_name",
140 "last_name",
141 "auth_token",
142 "social_accounts",
143 "has_usable_password",
144 )
145
146
147 class ApplicationConfigurationSerializer(serializers.ModelSerializer):
148 user_args = serializers.JSONField(binary=True, allow_null=True)
149
150 def run_validation(self, data):
151 # Empty strings treated as None to avoid unexpected behavior
152 if "user_args" in data and data["user_args"] == "":
153 data["user_args"] = None
154 if "language" in data and data["language"] == "":
155 data["language"] = None
156 return super().run_validation(data)
157
158 def update(self, instance, validated_data):
159 if instance.app_logo and "app_logo" in validated_data:
160 instance.app_logo.delete()
161 return super().update(instance, validated_data)
162
163 class Meta:
164 model = ApplicationConfiguration
165 fields = "__all__"
166
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/paperless/serialisers.py b/src/paperless/serialisers.py
--- a/src/paperless/serialisers.py
+++ b/src/paperless/serialisers.py
@@ -93,7 +93,7 @@
class GroupSerializer(serializers.ModelSerializer):
permissions = serializers.SlugRelatedField(
many=True,
- queryset=Permission.objects.all(),
+ queryset=Permission.objects.exclude(content_type__app_label="admin"),
slug_field="codename",
)
|
{"golden_diff": "diff --git a/src/paperless/serialisers.py b/src/paperless/serialisers.py\n--- a/src/paperless/serialisers.py\n+++ b/src/paperless/serialisers.py\n@@ -93,7 +93,7 @@\n class GroupSerializer(serializers.ModelSerializer):\n permissions = serializers.SlugRelatedField(\n many=True,\n- queryset=Permission.objects.all(),\n+ queryset=Permission.objects.exclude(content_type__app_label=\"admin\"),\n slug_field=\"codename\",\n )\n", "issue": "[BUG] Server Error 500 when granting History All\n### Description\n\nI just tried to create a user group, granting History All to that group. Pressing save in the dialog results in an Internal Server error.\r\n\r\nI am running a slightly customized ARM64 image, which includes zxing-cpp. I don't believe this is related to the issue.\n\n### Steps to reproduce\n\n1. Go To \"Users & Groups\"\r\n2. Create a new group\r\n3. Grant \"History All\" to that Group\r\n4. Press Save\n\n### Webserver logs\n\n```bash\n[2024-05-07 13:25:27,270] [ERROR] [django.request] Internal Server Error: /api/groups/34/\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.11/site-packages/asgiref/sync.py\", line 518, in thread_handler\r\n raise exc_info[1]\r\n File \"/usr/local/lib/python3.11/site-packages/django/core/handlers/exception.py\", line 42, in inner\r\n response = await get_response(request)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/asgiref/sync.py\", line 518, in thread_handler\r\n raise exc_info[1]\r\n File \"/usr/local/lib/python3.11/site-packages/django/core/handlers/base.py\", line 253, in _get_response_async\r\n response = await wrapped_callback(\r\n ^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/asgiref/sync.py\", line 468, in __call__\r\n ret = await asyncio.shield(exec_coro)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/asgiref/current_thread_executor.py\", line 40, in run\r\n result = self.fn(*self.args, **self.kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/asgiref/sync.py\", line 522, in thread_handler\r\n return func(*args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/django/views/decorators/csrf.py\", line 56, in wrapper_view\r\n return view_func(*args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/rest_framework/viewsets.py\", line 125, in view\r\n return self.dispatch(request, *args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/rest_framework/views.py\", line 509, in dispatch\r\n response = self.handle_exception(exc)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/rest_framework/views.py\", line 469, in handle_exception\r\n self.raise_uncaught_exception(exc)\r\n File \"/usr/local/lib/python3.11/site-packages/rest_framework/views.py\", line 480, in raise_uncaught_exception\r\n raise exc\r\n File \"/usr/local/lib/python3.11/site-packages/rest_framework/views.py\", line 506, in dispatch\r\n response = handler(request, *args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/rest_framework/mixins.py\", line 67, in update\r\n serializer.is_valid(raise_exception=True)\r\n File \"/usr/local/lib/python3.11/site-packages/rest_framework/serializers.py\", line 227, in is_valid\r\n self._validated_data = self.run_validation(self.initial_data)\r\n 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/rest_framework/serializers.py\", line 426, in run_validation\r\n value = self.to_internal_value(data)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/rest_framework/serializers.py\", line 483, in to_internal_value\r\n validated_value = field.run_validation(primitive_value)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/rest_framework/fields.py\", line 547, in run_validation\r\n value = self.to_internal_value(data)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/rest_framework/relations.py\", line 528, in to_internal_value\r\n return [\r\n ^\r\n File \"/usr/local/lib/python3.11/site-packages/rest_framework/relations.py\", line 529, in <listcomp>\r\n self.child_relation.to_internal_value(item)\r\n File \"/usr/local/lib/python3.11/site-packages/rest_framework/relations.py\", line 462, in to_internal_value\r\n return queryset.get(**{self.slug_field: data})\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/django/db/models/query.py\", line 640, in get\r\n raise self.model.MultipleObjectsReturned(\r\ndjango.contrib.auth.models.Permission.MultipleObjectsReturned: get() returned more than one Permission -- it returned 2!\r\n[2024-05-07 13:25:27,425] [ERROR] [django.request] Internal Server Error: /api/groups/34/\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.11/site-packages/asgiref/sync.py\", line 518, in thread_handler\r\n raise exc_info[1]\r\n File \"/usr/local/lib/python3.11/site-packages/django/core/handlers/exception.py\", line 42, in inner\r\n response = await get_response(request)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/asgiref/sync.py\", line 518, in thread_handler\r\n raise exc_info[1]\r\n File \"/usr/local/lib/python3.11/site-packages/django/core/handlers/base.py\", line 253, in _get_response_async\r\n response = await wrapped_callback(\r\n ^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/asgiref/sync.py\", line 468, in __call__\r\n ret = await asyncio.shield(exec_coro)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/asgiref/current_thread_executor.py\", line 40, in run\r\n result = self.fn(*self.args, **self.kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/asgiref/sync.py\", line 522, in thread_handler\r\n return func(*args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/django/views/decorators/csrf.py\", line 56, in wrapper_view\r\n return view_func(*args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/rest_framework/viewsets.py\", line 125, in view\r\n return self.dispatch(request, *args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/rest_framework/views.py\", line 509, in dispatch\r\n response = self.handle_exception(exc)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/rest_framework/views.py\", line 469, in handle_exception\r\n self.raise_uncaught_exception(exc)\r\n File \"/usr/local/lib/python3.11/site-packages/rest_framework/views.py\", line 480, in raise_uncaught_exception\r\n raise exc\r\n File \"/usr/local/lib/python3.11/site-packages/rest_framework/views.py\", line 506, in 
dispatch\r\n response = handler(request, *args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/rest_framework/mixins.py\", line 67, in update\r\n serializer.is_valid(raise_exception=True)\r\n File \"/usr/local/lib/python3.11/site-packages/rest_framework/serializers.py\", line 227, in is_valid\r\n self._validated_data = self.run_validation(self.initial_data)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/rest_framework/serializers.py\", line 426, in run_validation\r\n value = self.to_internal_value(data)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/rest_framework/serializers.py\", line 483, in to_internal_value\r\n validated_value = field.run_validation(primitive_value)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/rest_framework/fields.py\", line 547, in run_validation\r\n value = self.to_internal_value(data)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/rest_framework/relations.py\", line 528, in to_internal_value\r\n return [\r\n ^\r\n File \"/usr/local/lib/python3.11/site-packages/rest_framework/relations.py\", line 529, in <listcomp>\r\n self.child_relation.to_internal_value(item)\r\n File \"/usr/local/lib/python3.11/site-packages/rest_framework/relations.py\", line 462, in to_internal_value\r\n return queryset.get(**{self.slug_field: data})\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/django/db/models/query.py\", line 640, in get\r\n raise self.model.MultipleObjectsReturned(\r\ndjango.contrib.auth.models.Permission.MultipleObjectsReturned: get() returned more than one Permission -- it returned 2!\n```\n\n\n### Browser logs\n\n_No response_\n\n### Paperless-ngx version\n\n2.8.1\n\n### Host OS\n\nKubernetes\n\n### Installation method\n\nOther (please describe above)\n\n### Browser\n\nArc\n\n### Configuration changes\n\n_No response_\n\n### Other\n\n_No response_\n\n### Please confirm the following\n\n- [X] I believe this issue is a bug that affects all users of Paperless-ngx, not something specific to my installation.\n- [X] I have already searched for relevant existing issues and discussions before opening this report.\n- [X] I have updated the title field above with a concise description.\n", "before_files": [{"content": "import logging\n\nfrom allauth.socialaccount.models import SocialAccount\nfrom django.contrib.auth.models import Group\nfrom django.contrib.auth.models import Permission\nfrom django.contrib.auth.models import User\nfrom rest_framework import serializers\n\nfrom paperless.models import ApplicationConfiguration\n\nlogger = logging.getLogger(\"paperless.settings\")\n\n\nclass ObfuscatedUserPasswordField(serializers.Field):\n \"\"\"\n Sends *** string instead of password in the clear\n \"\"\"\n\n def to_representation(self, value):\n return \"**********\" if len(value) > 0 else \"\"\n\n def to_internal_value(self, data):\n return data\n\n\nclass UserSerializer(serializers.ModelSerializer):\n password = ObfuscatedUserPasswordField(required=False)\n user_permissions = serializers.SlugRelatedField(\n many=True,\n queryset=Permission.objects.exclude(content_type__app_label=\"admin\"),\n slug_field=\"codename\",\n required=False,\n )\n inherited_permissions = serializers.SerializerMethodField()\n\n class Meta:\n model = User\n fields = (\n \"id\",\n \"username\",\n \"email\",\n \"password\",\n 
\"first_name\",\n \"last_name\",\n \"date_joined\",\n \"is_staff\",\n \"is_active\",\n \"is_superuser\",\n \"groups\",\n \"user_permissions\",\n \"inherited_permissions\",\n )\n\n def get_inherited_permissions(self, obj):\n return obj.get_group_permissions()\n\n def update(self, instance, validated_data):\n if \"password\" in validated_data:\n if len(validated_data.get(\"password\").replace(\"*\", \"\")) > 0:\n instance.set_password(validated_data.get(\"password\"))\n instance.save()\n validated_data.pop(\"password\")\n super().update(instance, validated_data)\n return instance\n\n def create(self, validated_data):\n groups = None\n if \"groups\" in validated_data:\n groups = validated_data.pop(\"groups\")\n user_permissions = None\n if \"user_permissions\" in validated_data:\n user_permissions = validated_data.pop(\"user_permissions\")\n password = None\n if (\n \"password\" in validated_data\n and len(validated_data.get(\"password\").replace(\"*\", \"\")) > 0\n ):\n password = validated_data.pop(\"password\")\n user = User.objects.create(**validated_data)\n # set groups\n if groups:\n user.groups.set(groups)\n # set permissions\n if user_permissions:\n user.user_permissions.set(user_permissions)\n # set password\n if password:\n user.set_password(password)\n user.save()\n return user\n\n\nclass GroupSerializer(serializers.ModelSerializer):\n permissions = serializers.SlugRelatedField(\n many=True,\n queryset=Permission.objects.all(),\n slug_field=\"codename\",\n )\n\n class Meta:\n model = Group\n fields = (\n \"id\",\n \"name\",\n \"permissions\",\n )\n\n\nclass SocialAccountSerializer(serializers.ModelSerializer):\n name = serializers.SerializerMethodField()\n\n class Meta:\n model = SocialAccount\n fields = (\n \"id\",\n \"provider\",\n \"name\",\n )\n\n def get_name(self, obj):\n return obj.get_provider_account().to_str()\n\n\nclass ProfileSerializer(serializers.ModelSerializer):\n email = serializers.EmailField(allow_null=False)\n password = ObfuscatedUserPasswordField(required=False, allow_null=False)\n auth_token = serializers.SlugRelatedField(read_only=True, slug_field=\"key\")\n social_accounts = SocialAccountSerializer(\n many=True,\n read_only=True,\n source=\"socialaccount_set\",\n )\n\n class Meta:\n model = User\n fields = (\n \"email\",\n \"password\",\n \"first_name\",\n \"last_name\",\n \"auth_token\",\n \"social_accounts\",\n \"has_usable_password\",\n )\n\n\nclass ApplicationConfigurationSerializer(serializers.ModelSerializer):\n user_args = serializers.JSONField(binary=True, allow_null=True)\n\n def run_validation(self, data):\n # Empty strings treated as None to avoid unexpected behavior\n if \"user_args\" in data and data[\"user_args\"] == \"\":\n data[\"user_args\"] = None\n if \"language\" in data and data[\"language\"] == \"\":\n data[\"language\"] = None\n return super().run_validation(data)\n\n def update(self, instance, validated_data):\n if instance.app_logo and \"app_logo\" in validated_data:\n instance.app_logo.delete()\n return super().update(instance, validated_data)\n\n class Meta:\n model = ApplicationConfiguration\n fields = \"__all__\"\n", "path": "src/paperless/serialisers.py"}], "after_files": [{"content": "import logging\n\nfrom allauth.socialaccount.models import SocialAccount\nfrom django.contrib.auth.models import Group\nfrom django.contrib.auth.models import Permission\nfrom django.contrib.auth.models import User\nfrom rest_framework import serializers\n\nfrom paperless.models import ApplicationConfiguration\n\nlogger = 
logging.getLogger(\"paperless.settings\")\n\n\nclass ObfuscatedUserPasswordField(serializers.Field):\n \"\"\"\n Sends *** string instead of password in the clear\n \"\"\"\n\n def to_representation(self, value):\n return \"**********\" if len(value) > 0 else \"\"\n\n def to_internal_value(self, data):\n return data\n\n\nclass UserSerializer(serializers.ModelSerializer):\n password = ObfuscatedUserPasswordField(required=False)\n user_permissions = serializers.SlugRelatedField(\n many=True,\n queryset=Permission.objects.exclude(content_type__app_label=\"admin\"),\n slug_field=\"codename\",\n required=False,\n )\n inherited_permissions = serializers.SerializerMethodField()\n\n class Meta:\n model = User\n fields = (\n \"id\",\n \"username\",\n \"email\",\n \"password\",\n \"first_name\",\n \"last_name\",\n \"date_joined\",\n \"is_staff\",\n \"is_active\",\n \"is_superuser\",\n \"groups\",\n \"user_permissions\",\n \"inherited_permissions\",\n )\n\n def get_inherited_permissions(self, obj):\n return obj.get_group_permissions()\n\n def update(self, instance, validated_data):\n if \"password\" in validated_data:\n if len(validated_data.get(\"password\").replace(\"*\", \"\")) > 0:\n instance.set_password(validated_data.get(\"password\"))\n instance.save()\n validated_data.pop(\"password\")\n super().update(instance, validated_data)\n return instance\n\n def create(self, validated_data):\n groups = None\n if \"groups\" in validated_data:\n groups = validated_data.pop(\"groups\")\n user_permissions = None\n if \"user_permissions\" in validated_data:\n user_permissions = validated_data.pop(\"user_permissions\")\n password = None\n if (\n \"password\" in validated_data\n and len(validated_data.get(\"password\").replace(\"*\", \"\")) > 0\n ):\n password = validated_data.pop(\"password\")\n user = User.objects.create(**validated_data)\n # set groups\n if groups:\n user.groups.set(groups)\n # set permissions\n if user_permissions:\n user.user_permissions.set(user_permissions)\n # set password\n if password:\n user.set_password(password)\n user.save()\n return user\n\n\nclass GroupSerializer(serializers.ModelSerializer):\n permissions = serializers.SlugRelatedField(\n many=True,\n queryset=Permission.objects.exclude(content_type__app_label=\"admin\"),\n slug_field=\"codename\",\n )\n\n class Meta:\n model = Group\n fields = (\n \"id\",\n \"name\",\n \"permissions\",\n )\n\n\nclass SocialAccountSerializer(serializers.ModelSerializer):\n name = serializers.SerializerMethodField()\n\n class Meta:\n model = SocialAccount\n fields = (\n \"id\",\n \"provider\",\n \"name\",\n )\n\n def get_name(self, obj):\n return obj.get_provider_account().to_str()\n\n\nclass ProfileSerializer(serializers.ModelSerializer):\n email = serializers.EmailField(allow_null=False)\n password = ObfuscatedUserPasswordField(required=False, allow_null=False)\n auth_token = serializers.SlugRelatedField(read_only=True, slug_field=\"key\")\n social_accounts = SocialAccountSerializer(\n many=True,\n read_only=True,\n source=\"socialaccount_set\",\n )\n\n class Meta:\n model = User\n fields = (\n \"email\",\n \"password\",\n \"first_name\",\n \"last_name\",\n \"auth_token\",\n \"social_accounts\",\n \"has_usable_password\",\n )\n\n\nclass ApplicationConfigurationSerializer(serializers.ModelSerializer):\n user_args = serializers.JSONField(binary=True, allow_null=True)\n\n def run_validation(self, data):\n # Empty strings treated as None to avoid unexpected behavior\n if \"user_args\" in data and data[\"user_args\"] == \"\":\n 
data[\"user_args\"] = None\n if \"language\" in data and data[\"language\"] == \"\":\n data[\"language\"] = None\n return super().run_validation(data)\n\n def update(self, instance, validated_data):\n if instance.app_logo and \"app_logo\" in validated_data:\n instance.app_logo.delete()\n return super().update(instance, validated_data)\n\n class Meta:\n model = ApplicationConfiguration\n fields = \"__all__\"\n", "path": "src/paperless/serialisers.py"}]}
| 4,027 | 107 |
gh_patches_debug_22202
|
rasdani/github-patches
|
git_diff
|
kartoza__prj.app-971
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
http:// must be added manually
If the funder URL of a changelog entry is 'www.something.com', the link is wrong. The user must add 'https://' manually to make it work properly. I think projecta should handle this? It might be the same issue with the developer URL.
--- END ISSUE ---
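One way a form layer can absorb this is to prepend a scheme whenever the user types a bare host. The sketch below is illustrative only, not the project's recorded change; the field name is borrowed from the entry form for readability:

```python
# Hedged sketch of one possible normalisation; form and field names are illustrative.
from django import forms


class FunderUrlFormSketch(forms.Form):
    funder_url = forms.CharField(required=False)

    def clean_funder_url(self):
        url = self.cleaned_data.get("funder_url", "").strip()
        # A bare host such as "www.something.com" gets a scheme prepended so the
        # rendered link is absolute instead of resolving relative to the current page.
        if url and not url.startswith(("http://", "https://")):
            url = "http://" + url
        return url
```

The fix recorded in the golden diff further down takes a different route and swaps the two fields to `forms.URLField(initial="http://", widget=TextInput)`.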
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `django_project/changes/forms.py`
Content:
```
1 # coding=utf-8
2 from django import forms
3 from django.core.validators import ValidationError
4 from crispy_forms.helper import FormHelper
5 from crispy_forms.layout import (
6 Layout,
7 Fieldset,
8 Submit,
9 Field,
10 )
11 from models import (
12 Category,
13 Version,
14 Entry,
15 Sponsor,
16 SponsorshipPeriod,
17 SponsorshipLevel
18 )
19
20
21 class CategoryForm(forms.ModelForm):
22
23 # noinspection PyClassicStyleClass
24 class Meta:
25 model = Category
26 fields = ('name', 'sort_number')
27
28 def __init__(self, *args, **kwargs):
29 self.helper = FormHelper()
30 self.project = kwargs.pop('project')
31 form_title = 'New Category in %s' % self.project.name
32 layout = Layout(
33 Fieldset(
34 form_title,
35 Field('name', css_class='form-control'),
36 Field('sort_number', css_class='form-control'),
37 css_id='project-form')
38 )
39 self.helper.layout = layout
40 self.helper.html5_required = False
41 super(CategoryForm, self).__init__(*args, **kwargs)
42 self.helper.add_input(Submit('submit', 'Submit'))
43
44 def save(self, commit=True):
45 instance = super(CategoryForm, self).save(commit=False)
46 instance.project = self.project
47 instance.save()
48 return instance
49
50 def clean(self):
51 cleaned_data = self.cleaned_data
52
53 try:
54 Category.objects.get(
55 name=cleaned_data['name'], project=self.project)
56 except Category.DoesNotExist:
57 pass
58 else:
59 raise ValidationError(
60 'Category with this name already exists for this project'
61 )
62
63 return cleaned_data
64
65
66 class VersionForm(forms.ModelForm):
67
68 # noinspection PyClassicStyleClass
69 class Meta:
70 model = Version
71 fields = (
72 'name',
73 'description',
74 'image_file',
75 'release_date'
76 )
77
78 def __init__(self, *args, **kwargs):
79 self.user = kwargs.pop('user')
80 self.project = kwargs.pop('project')
81 form_title = 'New Version for %s' % self.project.name
82 self.helper = FormHelper()
83 layout = Layout(
84 Fieldset(
85 form_title,
86 Field('name', css_class='form-control'),
87 Field('description', css_class='form-control'),
88 Field('image_file', css_class='form-control'),
89
90 css_id='project-form')
91 )
92 self.helper.layout = layout
93 self.helper.html5_required = False
94 super(VersionForm, self).__init__(*args, **kwargs)
95 self.helper.add_input(Submit('submit', 'Submit'))
96
97 def save(self, commit=True):
98 instance = super(VersionForm, self).save(commit=False)
99 instance.author = self.user
100 instance.project = self.project
101 instance.approved = False
102 instance.save()
103 return instance
104
105
106 class EntryForm(forms.ModelForm):
107
108 # noinspection PyClassicStyleClass
109 class Meta:
110 model = Entry
111 fields = (
112 'category', 'title', 'description',
113 'image_file', 'image_credits', 'video',
114 'funded_by', 'funder_url', 'developed_by',
115 'developer_url'
116 )
117
118 def __init__(self, *args, **kwargs):
119 self.user = kwargs.pop('user')
120 self.version = kwargs.pop('version')
121 self.project = kwargs.pop('project')
122 form_title = 'New Entry in %s %s' % (
123 self.project.name,
124 self.version.name
125 )
126 self.helper = FormHelper()
127 layout = Layout(
128 Fieldset(
129 form_title,
130 Field('category', css_class='form-control'),
131 Field('title', css_class='form-control'),
132 Field('description', css_class='form-control'),
133 Field('image_file', css_class='form-control'),
134 Field('image_credits', css_class='form-control'),
135 Field('video', css_class='form-control'),
136 Field('funded_by', css_class='form-control'),
137 Field('funder_url', css_class='form-control'),
138 Field('developed_by', css_class='form-control'),
139 Field('developer_url', css_class='form-control'),
140 css_id='entry-form')
141 )
142 self.helper.layout = layout
143 self.helper.html5_required = False
144 super(EntryForm, self).__init__(*args, **kwargs)
145 self.helper.add_input(Submit('submit', 'Submit'))
146 self.fields['title'].label = 'Feature Title'
147 # Filter the category list when editing so it shows only relevant ones
148 self.fields['category'].queryset = Category.objects.filter(
149 project=self.project).order_by('name')
150
151 def save(self, commit=True):
152 instance = super(EntryForm, self).save(commit=False)
153 instance.author = self.user
154 instance.version = self.version
155 instance.approved = False
156 instance.save()
157 return instance
158
159
160 class SponsorForm(forms.ModelForm):
161
162 # noinspection PyClassicStyleClass
163 class Meta:
164 model = Sponsor
165 fields = (
166 'name',
167 'contact_title',
168 'address',
169 'country',
170 'sponsor_url',
171 'contact_person',
172 'sponsor_email',
173 'agreement',
174 'logo',
175 'invoice_number',
176 'project',
177 )
178
179 def __init__(self, *args, **kwargs):
180 self.user = kwargs.pop('user')
181 self.project = kwargs.pop('project')
182 form_title = 'New Sponsor for %s' % self.project.name
183 self.helper = FormHelper()
184 layout = Layout(
185 Fieldset(
186 form_title,
187 Field('name', css_class='form-control'),
188 Field('contact_title', css_class='form-control'),
189 Field('address', css_class='form-control'),
190 Field('country', css_class='form-control chosen-select'),
191 Field('sponsor_url', css_class='form-control'),
192 Field('contact_person', css_class='form-control'),
193 Field('sponsor_email', css_class='form-control'),
194 Field('agreement', css_class='form-control'),
195 Field('logo', css_class='form-control'),
196 Field('invoice_number', css_class='form-control'),
197 css_id='project-form')
198 )
199 self.helper.layout = layout
200 self.helper.html5_required = False
201 super(SponsorForm, self).__init__(*args, **kwargs)
202 self.fields['project'].initial = self.project
203 self.fields['project'].widget = forms.HiddenInput()
204 self.helper.add_input(Submit('submit', 'Submit'))
205
206 def save(self, commit=True):
207 instance = super(SponsorForm, self).save(commit=False)
208 instance.author = self.user
209 instance.approved = False
210 instance.save()
211 return instance
212
213
214 class SponsorshipLevelForm(forms.ModelForm):
215
216 # noinspection PyClassicStyleClass
217 class Meta:
218 model = SponsorshipLevel
219 fields = (
220 'name',
221 'value',
222 'currency',
223 'logo'
224 )
225
226 def __init__(self, *args, **kwargs):
227 self.user = kwargs.pop('user')
228 self.project = kwargs.pop('project')
229 form_title = 'Sponsorship Level Form for %s' % self.project.name
230 self.helper = FormHelper()
231 layout = Layout(
232 Fieldset(
233 form_title,
234 Field('name', css_class='form-control'),
235 Field('value', css_class='form-control'),
236 Field('currency', css_class='form-control'),
237 Field('logo', css_class='form-control'),
238 css_id='project-form')
239 )
240 self.helper.layout = layout
241 self.helper.html5_required = False
242 super(SponsorshipLevelForm, self).__init__(*args, **kwargs)
243 self.helper.add_input(Submit('submit', 'Submit'))
244
245 def save(self, commit=True):
246 instance = super(SponsorshipLevelForm, self).save(commit=False)
247 instance.author = self.user
248 instance.project = self.project
249 instance.save()
250 return instance
251
252
253 class SponsorshipPeriodForm(forms.ModelForm):
254
255 # noinspection PyClassicStyleClass
256 class Meta:
257 model = SponsorshipPeriod
258 fields = (
259 'sponsor',
260 'sponsorship_level',
261 'start_date',
262 'end_date',
263 'amount_sponsored',
264 'currency'
265 )
266
267 def __init__(self, *args, **kwargs):
268 self.user = kwargs.pop('user')
269 self.project = kwargs.pop('project')
270 form_title = 'Sponsorship Period Form for %s' % self.project.name
271 self.helper = FormHelper()
272 layout = Layout(
273 Fieldset(
274 form_title,
275 Field('sponsor', css_class='form-control chosen-select'),
276 Field(
277 'sponsorship_level',
278 css_class='form-control chosen-select'),
279 Field('start_date', css_class='form-control'),
280 Field('end_date', css_class='form-control'),
281 Field('amount_sponsored', css_class='form-control'),
282 Field('currency', css_class='form-control'),
283 css_id='project-form')
284 )
285 self.helper.layout = layout
286 self.helper.html5_required = False
287 super(SponsorshipPeriodForm, self).__init__(*args, **kwargs)
288 # Filter items to only show the approved items in the same project
289 self.fields['sponsor'].queryset = \
290 Sponsor.objects.filter(
291 project=self.project, approved=True).order_by('name')
292 self.fields['sponsorship_level'].queryset = \
293 SponsorshipLevel.objects.filter(
294 project=self.project, approved=True).order_by('name')
295 self.helper.add_input(Submit('submit', 'Submit'))
296
297 def save(self, commit=True):
298 instance = super(SponsorshipPeriodForm, self).save(commit=False)
299 instance.author = self.user
300 instance.project = self.project
301 instance.save()
302 return instance
303
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/django_project/changes/forms.py b/django_project/changes/forms.py
--- a/django_project/changes/forms.py
+++ b/django_project/changes/forms.py
@@ -1,5 +1,6 @@
# coding=utf-8
from django import forms
+from django.forms.widgets import TextInput
from django.core.validators import ValidationError
from crispy_forms.helper import FormHelper
from crispy_forms.layout import (
@@ -144,6 +145,10 @@
super(EntryForm, self).__init__(*args, **kwargs)
self.helper.add_input(Submit('submit', 'Submit'))
self.fields['title'].label = 'Feature Title'
+ self.fields['funder_url'] = forms.URLField(
+ initial="http://", widget=TextInput)
+ self.fields['developer_url'] = forms.URLField(
+ initial="http://", widget=TextInput)
# Filter the category list when editing so it shows only relevant ones
self.fields['category'].queryset = Category.objects.filter(
project=self.project).order_by('name')
|
{"golden_diff": "diff --git a/django_project/changes/forms.py b/django_project/changes/forms.py\n--- a/django_project/changes/forms.py\n+++ b/django_project/changes/forms.py\n@@ -1,5 +1,6 @@\n # coding=utf-8\n from django import forms\n+from django.forms.widgets import TextInput\n from django.core.validators import ValidationError\n from crispy_forms.helper import FormHelper\n from crispy_forms.layout import (\n@@ -144,6 +145,10 @@\n super(EntryForm, self).__init__(*args, **kwargs)\n self.helper.add_input(Submit('submit', 'Submit'))\n self.fields['title'].label = 'Feature Title'\n+ self.fields['funder_url'] = forms.URLField(\n+ initial=\"http://\", widget=TextInput)\n+ self.fields['developer_url'] = forms.URLField(\n+ initial=\"http://\", widget=TextInput)\n # Filter the category list when editing so it shows only relevant ones\n self.fields['category'].queryset = Category.objects.filter(\n project=self.project).order_by('name')\n", "issue": "http:// must be added manually\nIf in founder URL of an changelog entry, the URL is 'www.something.com', the link is wrong. The user must add 'https://' to make it work properly. I think projecta should handle this ? It might be the same issue in developer URL.\n", "before_files": [{"content": "# coding=utf-8\nfrom django import forms\nfrom django.core.validators import ValidationError\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import (\n Layout,\n Fieldset,\n Submit,\n Field,\n)\nfrom models import (\n Category,\n Version,\n Entry,\n Sponsor,\n SponsorshipPeriod,\n SponsorshipLevel\n)\n\n\nclass CategoryForm(forms.ModelForm):\n\n # noinspection PyClassicStyleClass\n class Meta:\n model = Category\n fields = ('name', 'sort_number')\n\n def __init__(self, *args, **kwargs):\n self.helper = FormHelper()\n self.project = kwargs.pop('project')\n form_title = 'New Category in %s' % self.project.name\n layout = Layout(\n Fieldset(\n form_title,\n Field('name', css_class='form-control'),\n Field('sort_number', css_class='form-control'),\n css_id='project-form')\n )\n self.helper.layout = layout\n self.helper.html5_required = False\n super(CategoryForm, self).__init__(*args, **kwargs)\n self.helper.add_input(Submit('submit', 'Submit'))\n\n def save(self, commit=True):\n instance = super(CategoryForm, self).save(commit=False)\n instance.project = self.project\n instance.save()\n return instance\n\n def clean(self):\n cleaned_data = self.cleaned_data\n\n try:\n Category.objects.get(\n name=cleaned_data['name'], project=self.project)\n except Category.DoesNotExist:\n pass\n else:\n raise ValidationError(\n 'Category with this name already exists for this project'\n )\n\n return cleaned_data\n\n\nclass VersionForm(forms.ModelForm):\n\n # noinspection PyClassicStyleClass\n class Meta:\n model = Version\n fields = (\n 'name',\n 'description',\n 'image_file',\n 'release_date'\n )\n\n def __init__(self, *args, **kwargs):\n self.user = kwargs.pop('user')\n self.project = kwargs.pop('project')\n form_title = 'New Version for %s' % self.project.name\n self.helper = FormHelper()\n layout = Layout(\n Fieldset(\n form_title,\n Field('name', css_class='form-control'),\n Field('description', css_class='form-control'),\n Field('image_file', css_class='form-control'),\n\n css_id='project-form')\n )\n self.helper.layout = layout\n self.helper.html5_required = False\n super(VersionForm, self).__init__(*args, **kwargs)\n self.helper.add_input(Submit('submit', 'Submit'))\n\n def save(self, commit=True):\n instance = super(VersionForm, 
self).save(commit=False)\n instance.author = self.user\n instance.project = self.project\n instance.approved = False\n instance.save()\n return instance\n\n\nclass EntryForm(forms.ModelForm):\n\n # noinspection PyClassicStyleClass\n class Meta:\n model = Entry\n fields = (\n 'category', 'title', 'description',\n 'image_file', 'image_credits', 'video',\n 'funded_by', 'funder_url', 'developed_by',\n 'developer_url'\n )\n\n def __init__(self, *args, **kwargs):\n self.user = kwargs.pop('user')\n self.version = kwargs.pop('version')\n self.project = kwargs.pop('project')\n form_title = 'New Entry in %s %s' % (\n self.project.name,\n self.version.name\n )\n self.helper = FormHelper()\n layout = Layout(\n Fieldset(\n form_title,\n Field('category', css_class='form-control'),\n Field('title', css_class='form-control'),\n Field('description', css_class='form-control'),\n Field('image_file', css_class='form-control'),\n Field('image_credits', css_class='form-control'),\n Field('video', css_class='form-control'),\n Field('funded_by', css_class='form-control'),\n Field('funder_url', css_class='form-control'),\n Field('developed_by', css_class='form-control'),\n Field('developer_url', css_class='form-control'),\n css_id='entry-form')\n )\n self.helper.layout = layout\n self.helper.html5_required = False\n super(EntryForm, self).__init__(*args, **kwargs)\n self.helper.add_input(Submit('submit', 'Submit'))\n self.fields['title'].label = 'Feature Title'\n # Filter the category list when editing so it shows only relevant ones\n self.fields['category'].queryset = Category.objects.filter(\n project=self.project).order_by('name')\n\n def save(self, commit=True):\n instance = super(EntryForm, self).save(commit=False)\n instance.author = self.user\n instance.version = self.version\n instance.approved = False\n instance.save()\n return instance\n\n\nclass SponsorForm(forms.ModelForm):\n\n # noinspection PyClassicStyleClass\n class Meta:\n model = Sponsor\n fields = (\n 'name',\n 'contact_title',\n 'address',\n 'country',\n 'sponsor_url',\n 'contact_person',\n 'sponsor_email',\n 'agreement',\n 'logo',\n 'invoice_number',\n 'project',\n )\n\n def __init__(self, *args, **kwargs):\n self.user = kwargs.pop('user')\n self.project = kwargs.pop('project')\n form_title = 'New Sponsor for %s' % self.project.name\n self.helper = FormHelper()\n layout = Layout(\n Fieldset(\n form_title,\n Field('name', css_class='form-control'),\n Field('contact_title', css_class='form-control'),\n Field('address', css_class='form-control'),\n Field('country', css_class='form-control chosen-select'),\n Field('sponsor_url', css_class='form-control'),\n Field('contact_person', css_class='form-control'),\n Field('sponsor_email', css_class='form-control'),\n Field('agreement', css_class='form-control'),\n Field('logo', css_class='form-control'),\n Field('invoice_number', css_class='form-control'),\n css_id='project-form')\n )\n self.helper.layout = layout\n self.helper.html5_required = False\n super(SponsorForm, self).__init__(*args, **kwargs)\n self.fields['project'].initial = self.project\n self.fields['project'].widget = forms.HiddenInput()\n self.helper.add_input(Submit('submit', 'Submit'))\n\n def save(self, commit=True):\n instance = super(SponsorForm, self).save(commit=False)\n instance.author = self.user\n instance.approved = False\n instance.save()\n return instance\n\n\nclass SponsorshipLevelForm(forms.ModelForm):\n\n # noinspection PyClassicStyleClass\n class Meta:\n model = SponsorshipLevel\n fields = (\n 'name',\n 'value',\n 
'currency',\n 'logo'\n )\n\n def __init__(self, *args, **kwargs):\n self.user = kwargs.pop('user')\n self.project = kwargs.pop('project')\n form_title = 'Sponsorship Level Form for %s' % self.project.name\n self.helper = FormHelper()\n layout = Layout(\n Fieldset(\n form_title,\n Field('name', css_class='form-control'),\n Field('value', css_class='form-control'),\n Field('currency', css_class='form-control'),\n Field('logo', css_class='form-control'),\n css_id='project-form')\n )\n self.helper.layout = layout\n self.helper.html5_required = False\n super(SponsorshipLevelForm, self).__init__(*args, **kwargs)\n self.helper.add_input(Submit('submit', 'Submit'))\n\n def save(self, commit=True):\n instance = super(SponsorshipLevelForm, self).save(commit=False)\n instance.author = self.user\n instance.project = self.project\n instance.save()\n return instance\n\n\nclass SponsorshipPeriodForm(forms.ModelForm):\n\n # noinspection PyClassicStyleClass\n class Meta:\n model = SponsorshipPeriod\n fields = (\n 'sponsor',\n 'sponsorship_level',\n 'start_date',\n 'end_date',\n 'amount_sponsored',\n 'currency'\n )\n\n def __init__(self, *args, **kwargs):\n self.user = kwargs.pop('user')\n self.project = kwargs.pop('project')\n form_title = 'Sponsorship Period Form for %s' % self.project.name\n self.helper = FormHelper()\n layout = Layout(\n Fieldset(\n form_title,\n Field('sponsor', css_class='form-control chosen-select'),\n Field(\n 'sponsorship_level',\n css_class='form-control chosen-select'),\n Field('start_date', css_class='form-control'),\n Field('end_date', css_class='form-control'),\n Field('amount_sponsored', css_class='form-control'),\n Field('currency', css_class='form-control'),\n css_id='project-form')\n )\n self.helper.layout = layout\n self.helper.html5_required = False\n super(SponsorshipPeriodForm, self).__init__(*args, **kwargs)\n # Filter items to only show the approved items in the same project\n self.fields['sponsor'].queryset = \\\n Sponsor.objects.filter(\n project=self.project, approved=True).order_by('name')\n self.fields['sponsorship_level'].queryset = \\\n SponsorshipLevel.objects.filter(\n project=self.project, approved=True).order_by('name')\n self.helper.add_input(Submit('submit', 'Submit'))\n\n def save(self, commit=True):\n instance = super(SponsorshipPeriodForm, self).save(commit=False)\n instance.author = self.user\n instance.project = self.project\n instance.save()\n return instance\n", "path": "django_project/changes/forms.py"}], "after_files": [{"content": "# coding=utf-8\nfrom django import forms\nfrom django.forms.widgets import TextInput\nfrom django.core.validators import ValidationError\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import (\n Layout,\n Fieldset,\n Submit,\n Field,\n)\nfrom models import (\n Category,\n Version,\n Entry,\n Sponsor,\n SponsorshipPeriod,\n SponsorshipLevel\n)\n\n\nclass CategoryForm(forms.ModelForm):\n\n # noinspection PyClassicStyleClass\n class Meta:\n model = Category\n fields = ('name', 'sort_number')\n\n def __init__(self, *args, **kwargs):\n self.helper = FormHelper()\n self.project = kwargs.pop('project')\n form_title = 'New Category in %s' % self.project.name\n layout = Layout(\n Fieldset(\n form_title,\n Field('name', css_class='form-control'),\n Field('sort_number', css_class='form-control'),\n css_id='project-form')\n )\n self.helper.layout = layout\n self.helper.html5_required = False\n super(CategoryForm, self).__init__(*args, **kwargs)\n self.helper.add_input(Submit('submit', 'Submit'))\n\n def 
save(self, commit=True):\n instance = super(CategoryForm, self).save(commit=False)\n instance.project = self.project\n instance.save()\n return instance\n\n def clean(self):\n cleaned_data = self.cleaned_data\n\n try:\n Category.objects.get(\n name=cleaned_data['name'], project=self.project)\n except Category.DoesNotExist:\n pass\n else:\n raise ValidationError(\n 'Category with this name already exists for this project'\n )\n\n return cleaned_data\n\n\nclass VersionForm(forms.ModelForm):\n\n # noinspection PyClassicStyleClass\n class Meta:\n model = Version\n fields = (\n 'name',\n 'description',\n 'image_file',\n 'release_date'\n )\n\n def __init__(self, *args, **kwargs):\n self.user = kwargs.pop('user')\n self.project = kwargs.pop('project')\n form_title = 'New Version for %s' % self.project.name\n self.helper = FormHelper()\n layout = Layout(\n Fieldset(\n form_title,\n Field('name', css_class='form-control'),\n Field('description', css_class='form-control'),\n Field('image_file', css_class='form-control'),\n\n css_id='project-form')\n )\n self.helper.layout = layout\n self.helper.html5_required = False\n super(VersionForm, self).__init__(*args, **kwargs)\n self.helper.add_input(Submit('submit', 'Submit'))\n\n def save(self, commit=True):\n instance = super(VersionForm, self).save(commit=False)\n instance.author = self.user\n instance.project = self.project\n instance.approved = False\n instance.save()\n return instance\n\n\nclass EntryForm(forms.ModelForm):\n\n # noinspection PyClassicStyleClass\n class Meta:\n model = Entry\n fields = (\n 'category', 'title', 'description',\n 'image_file', 'image_credits', 'video',\n 'funded_by', 'funder_url', 'developed_by',\n 'developer_url'\n )\n\n def __init__(self, *args, **kwargs):\n self.user = kwargs.pop('user')\n self.version = kwargs.pop('version')\n self.project = kwargs.pop('project')\n form_title = 'New Entry in %s %s' % (\n self.project.name,\n self.version.name\n )\n self.helper = FormHelper()\n layout = Layout(\n Fieldset(\n form_title,\n Field('category', css_class='form-control'),\n Field('title', css_class='form-control'),\n Field('description', css_class='form-control'),\n Field('image_file', css_class='form-control'),\n Field('image_credits', css_class='form-control'),\n Field('video', css_class='form-control'),\n Field('funded_by', css_class='form-control'),\n Field('funder_url', css_class='form-control'),\n Field('developed_by', css_class='form-control'),\n Field('developer_url', css_class='form-control'),\n css_id='entry-form')\n )\n self.helper.layout = layout\n self.helper.html5_required = False\n super(EntryForm, self).__init__(*args, **kwargs)\n self.helper.add_input(Submit('submit', 'Submit'))\n self.fields['title'].label = 'Feature Title'\n self.fields['funder_url'] = forms.URLField(\n initial=\"http://\", widget=TextInput)\n self.fields['developer_url'] = forms.URLField(\n initial=\"http://\", widget=TextInput)\n # Filter the category list when editing so it shows only relevant ones\n self.fields['category'].queryset = Category.objects.filter(\n project=self.project).order_by('name')\n\n def save(self, commit=True):\n instance = super(EntryForm, self).save(commit=False)\n instance.author = self.user\n instance.version = self.version\n instance.approved = False\n instance.save()\n return instance\n\n\nclass SponsorForm(forms.ModelForm):\n\n # noinspection PyClassicStyleClass\n class Meta:\n model = Sponsor\n fields = (\n 'name',\n 'contact_title',\n 'address',\n 'country',\n 'sponsor_url',\n 'contact_person',\n 
'sponsor_email',\n 'agreement',\n 'logo',\n 'invoice_number',\n 'project',\n )\n\n def __init__(self, *args, **kwargs):\n self.user = kwargs.pop('user')\n self.project = kwargs.pop('project')\n form_title = 'New Sponsor for %s' % self.project.name\n self.helper = FormHelper()\n layout = Layout(\n Fieldset(\n form_title,\n Field('name', css_class='form-control'),\n Field('contact_title', css_class='form-control'),\n Field('address', css_class='form-control'),\n Field('country', css_class='form-control chosen-select'),\n Field('sponsor_url', css_class='form-control'),\n Field('contact_person', css_class='form-control'),\n Field('sponsor_email', css_class='form-control'),\n Field('agreement', css_class='form-control'),\n Field('logo', css_class='form-control'),\n Field('invoice_number', css_class='form-control'),\n css_id='project-form')\n )\n self.helper.layout = layout\n self.helper.html5_required = False\n super(SponsorForm, self).__init__(*args, **kwargs)\n self.fields['project'].initial = self.project\n self.fields['project'].widget = forms.HiddenInput()\n self.helper.add_input(Submit('submit', 'Submit'))\n\n def save(self, commit=True):\n instance = super(SponsorForm, self).save(commit=False)\n instance.author = self.user\n instance.approved = False\n instance.save()\n return instance\n\n\nclass SponsorshipLevelForm(forms.ModelForm):\n\n # noinspection PyClassicStyleClass\n class Meta:\n model = SponsorshipLevel\n fields = (\n 'name',\n 'value',\n 'currency',\n 'logo'\n )\n\n def __init__(self, *args, **kwargs):\n self.user = kwargs.pop('user')\n self.project = kwargs.pop('project')\n form_title = 'Sponsorship Level Form for %s' % self.project.name\n self.helper = FormHelper()\n layout = Layout(\n Fieldset(\n form_title,\n Field('name', css_class='form-control'),\n Field('value', css_class='form-control'),\n Field('currency', css_class='form-control'),\n Field('logo', css_class='form-control'),\n css_id='project-form')\n )\n self.helper.layout = layout\n self.helper.html5_required = False\n super(SponsorshipLevelForm, self).__init__(*args, **kwargs)\n self.helper.add_input(Submit('submit', 'Submit'))\n\n def save(self, commit=True):\n instance = super(SponsorshipLevelForm, self).save(commit=False)\n instance.author = self.user\n instance.project = self.project\n instance.save()\n return instance\n\n\nclass SponsorshipPeriodForm(forms.ModelForm):\n\n # noinspection PyClassicStyleClass\n class Meta:\n model = SponsorshipPeriod\n fields = (\n 'sponsor',\n 'sponsorship_level',\n 'start_date',\n 'end_date',\n 'amount_sponsored',\n 'currency'\n )\n\n def __init__(self, *args, **kwargs):\n self.user = kwargs.pop('user')\n self.project = kwargs.pop('project')\n form_title = 'Sponsorship Period Form for %s' % self.project.name\n self.helper = FormHelper()\n layout = Layout(\n Fieldset(\n form_title,\n Field('sponsor', css_class='form-control chosen-select'),\n Field(\n 'sponsorship_level',\n css_class='form-control chosen-select'),\n Field('start_date', css_class='form-control'),\n Field('end_date', css_class='form-control'),\n Field('amount_sponsored', css_class='form-control'),\n Field('currency', css_class='form-control'),\n css_id='project-form')\n )\n self.helper.layout = layout\n self.helper.html5_required = False\n super(SponsorshipPeriodForm, self).__init__(*args, **kwargs)\n # Filter items to only show the approved items in the same project\n self.fields['sponsor'].queryset = \\\n Sponsor.objects.filter(\n project=self.project, approved=True).order_by('name')\n 
self.fields['sponsorship_level'].queryset = \\\n SponsorshipLevel.objects.filter(\n project=self.project, approved=True).order_by('name')\n self.helper.add_input(Submit('submit', 'Submit'))\n\n def save(self, commit=True):\n instance = super(SponsorshipPeriodForm, self).save(commit=False)\n instance.author = self.user\n instance.project = self.project\n instance.save()\n return instance\n", "path": "django_project/changes/forms.py"}]}
| 3,231 | 232 |
gh_patches_debug_54191
|
rasdani/github-patches
|
git_diff
|
vyperlang__vyper-293
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Need a .travis.yml file to properly run travis tests
^
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 from setuptools import setup, find_packages
4
5
6 with open('README.md') as f:
7 readme = f.read()
8
9 with open('LICENSE') as f:
10 license = f.read()
11
12 setup(
13 name='viper',
14 version='0.0.1',
15 description='Viper Programming Language for Ethereum',
16 long_description=readme,
17 author='Vitalik Buterin',
18 author_email='',
19 url='https://github.com/ethereum/viper',
20 license=license,
21 packages=find_packages(exclude=('tests', 'docs')),
22 install_requires=[
23 'ethereum == 1.3.7',
24 'serpent',
25 'pytest-cov',
26 'pytest-runner', # Must be after pytest-cov or it will not work
27 # due to https://github.com/pypa/setuptools/issues/196
28 ],
29 scripts=['bin/viper']
30 )
31
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -20,7 +20,7 @@
license=license,
packages=find_packages(exclude=('tests', 'docs')),
install_requires=[
- 'ethereum == 1.3.7',
+ 'ethereum==2.0.4',
'serpent',
'pytest-cov',
'pytest-runner', # Must be after pytest-cov or it will not work
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -20,7 +20,7 @@\n license=license,\n packages=find_packages(exclude=('tests', 'docs')),\n install_requires=[\n- 'ethereum == 1.3.7',\n+ 'ethereum==2.0.4',\n 'serpent',\n 'pytest-cov',\n 'pytest-runner', # Must be after pytest-cov or it will not work\n", "issue": "Need a .travis.yml file to properly run travis tests\n^\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\n\nwith open('README.md') as f:\n readme = f.read()\n\nwith open('LICENSE') as f:\n license = f.read()\n\nsetup(\n name='viper',\n version='0.0.1',\n description='Viper Programming Language for Ethereum',\n long_description=readme,\n author='Vitalik Buterin',\n author_email='',\n url='https://github.com/ethereum/viper',\n license=license,\n packages=find_packages(exclude=('tests', 'docs')),\n install_requires=[\n 'ethereum == 1.3.7',\n 'serpent',\n 'pytest-cov',\n 'pytest-runner', # Must be after pytest-cov or it will not work\n # due to https://github.com/pypa/setuptools/issues/196\n ],\n scripts=['bin/viper']\n)\n", "path": "setup.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\n\nwith open('README.md') as f:\n readme = f.read()\n\nwith open('LICENSE') as f:\n license = f.read()\n\nsetup(\n name='viper',\n version='0.0.1',\n description='Viper Programming Language for Ethereum',\n long_description=readme,\n author='Vitalik Buterin',\n author_email='',\n url='https://github.com/ethereum/viper',\n license=license,\n packages=find_packages(exclude=('tests', 'docs')),\n install_requires=[\n 'ethereum==2.0.4',\n 'serpent',\n 'pytest-cov',\n 'pytest-runner', # Must be after pytest-cov or it will not work\n # due to https://github.com/pypa/setuptools/issues/196\n ],\n scripts=['bin/viper']\n)\n", "path": "setup.py"}]}
| 524 | 105 |
gh_patches_debug_59073
|
rasdani/github-patches
|
git_diff
|
googleapis__google-auth-library-python-188
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Setting GOOGLE_APPLICATION_CREDENTIALS to empty string throws an error
I would expect an empty environment variable to act identically to an unset environment variable.
Reported by @parthea
If I set my GOOGLE_APPLICATION_CREDENTIALS environment variable to an empty string, I receive an exception instead of the local user web authentication flow. Is it expected?
```
tony@tonypc:~/pydata-pandas-gbq/pandas_gbq/tests$ export GOOGLE_APPLICATION_CREDENTIALS=''
tony@tonypc:~/pydata-pandas-gbq/pandas_gbq/tests$ pytest test_gbq.py::TestGBQConnectorIntegrationWithLocalUserAccountAuth -v -r s . --maxfail=1
============================= test session starts ==============================
platform linux2 -- Python 2.7.12, pytest-3.0.6, py-1.4.32, pluggy-0.4.0 -- /home/tony/miniconda2/bin/python
cachedir: ../../.cache
rootdir: /home/tony/pydata-pandas-gbq, inifile:
plugins: cov-2.4.0
collected 100 items
test_gbq.py::TestGBQConnectorIntegrationWithLocalUserAccountAuth::test_should_be_able_to_make_a_connector ERROR
==================================== ERRORS ====================================
ERROR at setup of TestGBQConnectorIntegrationWithLocalUserAccountAuth.test_should_be_able_to_make_a_connector
self = <pandas_gbq.tests.test_gbq.TestGBQConnectorIntegrationWithLocalUserAccountAuth object at 0x7f2bbecf6490>
method = <bound method TestGBQConnectorIntegrationWithLocalUserAccountAuth.test_should_....TestGBQConnectorIntegrationWithLocalUserAccountAuth object at 0x7f2bbecf6490>>
def setup_method(self, method):
_setup_common()
_skip_if_no_project_id()
_skip_local_auth_if_in_travis_env()
> self.sut = gbq.GbqConnector(_get_project_id(), auth_local_webserver=True)
test_gbq.py:268:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../gbq.py:174: in __init__
auth_local_webserver=auth_local_webserver)
../gbq.py:182: in get_credentials
credentials = self.get_application_default_credentials()
../gbq.py:212: in get_application_default_credentials
credentials, _ = google.auth.default(scopes=[self.scope])
../../../miniconda2/lib/python2.7/site-packages/google/auth/_default.py:277: in default
credentials, project_id = checker()
../../../miniconda2/lib/python2.7/site-packages/google/auth/_default.py:138: in _get_explicit_environ_credentials
os.environ[environment_vars.CREDENTIALS])
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
filename = ''
def _load_credentials_from_file(filename):
"""Loads credentials from a file.
The credentials file must be a service account key or stored authorized
user credentials.
Args:
filename (str): The full path to the credentials file.
Returns:
Tuple[google.auth.credentials.Credentials, Optional[str]]: Loaded
credentials and the project ID. Authorized user credentials do not
have the project ID information.
Raises:
google.auth.exceptions.DefaultCredentialsError: if the file is in the
wrong format.
"""
> with io.open(filename, 'r') as file_obj:
E IOError: [Errno 2] No such file or directory: ''
../../../miniconda2/lib/python2.7/site-packages/google/auth/_default.py:63: IOError
!!!!!!!!!!!!!!!!!!!! Interrupted: stopping after 1 failures !!!!!!!!!!!!!!!!!!!!
=========================== 1 error in 0.61 seconds ============================
```
--- END ISSUE ---
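A minimal sketch of the root cause described above (standard-library calls only, separate from the repository files below):

```python
# An empty GOOGLE_APPLICATION_CREDENTIALS is still a *set* variable, so the
# explicit-file checker proceeds and then tries to open the empty path ''.
import os

os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = ''
explicit_file = os.environ.get('GOOGLE_APPLICATION_CREDENTIALS')
print(explicit_file is not None)  # True -> the loader is still called, with filename ''
print(os.path.exists(''))         # False -> opening '' fails with "No such file or directory"
```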
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `google/auth/_default.py`
Content:
```
1 # Copyright 2015 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Application default credentials.
16
17 Implements application default credentials and project ID detection.
18 """
19
20 import io
21 import json
22 import logging
23 import os
24
25 from google.auth import environment_vars
26 from google.auth import exceptions
27 import google.auth.transport._http_client
28
29 _LOGGER = logging.getLogger(__name__)
30
31 # Valid types accepted for file-based credentials.
32 _AUTHORIZED_USER_TYPE = 'authorized_user'
33 _SERVICE_ACCOUNT_TYPE = 'service_account'
34 _VALID_TYPES = (_AUTHORIZED_USER_TYPE, _SERVICE_ACCOUNT_TYPE)
35
36 # Help message when no credentials can be found.
37 _HELP_MESSAGE = """
38 Could not automatically determine credentials. Please set {env} or
39 explicitly create credential and re-run the application. For more
40 information, please see
41 https://developers.google.com/accounts/docs/application-default-credentials.
42 """.format(env=environment_vars.CREDENTIALS).strip()
43
44
45 def _load_credentials_from_file(filename):
46 """Loads credentials from a file.
47
48 The credentials file must be a service account key or stored authorized
49 user credentials.
50
51 Args:
52 filename (str): The full path to the credentials file.
53
54 Returns:
55 Tuple[google.auth.credentials.Credentials, Optional[str]]: Loaded
56 credentials and the project ID. Authorized user credentials do not
57 have the project ID information.
58
59 Raises:
60 google.auth.exceptions.DefaultCredentialsError: if the file is in the
61 wrong format.
62 """
63 with io.open(filename, 'r') as file_obj:
64 try:
65 info = json.load(file_obj)
66 except ValueError as exc:
67 raise exceptions.DefaultCredentialsError(
68 'File {} is not a valid json file.'.format(filename), exc)
69
70 # The type key should indicate that the file is either a service account
71 # credentials file or an authorized user credentials file.
72 credential_type = info.get('type')
73
74 if credential_type == _AUTHORIZED_USER_TYPE:
75 from google.auth import _cloud_sdk
76
77 try:
78 credentials = _cloud_sdk.load_authorized_user_credentials(info)
79 except ValueError as exc:
80 raise exceptions.DefaultCredentialsError(
81 'Failed to load authorized user credentials from {}'.format(
82 filename), exc)
83 # Authorized user credentials do not contain the project ID.
84 return credentials, None
85
86 elif credential_type == _SERVICE_ACCOUNT_TYPE:
87 from google.oauth2 import service_account
88
89 try:
90 credentials = (
91 service_account.Credentials.from_service_account_info(info))
92 except ValueError as exc:
93 raise exceptions.DefaultCredentialsError(
94 'Failed to load service account credentials from {}'.format(
95 filename), exc)
96 return credentials, info.get('project_id')
97
98 else:
99 raise exceptions.DefaultCredentialsError(
100 'The file {file} does not have a valid type. '
101 'Type is {type}, expected one of {valid_types}.'.format(
102 file=filename, type=credential_type, valid_types=_VALID_TYPES))
103
104
105 def _get_gcloud_sdk_credentials():
106 """Gets the credentials and project ID from the Cloud SDK."""
107 from google.auth import _cloud_sdk
108
109 # Check if application default credentials exist.
110 credentials_filename = (
111 _cloud_sdk.get_application_default_credentials_path())
112
113 if not os.path.isfile(credentials_filename):
114 return None, None
115
116 credentials, project_id = _load_credentials_from_file(
117 credentials_filename)
118
119 if not project_id:
120 project_id = _cloud_sdk.get_project_id()
121
122 if not project_id:
123 _LOGGER.warning(
124 'No project ID could be determined from the Cloud SDK '
125 'configuration. Consider running `gcloud config set project` or '
126 'setting the %s environment variable', environment_vars.PROJECT)
127
128 return credentials, project_id
129
130
131 def _get_explicit_environ_credentials():
132 """Gets credentials from the GOOGLE_APPLICATION_CREDENTIALS environment
133 variable."""
134 explicit_file = os.environ.get(environment_vars.CREDENTIALS)
135
136 if explicit_file is not None:
137 credentials, project_id = _load_credentials_from_file(
138 os.environ[environment_vars.CREDENTIALS])
139
140 if not project_id:
141 _LOGGER.warning(
142 'No project ID could be determined from the credentials at %s '
143 'Consider setting the %s environment variable',
144 environment_vars.CREDENTIALS, environment_vars.PROJECT)
145
146 return credentials, project_id
147
148 else:
149 return None, None
150
151
152 def _get_gae_credentials():
153 """Gets Google App Engine App Identity credentials and project ID."""
154 from google.auth import app_engine
155
156 try:
157 credentials = app_engine.Credentials()
158 project_id = app_engine.get_project_id()
159 return credentials, project_id
160 except EnvironmentError:
161 return None, None
162
163
164 def _get_gce_credentials(request=None):
165 """Gets credentials and project ID from the GCE Metadata Service."""
166 # Ping requires a transport, but we want application default credentials
167 # to require no arguments. So, we'll use the _http_client transport which
168 # uses http.client. This is only acceptable because the metadata server
169 # doesn't do SSL and never requires proxies.
170 from google.auth import compute_engine
171 from google.auth.compute_engine import _metadata
172
173 if request is None:
174 request = google.auth.transport._http_client.Request()
175
176 if _metadata.ping(request=request):
177 # Get the project ID.
178 try:
179 project_id = _metadata.get_project_id(request=request)
180 except exceptions.TransportError:
181 _LOGGER.warning(
182 'No project ID could be determined from the Compute Engine '
183 'metadata service. Consider setting the %s environment '
184 'variable.', environment_vars.PROJECT)
185 project_id = None
186
187 return compute_engine.Credentials(), project_id
188 else:
189 return None, None
190
191
192 def default(scopes=None, request=None):
193 """Gets the default credentials for the current environment.
194
195 `Application Default Credentials`_ provides an easy way to obtain
196 credentials to call Google APIs for server-to-server or local applications.
197 This function acquires credentials from the environment in the following
198 order:
199
200 1. If the environment variable ``GOOGLE_APPLICATION_CREDENTIALS`` is set
201 to the path of a valid service account JSON private key file, then it is
202 loaded and returned. The project ID returned is the project ID defined
203 in the service account file if available (some older files do not
204 contain project ID information).
205 2. If the `Google Cloud SDK`_ is installed and has application default
206 credentials set they are loaded and returned.
207
208 To enable application default credentials with the Cloud SDK run::
209
210 gcloud auth application-default login
211
212 If the Cloud SDK has an active project, the project ID is returned. The
213 active project can be set using::
214
215 gcloud config set project
216
217 3. If the application is running in the `App Engine standard environment`_
218 then the credentials and project ID from the `App Identity Service`_
219 are used.
220 4. If the application is running in `Compute Engine`_ or the
221 `App Engine flexible environment`_ then the credentials and project ID
222 are obtained from the `Metadata Service`_.
223 5. If no credentials are found,
224 :class:`~google.auth.exceptions.DefaultCredentialsError` will be raised.
225
226 .. _Application Default Credentials: https://developers.google.com\
227 /identity/protocols/application-default-credentials
228 .. _Google Cloud SDK: https://cloud.google.com/sdk
229 .. _App Engine standard environment: https://cloud.google.com/appengine
230 .. _App Identity Service: https://cloud.google.com/appengine/docs/python\
231 /appidentity/
232 .. _Compute Engine: https://cloud.google.com/compute
233 .. _App Engine flexible environment: https://cloud.google.com\
234 /appengine/flexible
235 .. _Metadata Service: https://cloud.google.com/compute/docs\
236 /storing-retrieving-metadata
237
238 Example::
239
240 import google.auth
241
242 credentials, project_id = google.auth.default()
243
244 Args:
245 scopes (Sequence[str]): The list of scopes for the credentials. If
246 specified, the credentials will automatically be scoped if
247 necessary.
248 request (google.auth.transport.Request): An object used to make
249 HTTP requests. This is used to detect whether the application
250 is running on Compute Engine. If not specified, then it will
251 use the standard library http client to make requests.
252
253 Returns:
254 Tuple[~google.auth.credentials.Credentials, Optional[str]]:
255 the current environment's credentials and project ID. Project ID
256 may be None, which indicates that the Project ID could not be
257 ascertained from the environment.
258
259 Raises:
260 ~google.auth.exceptions.DefaultCredentialsError:
261 If no credentials were found, or if the credentials found were
262 invalid.
263 """
264 from google.auth.credentials import with_scopes_if_required
265
266 explicit_project_id = os.environ.get(
267 environment_vars.PROJECT,
268 os.environ.get(environment_vars.LEGACY_PROJECT))
269
270 checkers = (
271 _get_explicit_environ_credentials,
272 _get_gcloud_sdk_credentials,
273 _get_gae_credentials,
274 lambda: _get_gce_credentials(request))
275
276 for checker in checkers:
277 credentials, project_id = checker()
278 if credentials is not None:
279 credentials = with_scopes_if_required(credentials, scopes)
280 return credentials, explicit_project_id or project_id
281
282 raise exceptions.DefaultCredentialsError(_HELP_MESSAGE)
283
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/google/auth/_default.py b/google/auth/_default.py
--- a/google/auth/_default.py
+++ b/google/auth/_default.py
@@ -58,8 +58,12 @@
Raises:
google.auth.exceptions.DefaultCredentialsError: if the file is in the
- wrong format.
+ wrong format or is missing.
"""
+ if not os.path.exists(filename):
+ raise exceptions.DefaultCredentialsError(
+ 'File {} was not found.'.format(filename))
+
with io.open(filename, 'r') as file_obj:
try:
info = json.load(file_obj)
|
{"golden_diff": "diff --git a/google/auth/_default.py b/google/auth/_default.py\n--- a/google/auth/_default.py\n+++ b/google/auth/_default.py\n@@ -58,8 +58,12 @@\n \n Raises:\n google.auth.exceptions.DefaultCredentialsError: if the file is in the\n- wrong format.\n+ wrong format or is missing.\n \"\"\"\n+ if not os.path.exists(filename):\n+ raise exceptions.DefaultCredentialsError(\n+ 'File {} was not found.'.format(filename))\n+\n with io.open(filename, 'r') as file_obj:\n try:\n info = json.load(file_obj)\n", "issue": "Setting GOOGLE_APPLICATION_CREDENTIALS to empty string throws an error\nI would expect an empty environment variable to act identically to an unset environment variable.\r\n\r\nReported by @parthea\r\n\r\nIf I set my GOOGLE_APPLICATION_CREDENTIALS environment variable to an empty string, I receive an exception instead of the local user web authentication flow. Is it expected?\r\n\r\n```\r\ntony@tonypc:~/pydata-pandas-gbq/pandas_gbq/tests$ export GOOGLE_APPLICATION_CREDENTIALS=''\r\ntony@tonypc:~/pydata-pandas-gbq/pandas_gbq/tests$ pytest test_gbq.py::TestGBQConnectorIntegrationWithLocalUserAccountAuth -v -r s . --maxfail=1\r\n============================= test session starts ==============================\r\nplatform linux2 -- Python 2.7.12, pytest-3.0.6, py-1.4.32, pluggy-0.4.0 -- /home/tony/miniconda2/bin/python\r\ncachedir: ../../.cache\r\nrootdir: /home/tony/pydata-pandas-gbq, inifile: \r\nplugins: cov-2.4.0\r\ncollected 100 items \r\n\r\ntest_gbq.py::TestGBQConnectorIntegrationWithLocalUserAccountAuth::test_should_be_able_to_make_a_connector ERROR\r\n\r\n==================================== ERRORS ====================================\r\n ERROR at setup of TestGBQConnectorIntegrationWithLocalUserAccountAuth.test_should_be_able_to_make_a_connector \r\n\r\nself = <pandas_gbq.tests.test_gbq.TestGBQConnectorIntegrationWithLocalUserAccountAuth object at 0x7f2bbecf6490>\r\nmethod = <bound method TestGBQConnectorIntegrationWithLocalUserAccountAuth.test_should_....TestGBQConnectorIntegrationWithLocalUserAccountAuth object at 0x7f2bbecf6490>>\r\n\r\n def setup_method(self, method):\r\n _setup_common()\r\n _skip_if_no_project_id()\r\n _skip_local_auth_if_in_travis_env()\r\n \r\n> self.sut = gbq.GbqConnector(_get_project_id(), auth_local_webserver=True)\r\n\r\ntest_gbq.py:268: \r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ \r\n../gbq.py:174: in __init__\r\n auth_local_webserver=auth_local_webserver)\r\n../gbq.py:182: in get_credentials\r\n credentials = self.get_application_default_credentials()\r\n../gbq.py:212: in get_application_default_credentials\r\n credentials, _ = google.auth.default(scopes=[self.scope])\r\n../../../miniconda2/lib/python2.7/site-packages/google/auth/_default.py:277: in default\r\n credentials, project_id = checker()\r\n../../../miniconda2/lib/python2.7/site-packages/google/auth/_default.py:138: in _get_explicit_environ_credentials\r\n os.environ[environment_vars.CREDENTIALS])\r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ \r\n\r\nfilename = ''\r\n\r\n def _load_credentials_from_file(filename):\r\n \"\"\"Loads credentials from a file.\r\n \r\n The credentials file must be a service account key or stored authorized\r\n user credentials.\r\n \r\n Args:\r\n filename (str): The full path to the credentials file.\r\n \r\n Returns:\r\n Tuple[google.auth.credentials.Credentials, Optional[str]]: Loaded\r\n credentials and the project ID. 
Authorized user credentials do not\r\n have the project ID information.\r\n \r\n Raises:\r\n google.auth.exceptions.DefaultCredentialsError: if the file is in the\r\n wrong format.\r\n \"\"\"\r\n> with io.open(filename, 'r') as file_obj:\r\nE IOError: [Errno 2] No such file or directory: ''\r\n\r\n../../../miniconda2/lib/python2.7/site-packages/google/auth/_default.py:63: IOError\r\n!!!!!!!!!!!!!!!!!!!! Interrupted: stopping after 1 failures !!!!!!!!!!!!!!!!!!!!\r\n=========================== 1 error in 0.61 seconds ============================\r\n```\n", "before_files": [{"content": "# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Application default credentials.\n\nImplements application default credentials and project ID detection.\n\"\"\"\n\nimport io\nimport json\nimport logging\nimport os\n\nfrom google.auth import environment_vars\nfrom google.auth import exceptions\nimport google.auth.transport._http_client\n\n_LOGGER = logging.getLogger(__name__)\n\n# Valid types accepted for file-based credentials.\n_AUTHORIZED_USER_TYPE = 'authorized_user'\n_SERVICE_ACCOUNT_TYPE = 'service_account'\n_VALID_TYPES = (_AUTHORIZED_USER_TYPE, _SERVICE_ACCOUNT_TYPE)\n\n# Help message when no credentials can be found.\n_HELP_MESSAGE = \"\"\"\nCould not automatically determine credentials. Please set {env} or\nexplicitly create credential and re-run the application. For more\ninformation, please see\nhttps://developers.google.com/accounts/docs/application-default-credentials.\n\"\"\".format(env=environment_vars.CREDENTIALS).strip()\n\n\ndef _load_credentials_from_file(filename):\n \"\"\"Loads credentials from a file.\n\n The credentials file must be a service account key or stored authorized\n user credentials.\n\n Args:\n filename (str): The full path to the credentials file.\n\n Returns:\n Tuple[google.auth.credentials.Credentials, Optional[str]]: Loaded\n credentials and the project ID. 
Authorized user credentials do not\n have the project ID information.\n\n Raises:\n google.auth.exceptions.DefaultCredentialsError: if the file is in the\n wrong format.\n \"\"\"\n with io.open(filename, 'r') as file_obj:\n try:\n info = json.load(file_obj)\n except ValueError as exc:\n raise exceptions.DefaultCredentialsError(\n 'File {} is not a valid json file.'.format(filename), exc)\n\n # The type key should indicate that the file is either a service account\n # credentials file or an authorized user credentials file.\n credential_type = info.get('type')\n\n if credential_type == _AUTHORIZED_USER_TYPE:\n from google.auth import _cloud_sdk\n\n try:\n credentials = _cloud_sdk.load_authorized_user_credentials(info)\n except ValueError as exc:\n raise exceptions.DefaultCredentialsError(\n 'Failed to load authorized user credentials from {}'.format(\n filename), exc)\n # Authorized user credentials do not contain the project ID.\n return credentials, None\n\n elif credential_type == _SERVICE_ACCOUNT_TYPE:\n from google.oauth2 import service_account\n\n try:\n credentials = (\n service_account.Credentials.from_service_account_info(info))\n except ValueError as exc:\n raise exceptions.DefaultCredentialsError(\n 'Failed to load service account credentials from {}'.format(\n filename), exc)\n return credentials, info.get('project_id')\n\n else:\n raise exceptions.DefaultCredentialsError(\n 'The file {file} does not have a valid type. '\n 'Type is {type}, expected one of {valid_types}.'.format(\n file=filename, type=credential_type, valid_types=_VALID_TYPES))\n\n\ndef _get_gcloud_sdk_credentials():\n \"\"\"Gets the credentials and project ID from the Cloud SDK.\"\"\"\n from google.auth import _cloud_sdk\n\n # Check if application default credentials exist.\n credentials_filename = (\n _cloud_sdk.get_application_default_credentials_path())\n\n if not os.path.isfile(credentials_filename):\n return None, None\n\n credentials, project_id = _load_credentials_from_file(\n credentials_filename)\n\n if not project_id:\n project_id = _cloud_sdk.get_project_id()\n\n if not project_id:\n _LOGGER.warning(\n 'No project ID could be determined from the Cloud SDK '\n 'configuration. Consider running `gcloud config set project` or '\n 'setting the %s environment variable', environment_vars.PROJECT)\n\n return credentials, project_id\n\n\ndef _get_explicit_environ_credentials():\n \"\"\"Gets credentials from the GOOGLE_APPLICATION_CREDENTIALS environment\n variable.\"\"\"\n explicit_file = os.environ.get(environment_vars.CREDENTIALS)\n\n if explicit_file is not None:\n credentials, project_id = _load_credentials_from_file(\n os.environ[environment_vars.CREDENTIALS])\n\n if not project_id:\n _LOGGER.warning(\n 'No project ID could be determined from the credentials at %s '\n 'Consider setting the %s environment variable',\n environment_vars.CREDENTIALS, environment_vars.PROJECT)\n\n return credentials, project_id\n\n else:\n return None, None\n\n\ndef _get_gae_credentials():\n \"\"\"Gets Google App Engine App Identity credentials and project ID.\"\"\"\n from google.auth import app_engine\n\n try:\n credentials = app_engine.Credentials()\n project_id = app_engine.get_project_id()\n return credentials, project_id\n except EnvironmentError:\n return None, None\n\n\ndef _get_gce_credentials(request=None):\n \"\"\"Gets credentials and project ID from the GCE Metadata Service.\"\"\"\n # Ping requires a transport, but we want application default credentials\n # to require no arguments. 
So, we'll use the _http_client transport which\n # uses http.client. This is only acceptable because the metadata server\n # doesn't do SSL and never requires proxies.\n from google.auth import compute_engine\n from google.auth.compute_engine import _metadata\n\n if request is None:\n request = google.auth.transport._http_client.Request()\n\n if _metadata.ping(request=request):\n # Get the project ID.\n try:\n project_id = _metadata.get_project_id(request=request)\n except exceptions.TransportError:\n _LOGGER.warning(\n 'No project ID could be determined from the Compute Engine '\n 'metadata service. Consider setting the %s environment '\n 'variable.', environment_vars.PROJECT)\n project_id = None\n\n return compute_engine.Credentials(), project_id\n else:\n return None, None\n\n\ndef default(scopes=None, request=None):\n \"\"\"Gets the default credentials for the current environment.\n\n `Application Default Credentials`_ provides an easy way to obtain\n credentials to call Google APIs for server-to-server or local applications.\n This function acquires credentials from the environment in the following\n order:\n\n 1. If the environment variable ``GOOGLE_APPLICATION_CREDENTIALS`` is set\n to the path of a valid service account JSON private key file, then it is\n loaded and returned. The project ID returned is the project ID defined\n in the service account file if available (some older files do not\n contain project ID information).\n 2. If the `Google Cloud SDK`_ is installed and has application default\n credentials set they are loaded and returned.\n\n To enable application default credentials with the Cloud SDK run::\n\n gcloud auth application-default login\n\n If the Cloud SDK has an active project, the project ID is returned. The\n active project can be set using::\n\n gcloud config set project\n\n 3. If the application is running in the `App Engine standard environment`_\n then the credentials and project ID from the `App Identity Service`_\n are used.\n 4. If the application is running in `Compute Engine`_ or the\n `App Engine flexible environment`_ then the credentials and project ID\n are obtained from the `Metadata Service`_.\n 5. If no credentials are found,\n :class:`~google.auth.exceptions.DefaultCredentialsError` will be raised.\n\n .. _Application Default Credentials: https://developers.google.com\\\n /identity/protocols/application-default-credentials\n .. _Google Cloud SDK: https://cloud.google.com/sdk\n .. _App Engine standard environment: https://cloud.google.com/appengine\n .. _App Identity Service: https://cloud.google.com/appengine/docs/python\\\n /appidentity/\n .. _Compute Engine: https://cloud.google.com/compute\n .. _App Engine flexible environment: https://cloud.google.com\\\n /appengine/flexible\n .. _Metadata Service: https://cloud.google.com/compute/docs\\\n /storing-retrieving-metadata\n\n Example::\n\n import google.auth\n\n credentials, project_id = google.auth.default()\n\n Args:\n scopes (Sequence[str]): The list of scopes for the credentials. If\n specified, the credentials will automatically be scoped if\n necessary.\n request (google.auth.transport.Request): An object used to make\n HTTP requests. This is used to detect whether the application\n is running on Compute Engine. If not specified, then it will\n use the standard library http client to make requests.\n\n Returns:\n Tuple[~google.auth.credentials.Credentials, Optional[str]]:\n the current environment's credentials and project ID. 
Project ID\n may be None, which indicates that the Project ID could not be\n ascertained from the environment.\n\n Raises:\n ~google.auth.exceptions.DefaultCredentialsError:\n If no credentials were found, or if the credentials found were\n invalid.\n \"\"\"\n from google.auth.credentials import with_scopes_if_required\n\n explicit_project_id = os.environ.get(\n environment_vars.PROJECT,\n os.environ.get(environment_vars.LEGACY_PROJECT))\n\n checkers = (\n _get_explicit_environ_credentials,\n _get_gcloud_sdk_credentials,\n _get_gae_credentials,\n lambda: _get_gce_credentials(request))\n\n for checker in checkers:\n credentials, project_id = checker()\n if credentials is not None:\n credentials = with_scopes_if_required(credentials, scopes)\n return credentials, explicit_project_id or project_id\n\n raise exceptions.DefaultCredentialsError(_HELP_MESSAGE)\n", "path": "google/auth/_default.py"}], "after_files": [{"content": "# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Application default credentials.\n\nImplements application default credentials and project ID detection.\n\"\"\"\n\nimport io\nimport json\nimport logging\nimport os\n\nfrom google.auth import environment_vars\nfrom google.auth import exceptions\nimport google.auth.transport._http_client\n\n_LOGGER = logging.getLogger(__name__)\n\n# Valid types accepted for file-based credentials.\n_AUTHORIZED_USER_TYPE = 'authorized_user'\n_SERVICE_ACCOUNT_TYPE = 'service_account'\n_VALID_TYPES = (_AUTHORIZED_USER_TYPE, _SERVICE_ACCOUNT_TYPE)\n\n# Help message when no credentials can be found.\n_HELP_MESSAGE = \"\"\"\nCould not automatically determine credentials. Please set {env} or\nexplicitly create credential and re-run the application. For more\ninformation, please see\nhttps://developers.google.com/accounts/docs/application-default-credentials.\n\"\"\".format(env=environment_vars.CREDENTIALS).strip()\n\n\ndef _load_credentials_from_file(filename):\n \"\"\"Loads credentials from a file.\n\n The credentials file must be a service account key or stored authorized\n user credentials.\n\n Args:\n filename (str): The full path to the credentials file.\n\n Returns:\n Tuple[google.auth.credentials.Credentials, Optional[str]]: Loaded\n credentials and the project ID. 
Authorized user credentials do not\n have the project ID information.\n\n Raises:\n google.auth.exceptions.DefaultCredentialsError: if the file is in the\n wrong format or is missing.\n \"\"\"\n if not os.path.exists(filename):\n raise exceptions.DefaultCredentialsError(\n 'File {} was not found.'.format(filename))\n\n with io.open(filename, 'r') as file_obj:\n try:\n info = json.load(file_obj)\n except ValueError as exc:\n raise exceptions.DefaultCredentialsError(\n 'File {} is not a valid json file.'.format(filename), exc)\n\n # The type key should indicate that the file is either a service account\n # credentials file or an authorized user credentials file.\n credential_type = info.get('type')\n\n if credential_type == _AUTHORIZED_USER_TYPE:\n from google.auth import _cloud_sdk\n\n try:\n credentials = _cloud_sdk.load_authorized_user_credentials(info)\n except ValueError as exc:\n raise exceptions.DefaultCredentialsError(\n 'Failed to load authorized user credentials from {}'.format(\n filename), exc)\n # Authorized user credentials do not contain the project ID.\n return credentials, None\n\n elif credential_type == _SERVICE_ACCOUNT_TYPE:\n from google.oauth2 import service_account\n\n try:\n credentials = (\n service_account.Credentials.from_service_account_info(info))\n except ValueError as exc:\n raise exceptions.DefaultCredentialsError(\n 'Failed to load service account credentials from {}'.format(\n filename), exc)\n return credentials, info.get('project_id')\n\n else:\n raise exceptions.DefaultCredentialsError(\n 'The file {file} does not have a valid type. '\n 'Type is {type}, expected one of {valid_types}.'.format(\n file=filename, type=credential_type, valid_types=_VALID_TYPES))\n\n\ndef _get_gcloud_sdk_credentials():\n \"\"\"Gets the credentials and project ID from the Cloud SDK.\"\"\"\n from google.auth import _cloud_sdk\n\n # Check if application default credentials exist.\n credentials_filename = (\n _cloud_sdk.get_application_default_credentials_path())\n\n if not os.path.isfile(credentials_filename):\n return None, None\n\n credentials, project_id = _load_credentials_from_file(\n credentials_filename)\n\n if not project_id:\n project_id = _cloud_sdk.get_project_id()\n\n if not project_id:\n _LOGGER.warning(\n 'No project ID could be determined from the Cloud SDK '\n 'configuration. 
Consider running `gcloud config set project` or '\n 'setting the %s environment variable', environment_vars.PROJECT)\n\n return credentials, project_id\n\n\ndef _get_explicit_environ_credentials():\n \"\"\"Gets credentials from the GOOGLE_APPLICATION_CREDENTIALS environment\n variable.\"\"\"\n explicit_file = os.environ.get(environment_vars.CREDENTIALS)\n\n if explicit_file is not None:\n credentials, project_id = _load_credentials_from_file(\n os.environ[environment_vars.CREDENTIALS])\n\n if not project_id:\n _LOGGER.warning(\n 'No project ID could be determined from the credentials at %s '\n 'Consider setting the %s environment variable',\n environment_vars.CREDENTIALS, environment_vars.PROJECT)\n\n return credentials, project_id\n\n else:\n return None, None\n\n\ndef _get_gae_credentials():\n \"\"\"Gets Google App Engine App Identity credentials and project ID.\"\"\"\n from google.auth import app_engine\n\n try:\n credentials = app_engine.Credentials()\n project_id = app_engine.get_project_id()\n return credentials, project_id\n except EnvironmentError:\n return None, None\n\n\ndef _get_gce_credentials(request=None):\n \"\"\"Gets credentials and project ID from the GCE Metadata Service.\"\"\"\n # Ping requires a transport, but we want application default credentials\n # to require no arguments. So, we'll use the _http_client transport which\n # uses http.client. This is only acceptable because the metadata server\n # doesn't do SSL and never requires proxies.\n from google.auth import compute_engine\n from google.auth.compute_engine import _metadata\n\n if request is None:\n request = google.auth.transport._http_client.Request()\n\n if _metadata.ping(request=request):\n # Get the project ID.\n try:\n project_id = _metadata.get_project_id(request=request)\n except exceptions.TransportError:\n _LOGGER.warning(\n 'No project ID could be determined from the Compute Engine '\n 'metadata service. Consider setting the %s environment '\n 'variable.', environment_vars.PROJECT)\n project_id = None\n\n return compute_engine.Credentials(), project_id\n else:\n return None, None\n\n\ndef default(scopes=None, request=None):\n \"\"\"Gets the default credentials for the current environment.\n\n `Application Default Credentials`_ provides an easy way to obtain\n credentials to call Google APIs for server-to-server or local applications.\n This function acquires credentials from the environment in the following\n order:\n\n 1. If the environment variable ``GOOGLE_APPLICATION_CREDENTIALS`` is set\n to the path of a valid service account JSON private key file, then it is\n loaded and returned. The project ID returned is the project ID defined\n in the service account file if available (some older files do not\n contain project ID information).\n 2. If the `Google Cloud SDK`_ is installed and has application default\n credentials set they are loaded and returned.\n\n To enable application default credentials with the Cloud SDK run::\n\n gcloud auth application-default login\n\n If the Cloud SDK has an active project, the project ID is returned. The\n active project can be set using::\n\n gcloud config set project\n\n 3. If the application is running in the `App Engine standard environment`_\n then the credentials and project ID from the `App Identity Service`_\n are used.\n 4. If the application is running in `Compute Engine`_ or the\n `App Engine flexible environment`_ then the credentials and project ID\n are obtained from the `Metadata Service`_.\n 5. 
If no credentials are found,\n :class:`~google.auth.exceptions.DefaultCredentialsError` will be raised.\n\n .. _Application Default Credentials: https://developers.google.com\\\n /identity/protocols/application-default-credentials\n .. _Google Cloud SDK: https://cloud.google.com/sdk\n .. _App Engine standard environment: https://cloud.google.com/appengine\n .. _App Identity Service: https://cloud.google.com/appengine/docs/python\\\n /appidentity/\n .. _Compute Engine: https://cloud.google.com/compute\n .. _App Engine flexible environment: https://cloud.google.com\\\n /appengine/flexible\n .. _Metadata Service: https://cloud.google.com/compute/docs\\\n /storing-retrieving-metadata\n\n Example::\n\n import google.auth\n\n credentials, project_id = google.auth.default()\n\n Args:\n scopes (Sequence[str]): The list of scopes for the credentials. If\n specified, the credentials will automatically be scoped if\n necessary.\n request (google.auth.transport.Request): An object used to make\n HTTP requests. This is used to detect whether the application\n is running on Compute Engine. If not specified, then it will\n use the standard library http client to make requests.\n\n Returns:\n Tuple[~google.auth.credentials.Credentials, Optional[str]]:\n the current environment's credentials and project ID. Project ID\n may be None, which indicates that the Project ID could not be\n ascertained from the environment.\n\n Raises:\n ~google.auth.exceptions.DefaultCredentialsError:\n If no credentials were found, or if the credentials found were\n invalid.\n \"\"\"\n from google.auth.credentials import with_scopes_if_required\n\n explicit_project_id = os.environ.get(\n environment_vars.PROJECT,\n os.environ.get(environment_vars.LEGACY_PROJECT))\n\n checkers = (\n _get_explicit_environ_credentials,\n _get_gcloud_sdk_credentials,\n _get_gae_credentials,\n lambda: _get_gce_credentials(request))\n\n for checker in checkers:\n credentials, project_id = checker()\n if credentials is not None:\n credentials = with_scopes_if_required(credentials, scopes)\n return credentials, explicit_project_id or project_id\n\n raise exceptions.DefaultCredentialsError(_HELP_MESSAGE)\n", "path": "google/auth/_default.py"}]}
| 4,035 | 133 |
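A short, self-contained sketch of how the patched guard changes the failure mode; the exception class here is a stand-in for `google.auth.exceptions.DefaultCredentialsError`, not the real import:

```python
# Demonstration of the fix: check for existence before opening, so a missing or
# empty credentials path surfaces as a clear DefaultCredentialsError.
import os


class DefaultCredentialsError(Exception):
    """Stand-in for google.auth.exceptions.DefaultCredentialsError."""


def load_credentials_from_file(filename):
    if not os.path.exists(filename):
        raise DefaultCredentialsError('File {} was not found.'.format(filename))
    with open(filename) as file_obj:
        return file_obj.read()


try:
    load_credentials_from_file('')  # the empty-string case from the issue
except DefaultCredentialsError as exc:
    print(exc)  # File  was not found.
```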
gh_patches_debug_37461
|
rasdani/github-patches
|
git_diff
|
koxudaxi__datamodel-code-generator-1606
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
msgspec output uses regex instead of pattern for the Meta constraint
Strings with a pattern constraint are output as `Meta(regex='....')`; however, they should be output as `Meta(pattern='...')`.
**To Reproduce**
Example schema:
```yaml
openapi: "3.0.0"
info:
version: 1.0.0
title: Swagger Petstore
license:
name: MIT
servers:
- url: http://petstore.swagger.io/v1
components:
schemas:
info:
type: object
properties:
hostName:
type: string
format: hostname
arn:
type: string
pattern: '(^arn:([^:]*):([^:]*):([^:]*):(|\*|[\d]{12}):(.+)$)|^\*$'
```
Used commandline:
```bash
$ datamodel-codegen --input pattern.yaml --output output_file.py --input-file-type 'openapi' \
--target-python '3.9' \
--output-model-type msgspec.Struct
```
**Expected behavior**
The regex argument to the Meta annotation should be `pattern`
```python
class Info(Struct):
hostName: Optional[str] = None
arn: Optional[
Annotated[
str,
Meta(pattern='(^arn:([^:]*):([^:]*):([^:]*):(|\\*|[\\d]{12}):(.+)$)|^\\*$'),
]
] = None
```
**Version:**
- OS: macOS
- Python version: 3.11.4
- datamodel-code-generator version: 0.22
--- END ISSUE ---
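For reference, a brief sketch using msgspec directly (assumes msgspec is installed; shown only to confirm that `Meta` takes `pattern`, not `regex`):

```python
# Decoding validates the constraint, so this demonstrates that Meta(pattern=...)
# is the keyword msgspec accepts for string regular-expression constraints.
from typing import Annotated, Optional

import msgspec
from msgspec import Meta, Struct

ARN_META = Meta(pattern='(^arn:([^:]*):([^:]*):([^:]*):(|\\*|[\\d]{12}):(.+)$)|^\\*$')


class Info(Struct):
    hostName: Optional[str] = None
    arn: Optional[Annotated[str, ARN_META]] = None


print(msgspec.json.decode(b'{"hostName": "example.com", "arn": "*"}', type=Info))
```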
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `datamodel_code_generator/model/msgspec.py`
Content:
```
1 from pathlib import Path
2 from typing import Any, ClassVar, DefaultDict, Dict, List, Optional, Set, Tuple
3
4 from datamodel_code_generator.imports import Import
5 from datamodel_code_generator.model import DataModel, DataModelFieldBase
6 from datamodel_code_generator.model.base import UNDEFINED
7 from datamodel_code_generator.model.imports import (
8 IMPORT_MSGSPEC_CONVERT,
9 IMPORT_MSGSPEC_FIELD,
10 IMPORT_MSGSPEC_META,
11 IMPORT_MSGSPEC_STRUCT,
12 )
13 from datamodel_code_generator.model.pydantic.base_model import Constraints
14 from datamodel_code_generator.model.rootmodel import RootModel
15 from datamodel_code_generator.reference import Reference
16 from datamodel_code_generator.types import chain_as_tuple
17
18
19 def _has_field_assignment(field: DataModelFieldBase) -> bool:
20 return bool(field.field) or not (
21 field.required
22 or (field.represented_default == 'None' and field.strip_default_none)
23 )
24
25
26 class Struct(DataModel):
27 TEMPLATE_FILE_PATH: ClassVar[str] = 'msgspec.jinja2'
28 BASE_CLASS: ClassVar[str] = 'msgspec.Struct'
29 DEFAULT_IMPORTS: ClassVar[Tuple[Import, ...]] = (IMPORT_MSGSPEC_STRUCT,)
30
31 def __init__(
32 self,
33 *,
34 reference: Reference,
35 fields: List[DataModelFieldBase],
36 decorators: Optional[List[str]] = None,
37 base_classes: Optional[List[Reference]] = None,
38 custom_base_class: Optional[str] = None,
39 custom_template_dir: Optional[Path] = None,
40 extra_template_data: Optional[DefaultDict[str, Dict[str, Any]]] = None,
41 methods: Optional[List[str]] = None,
42 path: Optional[Path] = None,
43 description: Optional[str] = None,
44 default: Any = UNDEFINED,
45 nullable: bool = False,
46 ) -> None:
47 super().__init__(
48 reference=reference,
49 fields=sorted(fields, key=_has_field_assignment, reverse=False),
50 decorators=decorators,
51 base_classes=base_classes,
52 custom_base_class=custom_base_class,
53 custom_template_dir=custom_template_dir,
54 extra_template_data=extra_template_data,
55 methods=methods,
56 path=path,
57 description=description,
58 default=default,
59 nullable=nullable,
60 )
61
62 @property
63 def imports(self) -> Tuple[Import, ...]:
64 extra_imports = []
65 if any(f for f in self.fields if f.field):
66 extra_imports.append(IMPORT_MSGSPEC_FIELD)
67 if any(f for f in self.fields if f.field and 'lambda: convert' in f.field):
68 extra_imports.append(IMPORT_MSGSPEC_CONVERT)
69 if any(f for f in self.fields if f.annotated):
70 extra_imports.append(IMPORT_MSGSPEC_META)
71 return chain_as_tuple(super().imports, extra_imports)
72
73
74 class DataModelField(DataModelFieldBase):
75 _FIELD_KEYS: ClassVar[Set[str]] = {
76 'default',
77 'default_factory',
78 }
79 _META_FIELD_KEYS: ClassVar[Set[str]] = {
80 'title',
81 'description',
82 'gt',
83 'ge',
84 'lt',
85 'le',
86 'multiple_of',
87 # 'min_items', # not supported by msgspec
88 # 'max_items', # not supported by msgspec
89 'min_length',
90 'max_length',
91 'regex',
92 # 'unique_items', # not supported by msgspec
93 }
94 _PARSE_METHOD = 'convert'
95 _COMPARE_EXPRESSIONS: ClassVar[Set[str]] = {'gt', 'ge', 'lt', 'le', 'multiple_of'}
96 constraints: Optional[Constraints] = None
97
98 def self_reference(self) -> bool: # pragma: no cover
99 return isinstance(self.parent, Struct) and self.parent.reference.path in {
100 d.reference.path for d in self.data_type.all_data_types if d.reference
101 }
102
103 def process_const(self) -> None:
104 if 'const' not in self.extras:
105 return None
106 self.const = True
107 self.nullable = False
108 const = self.extras['const']
109 if self.data_type.type == 'str' and isinstance(
110 const, str
111 ): # pragma: no cover # Literal supports only str
112 self.data_type = self.data_type.__class__(literals=[const])
113
114 def _get_strict_field_constraint_value(self, constraint: str, value: Any) -> Any:
115 if value is None or constraint not in self._COMPARE_EXPRESSIONS:
116 return value
117
118 if any(
119 data_type.type == 'float' for data_type in self.data_type.all_data_types
120 ):
121 return float(value)
122 return int(value)
123
124 @property
125 def field(self) -> Optional[str]:
126 """for backwards compatibility"""
127 result = str(self)
128 if result == '':
129 return None
130
131 return result
132
133 def __str__(self) -> str:
134 data: Dict[str, Any] = {
135 k: v for k, v in self.extras.items() if k in self._FIELD_KEYS
136 }
137 if self.alias:
138 data['name'] = self.alias
139
140 if self.default != UNDEFINED and self.default is not None:
141 data['default'] = self.default
142
143 if self.required:
144 data = {
145 k: v
146 for k, v in data.items()
147 if k
148 not in (
149 'default',
150 'default_factory',
151 )
152 }
153 elif self.default and 'default_factory' not in data:
154 default_factory = self._get_default_as_struct_model()
155 if default_factory is not None:
156 data.pop('default')
157 data['default_factory'] = default_factory
158
159 if not data:
160 return ''
161
162 if len(data) == 1 and 'default' in data:
163 return repr(data['default'])
164
165 kwargs = [
166 f'{k}={v if k == "default_factory" else repr(v)}' for k, v in data.items()
167 ]
168 return f'field({", ".join(kwargs)})'
169
170 @property
171 def annotated(self) -> Optional[str]:
172 if not self.use_annotated: # pragma: no cover
173 return None
174
175 data: Dict[str, Any] = {
176 k: v for k, v in self.extras.items() if k in self._META_FIELD_KEYS
177 }
178 if (
179 self.constraints is not None
180 and not self.self_reference()
181 and not self.data_type.strict
182 ):
183 data = {
184 **data,
185 **{
186 k: self._get_strict_field_constraint_value(k, v)
187 for k, v in self.constraints.dict().items()
188 if k in self._META_FIELD_KEYS
189 },
190 }
191
192 meta_arguments = sorted(
193 f'{k}={repr(v)}' for k, v in data.items() if v is not None
194 )
195 if not meta_arguments:
196 return None
197
198 meta = f'Meta({", ".join(meta_arguments)})'
199
200 return f'Annotated[{self.type_hint}, {meta}]'
201
202 def _get_default_as_struct_model(self) -> Optional[str]:
203 for data_type in self.data_type.data_types or (self.data_type,):
204 # TODO: Check nested data_types
205 if data_type.is_dict or self.data_type.is_union:
206 # TODO: Parse Union and dict model for default
207 continue # pragma: no cover
208 elif data_type.is_list and len(data_type.data_types) == 1:
209 data_type = data_type.data_types[0]
210 if ( # pragma: no cover
211 data_type.reference
212 and (
213 isinstance(data_type.reference.source, Struct)
214 or isinstance(data_type.reference.source, RootModel)
215 )
216 and isinstance(self.default, list)
217 ):
218 return f'lambda: {self._PARSE_METHOD}({repr(self.default)}, type=list[{data_type.alias or data_type.reference.source.class_name}])'
219 elif data_type.reference and isinstance(data_type.reference.source, Struct):
220 return f'lambda: {self._PARSE_METHOD}({repr(self.default)}, type={data_type.alias or data_type.reference.source.class_name})'
221 return None
222
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/datamodel_code_generator/model/msgspec.py b/datamodel_code_generator/model/msgspec.py
--- a/datamodel_code_generator/model/msgspec.py
+++ b/datamodel_code_generator/model/msgspec.py
@@ -1,6 +1,8 @@
from pathlib import Path
from typing import Any, ClassVar, DefaultDict, Dict, List, Optional, Set, Tuple
+from pydantic import Field
+
from datamodel_code_generator.imports import Import
from datamodel_code_generator.model import DataModel, DataModelFieldBase
from datamodel_code_generator.model.base import UNDEFINED
@@ -10,10 +12,12 @@
IMPORT_MSGSPEC_META,
IMPORT_MSGSPEC_STRUCT,
)
-from datamodel_code_generator.model.pydantic.base_model import Constraints
+from datamodel_code_generator.model.pydantic.base_model import (
+ Constraints as _Constraints,
+)
from datamodel_code_generator.model.rootmodel import RootModel
from datamodel_code_generator.reference import Reference
-from datamodel_code_generator.types import chain_as_tuple
+from datamodel_code_generator.types import chain_as_tuple, get_optional_type
def _has_field_assignment(field: DataModelFieldBase) -> bool:
@@ -71,6 +75,12 @@
return chain_as_tuple(super().imports, extra_imports)
+class Constraints(_Constraints):
+ # To override existing pattern alias
+ regex: Optional[str] = Field(None, alias='regex')
+ pattern: Optional[str] = Field(None, alias='pattern')
+
+
class DataModelField(DataModelFieldBase):
_FIELD_KEYS: ClassVar[Set[str]] = {
'default',
@@ -88,7 +98,7 @@
# 'max_items', # not supported by msgspec
'min_length',
'max_length',
- 'regex',
+ 'pattern',
# 'unique_items', # not supported by msgspec
}
_PARSE_METHOD = 'convert'
@@ -197,6 +207,10 @@
meta = f'Meta({", ".join(meta_arguments)})'
+ if not self.required:
+ type_hint = self.data_type.type_hint
+ annotated_type = f'Annotated[{type_hint}, {meta}]'
+ return get_optional_type(annotated_type, self.data_type.use_union_operator)
return f'Annotated[{self.type_hint}, {meta}]'
def _get_default_as_struct_model(self) -> Optional[str]:
|
{"golden_diff": "diff --git a/datamodel_code_generator/model/msgspec.py b/datamodel_code_generator/model/msgspec.py\n--- a/datamodel_code_generator/model/msgspec.py\n+++ b/datamodel_code_generator/model/msgspec.py\n@@ -1,6 +1,8 @@\n from pathlib import Path\n from typing import Any, ClassVar, DefaultDict, Dict, List, Optional, Set, Tuple\n \n+from pydantic import Field\n+\n from datamodel_code_generator.imports import Import\n from datamodel_code_generator.model import DataModel, DataModelFieldBase\n from datamodel_code_generator.model.base import UNDEFINED\n@@ -10,10 +12,12 @@\n IMPORT_MSGSPEC_META,\n IMPORT_MSGSPEC_STRUCT,\n )\n-from datamodel_code_generator.model.pydantic.base_model import Constraints\n+from datamodel_code_generator.model.pydantic.base_model import (\n+ Constraints as _Constraints,\n+)\n from datamodel_code_generator.model.rootmodel import RootModel\n from datamodel_code_generator.reference import Reference\n-from datamodel_code_generator.types import chain_as_tuple\n+from datamodel_code_generator.types import chain_as_tuple, get_optional_type\n \n \n def _has_field_assignment(field: DataModelFieldBase) -> bool:\n@@ -71,6 +75,12 @@\n return chain_as_tuple(super().imports, extra_imports)\n \n \n+class Constraints(_Constraints):\n+ # To override existing pattern alias\n+ regex: Optional[str] = Field(None, alias='regex')\n+ pattern: Optional[str] = Field(None, alias='pattern')\n+\n+\n class DataModelField(DataModelFieldBase):\n _FIELD_KEYS: ClassVar[Set[str]] = {\n 'default',\n@@ -88,7 +98,7 @@\n # 'max_items', # not supported by msgspec\n 'min_length',\n 'max_length',\n- 'regex',\n+ 'pattern',\n # 'unique_items', # not supported by msgspec\n }\n _PARSE_METHOD = 'convert'\n@@ -197,6 +207,10 @@\n \n meta = f'Meta({\", \".join(meta_arguments)})'\n \n+ if not self.required:\n+ type_hint = self.data_type.type_hint\n+ annotated_type = f'Annotated[{type_hint}, {meta}]'\n+ return get_optional_type(annotated_type, self.data_type.use_union_operator)\n return f'Annotated[{self.type_hint}, {meta}]'\n \n def _get_default_as_struct_model(self) -> Optional[str]:\n", "issue": "msgspec output uses regex instead of pattern to Meta constraint\nStrings with a pattern constraint are output as `Meta(regex='....')` however they should be output as `Meta(pattern='...')`\r\n\r\n**To Reproduce**\r\n\r\nExample schema:\r\n```yaml\r\nopenapi: \"3.0.0\"\r\ninfo:\r\n version: 1.0.0\r\n title: Swagger Petstore\r\n license:\r\n name: MIT\r\nservers:\r\n - url: http://petstore.swagger.io/v1\r\ncomponents:\r\n schemas:\r\n info:\r\n type: object\r\n properties:\r\n hostName:\r\n type: string\r\n format: hostname\r\n arn:\r\n type: string\r\n pattern: '(^arn:([^:]*):([^:]*):([^:]*):(|\\*|[\\d]{12}):(.+)$)|^\\*$'\r\n```\r\n\r\nUsed commandline:\r\n```bash\r\n$ datamodel-codegen --input pattern.yaml --output output_file.py --input-file-type 'openapi' \\\r\n --target-python '3.9' \\\r\n --output-model-type msgspec.Struct\r\n```\r\n\r\n**Expected behavior**\r\nThe regex argument to the Meta annotation should be `pattern`\r\n\r\n```python\r\nclass Info(Struct):\r\n hostName: Optional[str] = None\r\n arn: Optional[\r\n Annotated[\r\n str,\r\n Meta(pattern='(^arn:([^:]*):([^:]*):([^:]*):(|\\\\*|[\\\\d]{12}):(.+)$)|^\\\\*$'),\r\n ]\r\n ] = None\r\n```\r\n\r\n**Version:**\r\n - OS: macOS\r\n - Python version: 3.11.4\r\n - datamodel-code-generator version: 0.22\r\n\n", "before_files": [{"content": "from pathlib import Path\nfrom typing import Any, ClassVar, DefaultDict, Dict, List, Optional, Set, Tuple\n\nfrom 
datamodel_code_generator.imports import Import\nfrom datamodel_code_generator.model import DataModel, DataModelFieldBase\nfrom datamodel_code_generator.model.base import UNDEFINED\nfrom datamodel_code_generator.model.imports import (\n IMPORT_MSGSPEC_CONVERT,\n IMPORT_MSGSPEC_FIELD,\n IMPORT_MSGSPEC_META,\n IMPORT_MSGSPEC_STRUCT,\n)\nfrom datamodel_code_generator.model.pydantic.base_model import Constraints\nfrom datamodel_code_generator.model.rootmodel import RootModel\nfrom datamodel_code_generator.reference import Reference\nfrom datamodel_code_generator.types import chain_as_tuple\n\n\ndef _has_field_assignment(field: DataModelFieldBase) -> bool:\n return bool(field.field) or not (\n field.required\n or (field.represented_default == 'None' and field.strip_default_none)\n )\n\n\nclass Struct(DataModel):\n TEMPLATE_FILE_PATH: ClassVar[str] = 'msgspec.jinja2'\n BASE_CLASS: ClassVar[str] = 'msgspec.Struct'\n DEFAULT_IMPORTS: ClassVar[Tuple[Import, ...]] = (IMPORT_MSGSPEC_STRUCT,)\n\n def __init__(\n self,\n *,\n reference: Reference,\n fields: List[DataModelFieldBase],\n decorators: Optional[List[str]] = None,\n base_classes: Optional[List[Reference]] = None,\n custom_base_class: Optional[str] = None,\n custom_template_dir: Optional[Path] = None,\n extra_template_data: Optional[DefaultDict[str, Dict[str, Any]]] = None,\n methods: Optional[List[str]] = None,\n path: Optional[Path] = None,\n description: Optional[str] = None,\n default: Any = UNDEFINED,\n nullable: bool = False,\n ) -> None:\n super().__init__(\n reference=reference,\n fields=sorted(fields, key=_has_field_assignment, reverse=False),\n decorators=decorators,\n base_classes=base_classes,\n custom_base_class=custom_base_class,\n custom_template_dir=custom_template_dir,\n extra_template_data=extra_template_data,\n methods=methods,\n path=path,\n description=description,\n default=default,\n nullable=nullable,\n )\n\n @property\n def imports(self) -> Tuple[Import, ...]:\n extra_imports = []\n if any(f for f in self.fields if f.field):\n extra_imports.append(IMPORT_MSGSPEC_FIELD)\n if any(f for f in self.fields if f.field and 'lambda: convert' in f.field):\n extra_imports.append(IMPORT_MSGSPEC_CONVERT)\n if any(f for f in self.fields if f.annotated):\n extra_imports.append(IMPORT_MSGSPEC_META)\n return chain_as_tuple(super().imports, extra_imports)\n\n\nclass DataModelField(DataModelFieldBase):\n _FIELD_KEYS: ClassVar[Set[str]] = {\n 'default',\n 'default_factory',\n }\n _META_FIELD_KEYS: ClassVar[Set[str]] = {\n 'title',\n 'description',\n 'gt',\n 'ge',\n 'lt',\n 'le',\n 'multiple_of',\n # 'min_items', # not supported by msgspec\n # 'max_items', # not supported by msgspec\n 'min_length',\n 'max_length',\n 'regex',\n # 'unique_items', # not supported by msgspec\n }\n _PARSE_METHOD = 'convert'\n _COMPARE_EXPRESSIONS: ClassVar[Set[str]] = {'gt', 'ge', 'lt', 'le', 'multiple_of'}\n constraints: Optional[Constraints] = None\n\n def self_reference(self) -> bool: # pragma: no cover\n return isinstance(self.parent, Struct) and self.parent.reference.path in {\n d.reference.path for d in self.data_type.all_data_types if d.reference\n }\n\n def process_const(self) -> None:\n if 'const' not in self.extras:\n return None\n self.const = True\n self.nullable = False\n const = self.extras['const']\n if self.data_type.type == 'str' and isinstance(\n const, str\n ): # pragma: no cover # Literal supports only str\n self.data_type = self.data_type.__class__(literals=[const])\n\n def _get_strict_field_constraint_value(self, constraint: str, value: 
Any) -> Any:\n if value is None or constraint not in self._COMPARE_EXPRESSIONS:\n return value\n\n if any(\n data_type.type == 'float' for data_type in self.data_type.all_data_types\n ):\n return float(value)\n return int(value)\n\n @property\n def field(self) -> Optional[str]:\n \"\"\"for backwards compatibility\"\"\"\n result = str(self)\n if result == '':\n return None\n\n return result\n\n def __str__(self) -> str:\n data: Dict[str, Any] = {\n k: v for k, v in self.extras.items() if k in self._FIELD_KEYS\n }\n if self.alias:\n data['name'] = self.alias\n\n if self.default != UNDEFINED and self.default is not None:\n data['default'] = self.default\n\n if self.required:\n data = {\n k: v\n for k, v in data.items()\n if k\n not in (\n 'default',\n 'default_factory',\n )\n }\n elif self.default and 'default_factory' not in data:\n default_factory = self._get_default_as_struct_model()\n if default_factory is not None:\n data.pop('default')\n data['default_factory'] = default_factory\n\n if not data:\n return ''\n\n if len(data) == 1 and 'default' in data:\n return repr(data['default'])\n\n kwargs = [\n f'{k}={v if k == \"default_factory\" else repr(v)}' for k, v in data.items()\n ]\n return f'field({\", \".join(kwargs)})'\n\n @property\n def annotated(self) -> Optional[str]:\n if not self.use_annotated: # pragma: no cover\n return None\n\n data: Dict[str, Any] = {\n k: v for k, v in self.extras.items() if k in self._META_FIELD_KEYS\n }\n if (\n self.constraints is not None\n and not self.self_reference()\n and not self.data_type.strict\n ):\n data = {\n **data,\n **{\n k: self._get_strict_field_constraint_value(k, v)\n for k, v in self.constraints.dict().items()\n if k in self._META_FIELD_KEYS\n },\n }\n\n meta_arguments = sorted(\n f'{k}={repr(v)}' for k, v in data.items() if v is not None\n )\n if not meta_arguments:\n return None\n\n meta = f'Meta({\", \".join(meta_arguments)})'\n\n return f'Annotated[{self.type_hint}, {meta}]'\n\n def _get_default_as_struct_model(self) -> Optional[str]:\n for data_type in self.data_type.data_types or (self.data_type,):\n # TODO: Check nested data_types\n if data_type.is_dict or self.data_type.is_union:\n # TODO: Parse Union and dict model for default\n continue # pragma: no cover\n elif data_type.is_list and len(data_type.data_types) == 1:\n data_type = data_type.data_types[0]\n if ( # pragma: no cover\n data_type.reference\n and (\n isinstance(data_type.reference.source, Struct)\n or isinstance(data_type.reference.source, RootModel)\n )\n and isinstance(self.default, list)\n ):\n return f'lambda: {self._PARSE_METHOD}({repr(self.default)}, type=list[{data_type.alias or data_type.reference.source.class_name}])'\n elif data_type.reference and isinstance(data_type.reference.source, Struct):\n return f'lambda: {self._PARSE_METHOD}({repr(self.default)}, type={data_type.alias or data_type.reference.source.class_name})'\n return None\n", "path": "datamodel_code_generator/model/msgspec.py"}], "after_files": [{"content": "from pathlib import Path\nfrom typing import Any, ClassVar, DefaultDict, Dict, List, Optional, Set, Tuple\n\nfrom pydantic import Field\n\nfrom datamodel_code_generator.imports import Import\nfrom datamodel_code_generator.model import DataModel, DataModelFieldBase\nfrom datamodel_code_generator.model.base import UNDEFINED\nfrom datamodel_code_generator.model.imports import (\n IMPORT_MSGSPEC_CONVERT,\n IMPORT_MSGSPEC_FIELD,\n IMPORT_MSGSPEC_META,\n IMPORT_MSGSPEC_STRUCT,\n)\nfrom datamodel_code_generator.model.pydantic.base_model import (\n 
Constraints as _Constraints,\n)\nfrom datamodel_code_generator.model.rootmodel import RootModel\nfrom datamodel_code_generator.reference import Reference\nfrom datamodel_code_generator.types import chain_as_tuple, get_optional_type\n\n\ndef _has_field_assignment(field: DataModelFieldBase) -> bool:\n return bool(field.field) or not (\n field.required\n or (field.represented_default == 'None' and field.strip_default_none)\n )\n\n\nclass Struct(DataModel):\n TEMPLATE_FILE_PATH: ClassVar[str] = 'msgspec.jinja2'\n BASE_CLASS: ClassVar[str] = 'msgspec.Struct'\n DEFAULT_IMPORTS: ClassVar[Tuple[Import, ...]] = (IMPORT_MSGSPEC_STRUCT,)\n\n def __init__(\n self,\n *,\n reference: Reference,\n fields: List[DataModelFieldBase],\n decorators: Optional[List[str]] = None,\n base_classes: Optional[List[Reference]] = None,\n custom_base_class: Optional[str] = None,\n custom_template_dir: Optional[Path] = None,\n extra_template_data: Optional[DefaultDict[str, Dict[str, Any]]] = None,\n methods: Optional[List[str]] = None,\n path: Optional[Path] = None,\n description: Optional[str] = None,\n default: Any = UNDEFINED,\n nullable: bool = False,\n ) -> None:\n super().__init__(\n reference=reference,\n fields=sorted(fields, key=_has_field_assignment, reverse=False),\n decorators=decorators,\n base_classes=base_classes,\n custom_base_class=custom_base_class,\n custom_template_dir=custom_template_dir,\n extra_template_data=extra_template_data,\n methods=methods,\n path=path,\n description=description,\n default=default,\n nullable=nullable,\n )\n\n @property\n def imports(self) -> Tuple[Import, ...]:\n extra_imports = []\n if any(f for f in self.fields if f.field):\n extra_imports.append(IMPORT_MSGSPEC_FIELD)\n if any(f for f in self.fields if f.field and 'lambda: convert' in f.field):\n extra_imports.append(IMPORT_MSGSPEC_CONVERT)\n if any(f for f in self.fields if f.annotated):\n extra_imports.append(IMPORT_MSGSPEC_META)\n return chain_as_tuple(super().imports, extra_imports)\n\n\nclass Constraints(_Constraints):\n # To override existing pattern alias\n regex: Optional[str] = Field(None, alias='regex')\n pattern: Optional[str] = Field(None, alias='pattern')\n\n\nclass DataModelField(DataModelFieldBase):\n _FIELD_KEYS: ClassVar[Set[str]] = {\n 'default',\n 'default_factory',\n }\n _META_FIELD_KEYS: ClassVar[Set[str]] = {\n 'title',\n 'description',\n 'gt',\n 'ge',\n 'lt',\n 'le',\n 'multiple_of',\n # 'min_items', # not supported by msgspec\n # 'max_items', # not supported by msgspec\n 'min_length',\n 'max_length',\n 'pattern',\n # 'unique_items', # not supported by msgspec\n }\n _PARSE_METHOD = 'convert'\n _COMPARE_EXPRESSIONS: ClassVar[Set[str]] = {'gt', 'ge', 'lt', 'le', 'multiple_of'}\n constraints: Optional[Constraints] = None\n\n def self_reference(self) -> bool: # pragma: no cover\n return isinstance(self.parent, Struct) and self.parent.reference.path in {\n d.reference.path for d in self.data_type.all_data_types if d.reference\n }\n\n def process_const(self) -> None:\n if 'const' not in self.extras:\n return None\n self.const = True\n self.nullable = False\n const = self.extras['const']\n if self.data_type.type == 'str' and isinstance(\n const, str\n ): # pragma: no cover # Literal supports only str\n self.data_type = self.data_type.__class__(literals=[const])\n\n def _get_strict_field_constraint_value(self, constraint: str, value: Any) -> Any:\n if value is None or constraint not in self._COMPARE_EXPRESSIONS:\n return value\n\n if any(\n data_type.type == 'float' for data_type in 
self.data_type.all_data_types\n ):\n return float(value)\n return int(value)\n\n @property\n def field(self) -> Optional[str]:\n \"\"\"for backwards compatibility\"\"\"\n result = str(self)\n if result == '':\n return None\n\n return result\n\n def __str__(self) -> str:\n data: Dict[str, Any] = {\n k: v for k, v in self.extras.items() if k in self._FIELD_KEYS\n }\n if self.alias:\n data['name'] = self.alias\n\n if self.default != UNDEFINED and self.default is not None:\n data['default'] = self.default\n\n if self.required:\n data = {\n k: v\n for k, v in data.items()\n if k\n not in (\n 'default',\n 'default_factory',\n )\n }\n elif self.default and 'default_factory' not in data:\n default_factory = self._get_default_as_struct_model()\n if default_factory is not None:\n data.pop('default')\n data['default_factory'] = default_factory\n\n if not data:\n return ''\n\n if len(data) == 1 and 'default' in data:\n return repr(data['default'])\n\n kwargs = [\n f'{k}={v if k == \"default_factory\" else repr(v)}' for k, v in data.items()\n ]\n return f'field({\", \".join(kwargs)})'\n\n @property\n def annotated(self) -> Optional[str]:\n if not self.use_annotated: # pragma: no cover\n return None\n\n data: Dict[str, Any] = {\n k: v for k, v in self.extras.items() if k in self._META_FIELD_KEYS\n }\n if (\n self.constraints is not None\n and not self.self_reference()\n and not self.data_type.strict\n ):\n data = {\n **data,\n **{\n k: self._get_strict_field_constraint_value(k, v)\n for k, v in self.constraints.dict().items()\n if k in self._META_FIELD_KEYS\n },\n }\n\n meta_arguments = sorted(\n f'{k}={repr(v)}' for k, v in data.items() if v is not None\n )\n if not meta_arguments:\n return None\n\n meta = f'Meta({\", \".join(meta_arguments)})'\n\n if not self.required:\n type_hint = self.data_type.type_hint\n annotated_type = f'Annotated[{type_hint}, {meta}]'\n return get_optional_type(annotated_type, self.data_type.use_union_operator)\n return f'Annotated[{self.type_hint}, {meta}]'\n\n def _get_default_as_struct_model(self) -> Optional[str]:\n for data_type in self.data_type.data_types or (self.data_type,):\n # TODO: Check nested data_types\n if data_type.is_dict or self.data_type.is_union:\n # TODO: Parse Union and dict model for default\n continue # pragma: no cover\n elif data_type.is_list and len(data_type.data_types) == 1:\n data_type = data_type.data_types[0]\n if ( # pragma: no cover\n data_type.reference\n and (\n isinstance(data_type.reference.source, Struct)\n or isinstance(data_type.reference.source, RootModel)\n )\n and isinstance(self.default, list)\n ):\n return f'lambda: {self._PARSE_METHOD}({repr(self.default)}, type=list[{data_type.alias or data_type.reference.source.class_name}])'\n elif data_type.reference and isinstance(data_type.reference.source, Struct):\n return f'lambda: {self._PARSE_METHOD}({repr(self.default)}, type={data_type.alias or data_type.reference.source.class_name})'\n return None\n", "path": "datamodel_code_generator/model/msgspec.py"}]}
| 2,942 | 535 |
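As an aside on the record above: a minimal, hand-written sketch of the kind of model the patched generator is meant to emit for the issue's example schema, and of how msgspec enforces a `pattern` constraint when converting data. The class and field names simply mirror the issue's example, and the snippet assumes msgspec's documented `Meta(pattern=...)` and `convert()` behaviour; it is illustrative only and not taken from the generator's test suite.

```python
from typing import Annotated, Optional

import msgspec
from msgspec import Meta, Struct


class Info(Struct):
    hostName: Optional[str] = None
    # msgspec's Meta takes `pattern`, not `regex`, for string constraints.
    arn: Optional[
        Annotated[
            str,
            Meta(pattern=r'(^arn:([^:]*):([^:]*):([^:]*):(|\*|[\d]{12}):(.+)$)|^\*$'),
        ]
    ] = None


# Constraints are checked when decoding or converting into the Struct.
print(msgspec.convert({"arn": "arn:aws:iam::123456789012:role/demo"}, type=Info))

try:
    msgspec.convert({"arn": "not-an-arn"}, type=Info)
except msgspec.ValidationError as exc:
    print("rejected:", exc)
```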
gh_patches_debug_16056
|
rasdani/github-patches
|
git_diff
|
Zeroto521__my-data-toolkit-835
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BUG: `geocentroid`, coordinates should multiply weights
<!--
Thanks for contributing a pull request!
Please follow these standard acronyms to start the commit message:
- ENH: enhancement
- BUG: bug fix
- DOC: documentation
- TYP: type annotations
- TST: addition or modification of tests
- MAINT: maintenance commit (refactoring, typos, etc.)
- BLD: change related to building
- REL: related to releasing
- API: an (incompatible) API change
- DEP: deprecate something, or remove a deprecated object
- DEV: development tool or utility
- REV: revert an earlier commit
- PERF: performance improvement
- BOT: always commit via a bot
- CI: related to CI or CD
- CLN: Code cleanup
-->
- [x] closes #833
- [ ] whatsnew entry
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dtoolkit/geoaccessor/geoseries/geocentroid.py`
Content:
```
1 import geopandas as gpd
2 import numpy as np
3 import pandas as pd
4 from shapely import Point
5
6 from dtoolkit.geoaccessor.geoseries.geodistance import geodistance
7 from dtoolkit.geoaccessor.geoseries.xy import xy
8 from dtoolkit.geoaccessor.register import register_geoseries_method
9
10
11 @register_geoseries_method
12 def geocentroid(
13 s: gpd.GeoSeries,
14 /,
15 weights: pd.Series = None,
16 max_iter: int = 300,
17 tol: float = 1e-5,
18 ) -> Point:
19 r"""
20 Return the centroid of all points via the center of gravity method.
21
22 .. math::
23
24 \left\{\begin{matrix}
25 d_i &=& D(P(\bar{x}_n, \bar{y}_n), P(x_i, y_i)) \\
26 \bar{x}_0 &=& \frac{\sum w_i x_i}{\sum w_i} \\
27 \bar{y}_0 &=& \frac{\sum w_i y_i}{\sum w_i} \\
28 \bar{x}_{n+1} &=& \frac{\sum w_i x_i / d_i}{\sum w_i / d_i} \\
29 \bar{y}_{n+1} &=& \frac{\sum w_i y_i / d_i}{\sum w_i / d_i} \\
30 \end{matrix}\right.
31
32 Parameters
33 ----------
34 weights : Hashable or 1d array-like, optional
35 - None : All weights will be set to 1.
36 - Hashable : Only for DataFrame, the column name.
37 - 1d array-like : The weights of each point.
38
39 max_iter : int, default 300
40 Maximum number of iterations to perform.
41
42 tol : float, default 1e-5
43 Tolerance for convergence.
44
45 Returns
46 -------
47 Point
48
49 See Also
50 --------
51 geopandas.GeoSeries.centroid
52 dtoolkit.geoaccessor.geoseries.geocentroid
53 dtoolkit.geoaccessor.geodataframe.geocentroid
54
55 Examples
56 --------
57 >>> import dtoolkit.geoaccessor
58 >>> import geopandas as gpd
59 >>> from shapely import Point
60 >>> df = gpd.GeoDataFrame(
61 ... {
62 ... "weights": [1, 2, 3],
63 ... "geometry": [Point(100, 32), Point(120, 50), Point(122, 55)],
64 ... },
65 ... crs=4326,
66 ... )
67 >>> df
68 weights geometry
69 0 1 POINT (100.00000 32.00000)
70 1 2 POINT (120.00000 50.00000)
71 2 3 POINT (122.00000 55.00000)
72 >>> df.geocentroid()
73 <POINT (120 50)>
74
75 Set weights for each point.
76
77 >>> df.geocentroid("weights")
78 <POINT (121.999 54.999)>
79 >>> df.geocentroid([1, 2, 3])
80 <POINT (121.999 54.999)>
81 """
82
83 weights = np.asarray(weights) if weights is not None else 1
84 coord = xy(s)
85 X = coord.mul(weights, axis=0).mean()
86 for _ in range(max_iter):
87 dis = geodistance(s, Point(*X.tolist())).rdiv(1).mul(weights, axis=0)
88 Xt = coord.mul(dis, axis=0).sum() / dis.sum()
89
90 if ((X - Xt).abs() <= tol).all():
91 X = Xt
92 break
93
94 X = Xt
95
96 return Point(*X.tolist())
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/dtoolkit/geoaccessor/geoseries/geocentroid.py b/dtoolkit/geoaccessor/geoseries/geocentroid.py
--- a/dtoolkit/geoaccessor/geoseries/geocentroid.py
+++ b/dtoolkit/geoaccessor/geoseries/geocentroid.py
@@ -80,11 +80,15 @@
<POINT (121.999 54.999)>
"""
- weights = np.asarray(weights) if weights is not None else 1
coord = xy(s)
+ if len(coord) == 1:
+ return Point(coord.iloc[0])
+
+ weights = np.asarray(weights) if weights is not None else 1
X = coord.mul(weights, axis=0).mean()
+
for _ in range(max_iter):
- dis = geodistance(s, Point(*X.tolist())).rdiv(1).mul(weights, axis=0)
+ dis = geodistance(s, Point(X)).rdiv(1).mul(weights, axis=0)
Xt = coord.mul(dis, axis=0).sum() / dis.sum()
if ((X - Xt).abs() <= tol).all():
@@ -93,4 +97,4 @@
X = Xt
- return Point(*X.tolist())
+ return Point(X)
|
{"golden_diff": "diff --git a/dtoolkit/geoaccessor/geoseries/geocentroid.py b/dtoolkit/geoaccessor/geoseries/geocentroid.py\n--- a/dtoolkit/geoaccessor/geoseries/geocentroid.py\n+++ b/dtoolkit/geoaccessor/geoseries/geocentroid.py\n@@ -80,11 +80,15 @@\n <POINT (121.999 54.999)>\n \"\"\"\n \n- weights = np.asarray(weights) if weights is not None else 1\n coord = xy(s)\n+ if len(coord) == 1:\n+ return Point(coord.iloc[0])\n+\n+ weights = np.asarray(weights) if weights is not None else 1\n X = coord.mul(weights, axis=0).mean()\n+\n for _ in range(max_iter):\n- dis = geodistance(s, Point(*X.tolist())).rdiv(1).mul(weights, axis=0)\n+ dis = geodistance(s, Point(X)).rdiv(1).mul(weights, axis=0)\n Xt = coord.mul(dis, axis=0).sum() / dis.sum()\n \n if ((X - Xt).abs() <= tol).all():\n@@ -93,4 +97,4 @@\n \n X = Xt\n \n- return Point(*X.tolist())\n+ return Point(X)\n", "issue": "BUG: `geocentroid`, coordinates should multiply weights\n<!--\r\nThanks for contributing a pull request!\r\n\r\nPlease follow these standard acronyms to start the commit message:\r\n\r\n- ENH: enhancement\r\n- BUG: bug fix\r\n- DOC: documentation\r\n- TYP: type annotations\r\n- TST: addition or modification of tests\r\n- MAINT: maintenance commit (refactoring, typos, etc.)\r\n- BLD: change related to building\r\n- REL: related to releasing\r\n- API: an (incompatible) API change\r\n- DEP: deprecate something, or remove a deprecated object\r\n- DEV: development tool or utility\r\n- REV: revert an earlier commit\r\n- PERF: performance improvement\r\n- BOT: always commit via a bot\r\n- CI: related to CI or CD\r\n- CLN: Code cleanup\r\n-->\r\n\r\n- [x] closes #833\r\n- [ ] whatsnew entry\r\n\n", "before_files": [{"content": "import geopandas as gpd\nimport numpy as np\nimport pandas as pd\nfrom shapely import Point\n\nfrom dtoolkit.geoaccessor.geoseries.geodistance import geodistance\nfrom dtoolkit.geoaccessor.geoseries.xy import xy\nfrom dtoolkit.geoaccessor.register import register_geoseries_method\n\n\n@register_geoseries_method\ndef geocentroid(\n s: gpd.GeoSeries,\n /,\n weights: pd.Series = None,\n max_iter: int = 300,\n tol: float = 1e-5,\n) -> Point:\n r\"\"\"\n Return the centroid of all points via the center of gravity method.\n\n .. math::\n\n \\left\\{\\begin{matrix}\n d_i &=& D(P(\\bar{x}_n, \\bar{y}_n), P(x_i, y_i)) \\\\\n \\bar{x}_0 &=& \\frac{\\sum w_i x_i}{\\sum w_i} \\\\\n \\bar{y}_0 &=& \\frac{\\sum w_i y_i}{\\sum w_i} \\\\\n \\bar{x}_{n+1} &=& \\frac{\\sum w_i x_i / d_i}{\\sum w_i / d_i} \\\\\n \\bar{y}_{n+1} &=& \\frac{\\sum w_i y_i / d_i}{\\sum w_i / d_i} \\\\\n \\end{matrix}\\right.\n\n Parameters\n ----------\n weights : Hashable or 1d array-like, optional\n - None : All weights will be set to 1.\n - Hashable : Only for DataFrame, the column name.\n - 1d array-like : The weights of each point.\n\n max_iter : int, default 300\n Maximum number of iterations to perform.\n\n tol : float, default 1e-5\n Tolerance for convergence.\n\n Returns\n -------\n Point\n\n See Also\n --------\n geopandas.GeoSeries.centroid\n dtoolkit.geoaccessor.geoseries.geocentroid\n dtoolkit.geoaccessor.geodataframe.geocentroid\n\n Examples\n --------\n >>> import dtoolkit.geoaccessor\n >>> import geopandas as gpd\n >>> from shapely import Point\n >>> df = gpd.GeoDataFrame(\n ... {\n ... \"weights\": [1, 2, 3],\n ... \"geometry\": [Point(100, 32), Point(120, 50), Point(122, 55)],\n ... },\n ... crs=4326,\n ... 
)\n >>> df\n weights geometry\n 0 1 POINT (100.00000 32.00000)\n 1 2 POINT (120.00000 50.00000)\n 2 3 POINT (122.00000 55.00000)\n >>> df.geocentroid()\n <POINT (120 50)>\n\n Set weights for each point.\n\n >>> df.geocentroid(\"weights\")\n <POINT (121.999 54.999)>\n >>> df.geocentroid([1, 2, 3])\n <POINT (121.999 54.999)>\n \"\"\"\n\n weights = np.asarray(weights) if weights is not None else 1\n coord = xy(s)\n X = coord.mul(weights, axis=0).mean()\n for _ in range(max_iter):\n dis = geodistance(s, Point(*X.tolist())).rdiv(1).mul(weights, axis=0)\n Xt = coord.mul(dis, axis=0).sum() / dis.sum()\n\n if ((X - Xt).abs() <= tol).all():\n X = Xt\n break\n\n X = Xt\n\n return Point(*X.tolist())\n", "path": "dtoolkit/geoaccessor/geoseries/geocentroid.py"}], "after_files": [{"content": "import geopandas as gpd\nimport numpy as np\nimport pandas as pd\nfrom shapely import Point\n\nfrom dtoolkit.geoaccessor.geoseries.geodistance import geodistance\nfrom dtoolkit.geoaccessor.geoseries.xy import xy\nfrom dtoolkit.geoaccessor.register import register_geoseries_method\n\n\n@register_geoseries_method\ndef geocentroid(\n s: gpd.GeoSeries,\n /,\n weights: pd.Series = None,\n max_iter: int = 300,\n tol: float = 1e-5,\n) -> Point:\n r\"\"\"\n Return the centroid of all points via the center of gravity method.\n\n .. math::\n\n \\left\\{\\begin{matrix}\n d_i &=& D(P(\\bar{x}_n, \\bar{y}_n), P(x_i, y_i)) \\\\\n \\bar{x}_0 &=& \\frac{\\sum w_i x_i}{\\sum w_i} \\\\\n \\bar{y}_0 &=& \\frac{\\sum w_i y_i}{\\sum w_i} \\\\\n \\bar{x}_{n+1} &=& \\frac{\\sum w_i x_i / d_i}{\\sum w_i / d_i} \\\\\n \\bar{y}_{n+1} &=& \\frac{\\sum w_i y_i / d_i}{\\sum w_i / d_i} \\\\\n \\end{matrix}\\right.\n\n Parameters\n ----------\n weights : Hashable or 1d array-like, optional\n - None : All weights will be set to 1.\n - Hashable : Only for DataFrame, the column name.\n - 1d array-like : The weights of each point.\n\n max_iter : int, default 300\n Maximum number of iterations to perform.\n\n tol : float, default 1e-5\n Tolerance for convergence.\n\n Returns\n -------\n Point\n\n See Also\n --------\n geopandas.GeoSeries.centroid\n dtoolkit.geoaccessor.geoseries.geocentroid\n dtoolkit.geoaccessor.geodataframe.geocentroid\n\n Examples\n --------\n >>> import dtoolkit.geoaccessor\n >>> import geopandas as gpd\n >>> from shapely import Point\n >>> df = gpd.GeoDataFrame(\n ... {\n ... \"weights\": [1, 2, 3],\n ... \"geometry\": [Point(100, 32), Point(120, 50), Point(122, 55)],\n ... },\n ... crs=4326,\n ... )\n >>> df\n weights geometry\n 0 1 POINT (100.00000 32.00000)\n 1 2 POINT (120.00000 50.00000)\n 2 3 POINT (122.00000 55.00000)\n >>> df.geocentroid()\n <POINT (120 50)>\n\n Set weights for each point.\n\n >>> df.geocentroid(\"weights\")\n <POINT (121.999 54.999)>\n >>> df.geocentroid([1, 2, 3])\n <POINT (121.999 54.999)>\n \"\"\"\n\n coord = xy(s)\n if len(coord) == 1:\n return Point(coord.iloc[0])\n\n weights = np.asarray(weights) if weights is not None else 1\n X = coord.mul(weights, axis=0).mean()\n\n for _ in range(max_iter):\n dis = geodistance(s, Point(X)).rdiv(1).mul(weights, axis=0)\n Xt = coord.mul(dis, axis=0).sum() / dis.sum()\n\n if ((X - Xt).abs() <= tol).all():\n X = Xt\n break\n\n X = Xt\n\n return Point(X)\n", "path": "dtoolkit/geoaccessor/geoseries/geocentroid.py"}]}
| 1,549 | 304 |
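For readers who want to see the centre-of-gravity iteration from the geocentroid docstring in isolation: the sketch below implements the same weighted, iteratively re-weighted mean with plain NumPy, using Euclidean distance as a stand-in for the package's geodesic `geodistance`. The function name and the sample numbers are mine and only illustrate the algorithm, not dtoolkit's exact output.

```python
import numpy as np


def weighted_centroid(points, weights, max_iter=300, tol=1e-5):
    """Weiszfeld-style weighted centroid of 2-D points (illustrative only)."""
    points = np.asarray(points, dtype=float)
    weights = np.asarray(weights, dtype=float)
    # x_0 = sum(w_i * p_i) / sum(w_i), as in the docstring's formula
    x = (points * weights[:, None]).sum(axis=0) / weights.sum()
    for _ in range(max_iter):
        d = np.linalg.norm(points - x, axis=1)
        d = np.where(d == 0, 1e-12, d)          # guard against division by zero
        w = weights / d                          # the weights are divided by distance
        x_new = (points * w[:, None]).sum(axis=0) / w.sum()
        if np.all(np.abs(x_new - x) <= tol):
            return x_new
        x = x_new
    return x


pts = [[100.0, 32.0], [120.0, 50.0], [122.0, 55.0]]
print(weighted_centroid(pts, [1.0, 1.0, 1.0]))   # unweighted
print(weighted_centroid(pts, [1.0, 2.0, 3.0]))   # pulled toward the heavier points
```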
gh_patches_debug_40690
|
rasdani/github-patches
|
git_diff
|
hedyorg__hedy-1379
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add level 2 to hedy_translation.py
Now that the keywords from level 1 can be translated, new levels can be added.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hedy_translation.py`
Content:
```
1 from lark import Transformer, Tree
2 from hedy import get_keywords_for_language, ExtractAST, get_parser
3
4
5 TRANSPILER_LOOKUP = {}
6
7
8 def keywords_to_dict(to_lang="nl"):
9 """"Return a dictionary of keywords from language of choice. Key is english value is lang of choice"""
10 keywords = {}
11 keywords_from = get_keywords_for_language("en").replace("\n\n", "\n").splitlines()
12
13 keywords_to = get_keywords_for_language(to_lang).replace("\n\n", "\n").splitlines()
14 keywords_from_withoutlvl = []
15 for line in keywords_from:
16 if line[0] != '/':
17 keywords_from_withoutlvl.append(line)
18
19 keywords_to_withoutlvl = []
20 for line in keywords_to:
21 if line[0] != '/':
22 keywords_to_withoutlvl.append(line)
23
24 for line in range(len(keywords_from_withoutlvl)):
25 keywords[(keywords_from_withoutlvl[line].split('"'))[1]] = keywords_to_withoutlvl[line].split('"')[1]
26
27 return keywords
28
29
30 def translate_keywords(input_string, from_lang="nl", to_lang="nl", level=1):
31 """"Return code with keywords translated to language of choice in level of choice"""
32 parser = get_parser(level, from_lang)
33
34 punctuation_symbols = ['!', '?', '.']
35
36 keywordDict = keywords_to_dict(to_lang)
37 program_root = parser.parse(input_string + '\n').children[0]
38 abstract_syntaxtree = ExtractAST().transform(program_root)
39 translator = TRANSPILER_LOOKUP[level]
40 abstract_syntaxtree = translator(keywordDict, punctuation_symbols).transform(program_root)
41
42 return abstract_syntaxtree
43
44
45 def hedy_translator(level):
46 def decorating(c):
47 TRANSPILER_LOOKUP[level] = c
48 c.level = level
49 return c
50
51 return decorating
52
53
54 @hedy_translator(level=1)
55 class ConvertToLang1(Transformer):
56
57 def __init__(self, keywords, punctuation_symbols):
58 self.keywords = keywords
59 self.punctuation_symbols = punctuation_symbols
60 __class__.level = 1
61
62 def command(self, args):
63 return args[0]
64
65 def program(self, args):
66 return '\n'.join([str(c) for c in args])
67
68 def text(self, args):
69 return ''.join([str(c) for c in args])
70
71 def invalid_space(self, args):
72 return " " + ''.join([str(c) for c in args])
73
74 def print(self, args):
75 return self.keywords["print"] + " " + "".join([str(c) for c in args])
76
77 def echo(self, args):
78 all_args = self.keywords["echo"]
79 if args:
80 all_args += " "
81 return all_args + "".join([str(c) for c in args])
82
83 def ask(self, args):
84 return self.keywords["ask"] + " " + "".join([str(c) for c in args])
85
86 def turn(self, args):
87 return self.keywords["turn"] + " " + "".join([str(c) for c in args])
88
89 def forward(self, args):
90 return self.keywords["forward"] + " " + "".join([str(c) for c in args])
91
92 def random(self, args):
93 return self.keywords["random"] + "".join([str(c) for c in args])
94
95 def invalid(self, args):
96 return ' '.join([str(c) for c in args])
97
98 def __default__(self, data, children, meta):
99 return Tree(data, children, meta)
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/hedy_translation.py b/hedy_translation.py
--- a/hedy_translation.py
+++ b/hedy_translation.py
@@ -1,5 +1,5 @@
from lark import Transformer, Tree
-from hedy import get_keywords_for_language, ExtractAST, get_parser
+import hedy
TRANSPILER_LOOKUP = {}
@@ -8,9 +8,9 @@
def keywords_to_dict(to_lang="nl"):
""""Return a dictionary of keywords from language of choice. Key is english value is lang of choice"""
keywords = {}
- keywords_from = get_keywords_for_language("en").replace("\n\n", "\n").splitlines()
+ keywords_from = hedy.get_keywords_for_language("en").replace("\n\n", "\n").splitlines()
- keywords_to = get_keywords_for_language(to_lang).replace("\n\n", "\n").splitlines()
+ keywords_to = hedy.get_keywords_for_language(to_lang).replace("\n\n", "\n").splitlines()
keywords_from_withoutlvl = []
for line in keywords_from:
if line[0] != '/':
@@ -29,13 +29,13 @@
def translate_keywords(input_string, from_lang="nl", to_lang="nl", level=1):
""""Return code with keywords translated to language of choice in level of choice"""
- parser = get_parser(level, from_lang)
+ parser = hedy.get_parser(level, from_lang)
punctuation_symbols = ['!', '?', '.']
keywordDict = keywords_to_dict(to_lang)
program_root = parser.parse(input_string + '\n').children[0]
- abstract_syntaxtree = ExtractAST().transform(program_root)
+ abstract_syntaxtree = hedy.ExtractAST().transform(program_root)
translator = TRANSPILER_LOOKUP[level]
abstract_syntaxtree = translator(keywordDict, punctuation_symbols).transform(program_root)
@@ -97,3 +97,50 @@
def __default__(self, data, children, meta):
return Tree(data, children, meta)
+
+@hedy_translator(level=2)
+class ConvertToLang2(ConvertToLang1):
+
+ def assign(self, args):
+ return args[0] + " " + self.keywords["is"] + " " + ''.join([str(c) for c in args[1:]])
+
+ def print(self, args):
+
+ argument_string = ""
+ i = 0
+
+ for argument in args:
+ # escape quotes if kids accidentally use them at level 2
+ argument = hedy.process_characters_needing_escape(argument)
+
+ # final argument and punctuation arguments do not have to be separated with a space, other do
+ if i == len(args) - 1 or args[i + 1] in self.punctuation_symbols:
+ space = ''
+ else:
+ space = " "
+
+ argument_string += argument + space
+
+ i = i + 1
+
+ return self.keywords["print"] + " " + argument_string
+
+ def punctuation(self, args):
+ return ''.join([str(c) for c in args])
+
+ def var(self, args):
+ var = args[0]
+ all_parameters = ["'" + hedy.process_characters_needing_escape(a) + "'" for a in args[1:]]
+ return var + ''.join(all_parameters)
+
+ def ask(self, args):
+ var = args[0]
+ all_parameters = [hedy.process_characters_needing_escape(a) for a in args]
+
+ return all_parameters[0] + " " + self.keywords["is"] + " " + self.keywords["ask"] + " " + ''.join(all_parameters[1:])
+
+ def ask_dep_2(self, args):
+ return self.keywords["ask"] + " " + ''.join([str(c) for c in args])
+
+ def echo_dep_2(self, args):
+ return self.keywords["echo"] + " " + ''.join([str(c) for c in args])
|
{"golden_diff": "diff --git a/hedy_translation.py b/hedy_translation.py\n--- a/hedy_translation.py\n+++ b/hedy_translation.py\n@@ -1,5 +1,5 @@\n from lark import Transformer, Tree\n-from hedy import get_keywords_for_language, ExtractAST, get_parser\n+import hedy\n \n \n TRANSPILER_LOOKUP = {}\n@@ -8,9 +8,9 @@\n def keywords_to_dict(to_lang=\"nl\"):\n \"\"\"\"Return a dictionary of keywords from language of choice. Key is english value is lang of choice\"\"\"\n keywords = {}\n- keywords_from = get_keywords_for_language(\"en\").replace(\"\\n\\n\", \"\\n\").splitlines()\n+ keywords_from = hedy.get_keywords_for_language(\"en\").replace(\"\\n\\n\", \"\\n\").splitlines()\n \n- keywords_to = get_keywords_for_language(to_lang).replace(\"\\n\\n\", \"\\n\").splitlines()\n+ keywords_to = hedy.get_keywords_for_language(to_lang).replace(\"\\n\\n\", \"\\n\").splitlines()\n keywords_from_withoutlvl = []\n for line in keywords_from:\n if line[0] != '/':\n@@ -29,13 +29,13 @@\n \n def translate_keywords(input_string, from_lang=\"nl\", to_lang=\"nl\", level=1):\n \"\"\"\"Return code with keywords translated to language of choice in level of choice\"\"\"\n- parser = get_parser(level, from_lang)\n+ parser = hedy.get_parser(level, from_lang)\n \n punctuation_symbols = ['!', '?', '.']\n \n keywordDict = keywords_to_dict(to_lang)\n program_root = parser.parse(input_string + '\\n').children[0]\n- abstract_syntaxtree = ExtractAST().transform(program_root)\n+ abstract_syntaxtree = hedy.ExtractAST().transform(program_root)\n translator = TRANSPILER_LOOKUP[level]\n abstract_syntaxtree = translator(keywordDict, punctuation_symbols).transform(program_root)\n \n@@ -97,3 +97,50 @@\n \n def __default__(self, data, children, meta):\n return Tree(data, children, meta)\n+\n+@hedy_translator(level=2)\n+class ConvertToLang2(ConvertToLang1):\n+\n+ def assign(self, args):\n+ return args[0] + \" \" + self.keywords[\"is\"] + \" \" + ''.join([str(c) for c in args[1:]])\n+\n+ def print(self, args):\n+\n+ argument_string = \"\"\n+ i = 0\n+\n+ for argument in args:\n+ # escape quotes if kids accidentally use them at level 2\n+ argument = hedy.process_characters_needing_escape(argument)\n+\n+ # final argument and punctuation arguments do not have to be separated with a space, other do\n+ if i == len(args) - 1 or args[i + 1] in self.punctuation_symbols:\n+ space = ''\n+ else:\n+ space = \" \"\n+\n+ argument_string += argument + space\n+\n+ i = i + 1\n+\n+ return self.keywords[\"print\"] + \" \" + argument_string\n+\n+ def punctuation(self, args):\n+ return ''.join([str(c) for c in args])\n+\n+ def var(self, args):\n+ var = args[0]\n+ all_parameters = [\"'\" + hedy.process_characters_needing_escape(a) + \"'\" for a in args[1:]]\n+ return var + ''.join(all_parameters)\n+\n+ def ask(self, args):\n+ var = args[0]\n+ all_parameters = [hedy.process_characters_needing_escape(a) for a in args]\n+\n+ return all_parameters[0] + \" \" + self.keywords[\"is\"] + \" \" + self.keywords[\"ask\"] + \" \" + ''.join(all_parameters[1:])\n+\n+ def ask_dep_2(self, args):\n+ return self.keywords[\"ask\"] + \" \" + ''.join([str(c) for c in args])\n+\n+ def echo_dep_2(self, args):\n+ return self.keywords[\"echo\"] + \" \" + ''.join([str(c) for c in args])\n", "issue": "Add level 2 to hedy_translation.py\nNow that the keywords from level 1 can be translated, new levels can be added.\n", "before_files": [{"content": "from lark import Transformer, Tree\nfrom hedy import get_keywords_for_language, ExtractAST, get_parser\n\n\nTRANSPILER_LOOKUP = {}\n\n\ndef 
keywords_to_dict(to_lang=\"nl\"):\n \"\"\"\"Return a dictionary of keywords from language of choice. Key is english value is lang of choice\"\"\"\n keywords = {}\n keywords_from = get_keywords_for_language(\"en\").replace(\"\\n\\n\", \"\\n\").splitlines()\n\n keywords_to = get_keywords_for_language(to_lang).replace(\"\\n\\n\", \"\\n\").splitlines()\n keywords_from_withoutlvl = []\n for line in keywords_from:\n if line[0] != '/':\n keywords_from_withoutlvl.append(line)\n\n keywords_to_withoutlvl = []\n for line in keywords_to:\n if line[0] != '/':\n keywords_to_withoutlvl.append(line)\n\n for line in range(len(keywords_from_withoutlvl)):\n keywords[(keywords_from_withoutlvl[line].split('\"'))[1]] = keywords_to_withoutlvl[line].split('\"')[1]\n\n return keywords\n\n\ndef translate_keywords(input_string, from_lang=\"nl\", to_lang=\"nl\", level=1):\n \"\"\"\"Return code with keywords translated to language of choice in level of choice\"\"\"\n parser = get_parser(level, from_lang)\n\n punctuation_symbols = ['!', '?', '.']\n\n keywordDict = keywords_to_dict(to_lang)\n program_root = parser.parse(input_string + '\\n').children[0]\n abstract_syntaxtree = ExtractAST().transform(program_root)\n translator = TRANSPILER_LOOKUP[level]\n abstract_syntaxtree = translator(keywordDict, punctuation_symbols).transform(program_root)\n\n return abstract_syntaxtree\n\n\ndef hedy_translator(level):\n def decorating(c):\n TRANSPILER_LOOKUP[level] = c\n c.level = level\n return c\n\n return decorating\n\n\n@hedy_translator(level=1)\nclass ConvertToLang1(Transformer):\n\n def __init__(self, keywords, punctuation_symbols):\n self.keywords = keywords\n self.punctuation_symbols = punctuation_symbols\n __class__.level = 1\n\n def command(self, args):\n return args[0]\n\n def program(self, args):\n return '\\n'.join([str(c) for c in args])\n\n def text(self, args):\n return ''.join([str(c) for c in args])\n\n def invalid_space(self, args):\n return \" \" + ''.join([str(c) for c in args])\n\n def print(self, args):\n return self.keywords[\"print\"] + \" \" + \"\".join([str(c) for c in args])\n\n def echo(self, args):\n all_args = self.keywords[\"echo\"]\n if args:\n all_args += \" \"\n return all_args + \"\".join([str(c) for c in args])\n\n def ask(self, args):\n return self.keywords[\"ask\"] + \" \" + \"\".join([str(c) for c in args])\n\n def turn(self, args):\n return self.keywords[\"turn\"] + \" \" + \"\".join([str(c) for c in args])\n\n def forward(self, args):\n return self.keywords[\"forward\"] + \" \" + \"\".join([str(c) for c in args])\n\n def random(self, args):\n return self.keywords[\"random\"] + \"\".join([str(c) for c in args])\n\n def invalid(self, args):\n return ' '.join([str(c) for c in args])\n\n def __default__(self, data, children, meta):\n return Tree(data, children, meta)\n", "path": "hedy_translation.py"}], "after_files": [{"content": "from lark import Transformer, Tree\nimport hedy\n\n\nTRANSPILER_LOOKUP = {}\n\n\ndef keywords_to_dict(to_lang=\"nl\"):\n \"\"\"\"Return a dictionary of keywords from language of choice. 
Key is english value is lang of choice\"\"\"\n keywords = {}\n keywords_from = hedy.get_keywords_for_language(\"en\").replace(\"\\n\\n\", \"\\n\").splitlines()\n\n keywords_to = hedy.get_keywords_for_language(to_lang).replace(\"\\n\\n\", \"\\n\").splitlines()\n keywords_from_withoutlvl = []\n for line in keywords_from:\n if line[0] != '/':\n keywords_from_withoutlvl.append(line)\n\n keywords_to_withoutlvl = []\n for line in keywords_to:\n if line[0] != '/':\n keywords_to_withoutlvl.append(line)\n\n for line in range(len(keywords_from_withoutlvl)):\n keywords[(keywords_from_withoutlvl[line].split('\"'))[1]] = keywords_to_withoutlvl[line].split('\"')[1]\n\n return keywords\n\n\ndef translate_keywords(input_string, from_lang=\"nl\", to_lang=\"nl\", level=1):\n \"\"\"\"Return code with keywords translated to language of choice in level of choice\"\"\"\n parser = hedy.get_parser(level, from_lang)\n\n punctuation_symbols = ['!', '?', '.']\n\n keywordDict = keywords_to_dict(to_lang)\n program_root = parser.parse(input_string + '\\n').children[0]\n abstract_syntaxtree = hedy.ExtractAST().transform(program_root)\n translator = TRANSPILER_LOOKUP[level]\n abstract_syntaxtree = translator(keywordDict, punctuation_symbols).transform(program_root)\n\n return abstract_syntaxtree\n\n\ndef hedy_translator(level):\n def decorating(c):\n TRANSPILER_LOOKUP[level] = c\n c.level = level\n return c\n\n return decorating\n\n\n@hedy_translator(level=1)\nclass ConvertToLang1(Transformer):\n\n def __init__(self, keywords, punctuation_symbols):\n self.keywords = keywords\n self.punctuation_symbols = punctuation_symbols\n __class__.level = 1\n\n def command(self, args):\n return args[0]\n\n def program(self, args):\n return '\\n'.join([str(c) for c in args])\n\n def text(self, args):\n return ''.join([str(c) for c in args])\n\n def invalid_space(self, args):\n return \" \" + ''.join([str(c) for c in args])\n\n def print(self, args):\n return self.keywords[\"print\"] + \" \" + \"\".join([str(c) for c in args])\n\n def echo(self, args):\n all_args = self.keywords[\"echo\"]\n if args:\n all_args += \" \"\n return all_args + \"\".join([str(c) for c in args])\n\n def ask(self, args):\n return self.keywords[\"ask\"] + \" \" + \"\".join([str(c) for c in args])\n\n def turn(self, args):\n return self.keywords[\"turn\"] + \" \" + \"\".join([str(c) for c in args])\n\n def forward(self, args):\n return self.keywords[\"forward\"] + \" \" + \"\".join([str(c) for c in args])\n\n def random(self, args):\n return self.keywords[\"random\"] + \"\".join([str(c) for c in args])\n\n def invalid(self, args):\n return ' '.join([str(c) for c in args])\n\n def __default__(self, data, children, meta):\n return Tree(data, children, meta)\n\n@hedy_translator(level=2)\nclass ConvertToLang2(ConvertToLang1):\n\n def assign(self, args):\n return args[0] + \" \" + self.keywords[\"is\"] + \" \" + ''.join([str(c) for c in args[1:]])\n\n def print(self, args):\n\n argument_string = \"\"\n i = 0\n\n for argument in args:\n # escape quotes if kids accidentally use them at level 2\n argument = hedy.process_characters_needing_escape(argument)\n\n # final argument and punctuation arguments do not have to be separated with a space, other do\n if i == len(args) - 1 or args[i + 1] in self.punctuation_symbols:\n space = ''\n else:\n space = \" \"\n\n argument_string += argument + space\n\n i = i + 1\n\n return self.keywords[\"print\"] + \" \" + argument_string\n\n def punctuation(self, args):\n return ''.join([str(c) for c in args])\n\n def var(self, args):\n 
var = args[0]\n all_parameters = [\"'\" + hedy.process_characters_needing_escape(a) + \"'\" for a in args[1:]]\n return var + ''.join(all_parameters)\n\n def ask(self, args):\n var = args[0]\n all_parameters = [hedy.process_characters_needing_escape(a) for a in args]\n\n return all_parameters[0] + \" \" + self.keywords[\"is\"] + \" \" + self.keywords[\"ask\"] + \" \" + ''.join(all_parameters[1:])\n\n def ask_dep_2(self, args):\n return self.keywords[\"ask\"] + \" \" + ''.join([str(c) for c in args])\n\n def echo_dep_2(self, args):\n return self.keywords[\"echo\"] + \" \" + ''.join([str(c) for c in args])\n", "path": "hedy_translation.py"}]}
| 1,243 | 886 |
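The level-2 translator added by the patch above relies on two things: a decorator that registers each transformer class in a per-level lookup table, and subclassing so that level 2 only overrides what differs from level 1. A minimal standalone sketch of that pattern, with made-up class and rule names and no dependency on hedy's grammar or keyword files, is:

```python
LOOKUP = {}


def translator(level):
    """Register a transformer class for a given level (sketch)."""
    def decorating(cls):
        LOOKUP[level] = cls
        cls.level = level
        return cls
    return decorating


@translator(level=1)
class Level1:
    def __init__(self, keywords):
        self.keywords = keywords

    def print_cmd(self, args):
        return self.keywords["print"] + " " + " ".join(args)


@translator(level=2)
class Level2(Level1):
    # level 2 inherits every level-1 rule and only adds assignment
    def assign(self, args):
        return args[0] + " " + self.keywords["is"] + " " + " ".join(args[1:])


keywords = {"print": "print", "is": "is"}
t = LOOKUP[2](keywords)
print(t.print_cmd(["Hallo", "wereld"]))   # -> print Hallo wereld
print(t.assign(["naam", "Hedy"]))         # -> naam is Hedy
```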
gh_patches_debug_19603
|
rasdani/github-patches
|
git_diff
|
cookiecutter__cookiecutter-53
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Jinja breaks on syntax error without reporting where the error occurred
I tried to create a project from [here](https://github.com/thedrow/cookiecutter-pypackage/) and I got the following traceback:
```
cookiecutter https://github.com/thedrow/cookiecutter-pypackage.git
You've cloned cookiecutter-pypackage before. Is it okay to delete and re-clone it? [Y/n] y
Cloning into 'cookiecutter-pypackage'...
remote: Counting objects: 249, done.
remote: Compressing objects: 100% (131/131), done.
remote: Total 249 (delta 108), reused 230 (delta 96)
Receiving objects: 100% (249/249), 39.41 KiB, done.
Resolving deltas: 100% (108/108), done.
full_name (default is "Omer Katz")?
email (default is "[email protected]")?
github_username (default is "thedrow")? testsuite
project_name (default is "Python Boilerplate")? testsuite
repo_name (default is "boilerplate")? testsuite
project_short_description (default is "Python Boilerplate contains all the boilerplate you need to create a Python package.")? Testsuite is a set of plugins & extensions to nose2 that aims to assist you to write better tests.
release_date (default is "2013-08-25")?
year (default is "2013")?
version (default is "0.1.0")?
Traceback (most recent call last):
File "/usr/local/bin/cookiecutter", line 9, in <module>
load_entry_point('cookiecutter==0.6.4', 'console_scripts', 'cookiecutter')()
File "/usr/local/lib/python3.3/dist-packages/cookiecutter/main.py", line 90, in main
cookiecutter(args.input_dir)
File "/usr/local/lib/python3.3/dist-packages/cookiecutter/main.py", line 61, in cookiecutter
context=context
File "/usr/local/lib/python3.3/dist-packages/cookiecutter/generate.py", line 135, in generate_files
generate_file(infile, context, env)
File "/usr/local/lib/python3.3/dist-packages/cookiecutter/generate.py", line 85, in generate_file
tmpl = env.get_template(infile_fwd_slashes)
File "/usr/local/lib/python3.3/dist-packages/jinja2/environment.py", line 791, in get_template
return self._load_template(name, self.make_globals(globals))
File "/usr/local/lib/python3.3/dist-packages/jinja2/environment.py", line 765, in _load_template
template = self.loader.load(self, name, globals)
File "/usr/local/lib/python3.3/dist-packages/jinja2/loaders.py", line 125, in load
code = environment.compile(source, name, filename)
File "/usr/local/lib/python3.3/dist-packages/jinja2/environment.py", line 554, in compile
self.handle_exception(exc_info, source_hint=source)
File "/usr/local/lib/python3.3/dist-packages/jinja2/environment.py", line 742, in handle_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.3/dist-packages/jinja2/_compat.py", line 36, in reraise
raise value.with_traceback(tb)
File "./cookiecutter-pypackage/{{cookiecutter.repo_name}}/LICENSE", line 13, in <module>
* Neither the name of {{ cookiecutter.project_name } nor the names of its contributors
File "/usr/local/lib/python3.3/dist-packages/jinja2/environment.py", line 459, in _parse
return Parser(self, source, name, encode_filename(filename)).parse()
File "/usr/local/lib/python3.3/dist-packages/jinja2/parser.py", line 893, in parse
result = nodes.Template(self.subparse(), lineno=1)
File "/usr/local/lib/python3.3/dist-packages/jinja2/parser.py", line 867, in subparse
add_data(self.parse_tuple(with_condexpr=True))
File "/usr/local/lib/python3.3/dist-packages/jinja2/parser.py", line 612, in parse_tuple
args.append(parse())
File "/usr/local/lib/python3.3/dist-packages/jinja2/parser.py", line 386, in parse_expression
return self.parse_condexpr()
File "/usr/local/lib/python3.3/dist-packages/jinja2/parser.py", line 391, in parse_condexpr
expr1 = self.parse_or()
File "/usr/local/lib/python3.3/dist-packages/jinja2/parser.py", line 404, in parse_or
left = self.parse_and()
File "/usr/local/lib/python3.3/dist-packages/jinja2/parser.py", line 413, in parse_and
left = self.parse_not()
File "/usr/local/lib/python3.3/dist-packages/jinja2/parser.py", line 424, in parse_not
return self.parse_compare()
File "/usr/local/lib/python3.3/dist-packages/jinja2/parser.py", line 428, in parse_compare
expr = self.parse_add()
File "/usr/local/lib/python3.3/dist-packages/jinja2/parser.py", line 450, in parse_add
left = self.parse_sub()
File "/usr/local/lib/python3.3/dist-packages/jinja2/parser.py", line 460, in parse_sub
left = self.parse_concat()
File "/usr/local/lib/python3.3/dist-packages/jinja2/parser.py", line 470, in parse_concat
args = [self.parse_mul()]
File "/usr/local/lib/python3.3/dist-packages/jinja2/parser.py", line 480, in parse_mul
left = self.parse_div()
File "/usr/local/lib/python3.3/dist-packages/jinja2/parser.py", line 490, in parse_div
left = self.parse_floordiv()
File "/usr/local/lib/python3.3/dist-packages/jinja2/parser.py", line 500, in parse_floordiv
left = self.parse_mod()
File "/usr/local/lib/python3.3/dist-packages/jinja2/parser.py", line 510, in parse_mod
left = self.parse_pow()
File "/usr/local/lib/python3.3/dist-packages/jinja2/parser.py", line 520, in parse_pow
left = self.parse_unary()
File "/usr/local/lib/python3.3/dist-packages/jinja2/parser.py", line 539, in parse_unary
node = self.parse_postfix(node)
File "/usr/local/lib/python3.3/dist-packages/jinja2/parser.py", line 664, in parse_postfix
node = self.parse_subscript(node)
File "/usr/local/lib/python3.3/dist-packages/jinja2/parser.py", line 692, in parse_subscript
next(self.stream)
File "/usr/local/lib/python3.3/dist-packages/jinja2/lexer.py", line 349, in __next__
self.current = next(self._iter)
File "/usr/local/lib/python3.3/dist-packages/jinja2/lexer.py", line 552, in wrap
for lineno, token, value in stream:
File "/usr/local/lib/python3.3/dist-packages/jinja2/lexer.py", line 677, in tokeniter
filename)
jinja2.exceptions.TemplateSyntaxError: unexpected '}'
```
I'm using Python 3.3 on Ubuntu 13.04.
--- END ISSUE ---
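For context, a minimal sketch of the pattern the fix further down this record uses: catching Jinja's `TemplateSyntaxError` around `get_template` and clearing its `translated` flag before re-raising, so the printed error keeps verbose information about where the syntax error sits. The loader path and helper name below are placeholders, not cookiecutter's actual code.

```python
from jinja2 import Environment, FileSystemLoader
from jinja2.exceptions import TemplateSyntaxError

env = Environment(loader=FileSystemLoader("."))  # placeholder loader

def get_template_with_location(env, path):
    """Load a template, keeping file/line info if its syntax is broken."""
    try:
        return env.get_template(path)
    except TemplateSyntaxError as exc:
        # Disable translation so the re-raised exception reports the
        # offending template and line number verbosely.
        exc.translated = False
        raise
```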
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cookiecutter/generate.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """
5 cookiecutter.generate
6 ---------------------
7
8 Functions for generating a project from a project template.
9 """
10 from __future__ import unicode_literals
11 import logging
12 import os
13 import shutil
14 import sys
15
16 from jinja2 import FileSystemLoader, Template
17 from jinja2.environment import Environment
18 from binaryornot.check import is_binary
19
20 from .exceptions import NonTemplatedInputDirException
21 from .utils import make_sure_path_exists, unicode_open
22
23
24 if sys.version_info[:2] < (2, 7):
25 import simplejson as json
26 from ordereddict import OrderedDict
27 else:
28 import json
29 from collections import OrderedDict
30
31
32 def generate_context(config_file='cookiecutter.json'):
33 """
34 Generates the context for a Cookiecutter project template.
35 Loads the JSON file as a Python object, with key being the JSON filename.
36
37 :param config_file: JSON file containing project config values.
38 :paramtype config_file: filename
39 """
40
41 context = {}
42
43 file_handle = open(config_file)
44 obj = json.load(file_handle, encoding='utf-8', object_pairs_hook=OrderedDict)
45
46 # Add the Python object to the context dictionary
47 file_name = os.path.split(config_file)[1]
48 file_stem = file_name.split('.')[0]
49 context[file_stem] = obj
50
51 logging.debug('Context generated is {0}'.format(context))
52 return context
53
54
55 def generate_file(infile, context, env):
56 """
57 1. Render the contents of infile.
58 2. Render the filename of infile as the name of outfile.
59 3. Write the rendered infile to outfile.
60 :param infile: Input file to generate the file from.
61 """
62 logging.debug("Generating file {0}".format(infile))
63
64 # Render the path to the output file (but don't include the filename)
65 outdir_tmpl = Template(os.path.dirname(os.path.abspath(infile)))
66 outdir = outdir_tmpl.render(**context)
67 fname = os.path.basename(os.path.abspath(infile)) # input/output filename
68 # Write it to the corresponding place in output_dir
69 outfile = os.path.join(outdir, fname)
70 logging.debug("outfile is {0}".format(outfile))
71
72 # Just copy over binary files. Don't render.
73 logging.debug("Check {0} to see if it's a binary".format(infile))
74 if is_binary(infile):
75 logging.debug("Copying binary {0} to {1} without rendering"
76 .format(infile, outfile))
77 shutil.copyfile(infile, outfile)
78
79 else:
80 # Force fwd slashes on Windows for get_template
81 # This is a by-design Jinja issue
82 infile_fwd_slashes = infile.replace(os.path.sep, '/')
83
84 # Render the file
85 tmpl = env.get_template(infile_fwd_slashes)
86 rendered_file = tmpl.render(**context)
87
88 # Render the output filename before writing
89 name_tmpl = Template(outfile)
90 rendered_name = name_tmpl.render(**context)
91 logging.debug("Writing {0}".format(rendered_name))
92
93 with unicode_open(rendered_name, 'w') as fh:
94 fh.write(rendered_file)
95
96 def generate_files(template_dir, context=None):
97 """
98 Renders the templates and saves them to files.
99 :param input_dir: Project template input directory.
100 :paramtype input_dir: directory
101 """
102
103 # Always use utf-8
104 template_dir = template_dir
105
106 logging.debug('Generating project from {0}...'.format(template_dir))
107
108 context = context or {}
109 env = Environment()
110 env.loader = FileSystemLoader('.')
111
112 # Render dirname before writing
113 name_tmpl = Template(template_dir)
114 output_dir = name_tmpl.render(**context)
115 if output_dir == template_dir:
116 raise NonTemplatedInputDirException
117
118 logging.debug("output_dir is {0}".format(output_dir))
119 make_sure_path_exists(output_dir)
120
121 for root, dirs, files in os.walk(template_dir):
122 for d in dirs:
123 indir = os.path.join(root, d)
124 outdir = indir.replace(template_dir, output_dir, 1)
125
126 # Render dirname before writing
127 name_tmpl = Template(outdir)
128 rendered_dirname = name_tmpl.render(**context)
129
130 make_sure_path_exists(rendered_dirname)
131
132 for f in files:
133 logging.debug("f is {0}".format(f))
134 infile = os.path.join(root, f)
135 generate_file(infile, context, env)
136
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/cookiecutter/generate.py b/cookiecutter/generate.py
--- a/cookiecutter/generate.py
+++ b/cookiecutter/generate.py
@@ -15,6 +15,7 @@
from jinja2 import FileSystemLoader, Template
from jinja2.environment import Environment
+from jinja2.exceptions import TemplateSyntaxError
from binaryornot.check import is_binary
from .exceptions import NonTemplatedInputDirException
@@ -82,7 +83,13 @@
infile_fwd_slashes = infile.replace(os.path.sep, '/')
# Render the file
- tmpl = env.get_template(infile_fwd_slashes)
+ try:
+ tmpl = env.get_template(infile_fwd_slashes)
+ except TemplateSyntaxError as exception:
+ # Disable translated so that printed exception contains verbose
+ # information about syntax error location
+ exception.translated = False
+ raise
rendered_file = tmpl.render(**context)
# Render the output filename before writing
|
{"golden_diff": "diff --git a/cookiecutter/generate.py b/cookiecutter/generate.py\n--- a/cookiecutter/generate.py\n+++ b/cookiecutter/generate.py\n@@ -15,6 +15,7 @@\n \n from jinja2 import FileSystemLoader, Template\n from jinja2.environment import Environment\n+from jinja2.exceptions import TemplateSyntaxError\n from binaryornot.check import is_binary\n \n from .exceptions import NonTemplatedInputDirException\n@@ -82,7 +83,13 @@\n infile_fwd_slashes = infile.replace(os.path.sep, '/')\n \n # Render the file\n- tmpl = env.get_template(infile_fwd_slashes)\n+ try:\n+ tmpl = env.get_template(infile_fwd_slashes)\n+ except TemplateSyntaxError as exception:\n+ # Disable translated so that printed exception contains verbose\n+ # information about syntax error location\n+ exception.translated = False\n+ raise\n rendered_file = tmpl.render(**context)\n \n # Render the output filename before writing\n", "issue": "Jinja breaks on syntax error without reporting where the error occured\nI tried to create a project from [here](https://github.com/thedrow/cookiecutter-pypackage/) and I got the following traceback:\n\n```\ncookiecutter https://github.com/thedrow/cookiecutter-pypackage.git\nYou've cloned cookiecutter-pypackage before. Is it okay to delete and re-clone it? [Y/n] y\nCloning into 'cookiecutter-pypackage'...\nremote: Counting objects: 249, done.\nremote: Compressing objects: 100% (131/131), done.\nremote: Total 249 (delta 108), reused 230 (delta 96)\nReceiving objects: 100% (249/249), 39.41 KiB, done.\nResolving deltas: 100% (108/108), done.\nfull_name (default is \"Omer Katz\")? \nemail (default is \"[email protected]\")? \ngithub_username (default is \"thedrow\")? testsuite\nproject_name (default is \"Python Boilerplate\")? testsuite\nrepo_name (default is \"boilerplate\")? testsuite\nproject_short_description (default is \"Python Boilerplate contains all the boilerplate you need to create a Python package.\")? Testsuite is a set of plugins & extensions to nose2 that aims to assist you to write better tests.\nrelease_date (default is \"2013-08-25\")? \nyear (default is \"2013\")? \nversion (default is \"0.1.0\")? 
\nTraceback (most recent call last):\n File \"/usr/local/bin/cookiecutter\", line 9, in <module>\n load_entry_point('cookiecutter==0.6.4', 'console_scripts', 'cookiecutter')()\n File \"/usr/local/lib/python3.3/dist-packages/cookiecutter/main.py\", line 90, in main\n cookiecutter(args.input_dir)\n File \"/usr/local/lib/python3.3/dist-packages/cookiecutter/main.py\", line 61, in cookiecutter\n context=context\n File \"/usr/local/lib/python3.3/dist-packages/cookiecutter/generate.py\", line 135, in generate_files\n generate_file(infile, context, env)\n File \"/usr/local/lib/python3.3/dist-packages/cookiecutter/generate.py\", line 85, in generate_file\n tmpl = env.get_template(infile_fwd_slashes)\n File \"/usr/local/lib/python3.3/dist-packages/jinja2/environment.py\", line 791, in get_template\n return self._load_template(name, self.make_globals(globals))\n File \"/usr/local/lib/python3.3/dist-packages/jinja2/environment.py\", line 765, in _load_template\n template = self.loader.load(self, name, globals)\n File \"/usr/local/lib/python3.3/dist-packages/jinja2/loaders.py\", line 125, in load\n code = environment.compile(source, name, filename)\n File \"/usr/local/lib/python3.3/dist-packages/jinja2/environment.py\", line 554, in compile\n self.handle_exception(exc_info, source_hint=source)\n File \"/usr/local/lib/python3.3/dist-packages/jinja2/environment.py\", line 742, in handle_exception\n reraise(exc_type, exc_value, tb)\n File \"/usr/local/lib/python3.3/dist-packages/jinja2/_compat.py\", line 36, in reraise\n raise value.with_traceback(tb)\n File \"./cookiecutter-pypackage/{{cookiecutter.repo_name}}/LICENSE\", line 13, in <module>\n * Neither the name of {{ cookiecutter.project_name } nor the names of its contributors\n File \"/usr/local/lib/python3.3/dist-packages/jinja2/environment.py\", line 459, in _parse\n return Parser(self, source, name, encode_filename(filename)).parse()\n File \"/usr/local/lib/python3.3/dist-packages/jinja2/parser.py\", line 893, in parse\n result = nodes.Template(self.subparse(), lineno=1)\n File \"/usr/local/lib/python3.3/dist-packages/jinja2/parser.py\", line 867, in subparse\n add_data(self.parse_tuple(with_condexpr=True))\n File \"/usr/local/lib/python3.3/dist-packages/jinja2/parser.py\", line 612, in parse_tuple\n args.append(parse())\n File \"/usr/local/lib/python3.3/dist-packages/jinja2/parser.py\", line 386, in parse_expression\n return self.parse_condexpr()\n File \"/usr/local/lib/python3.3/dist-packages/jinja2/parser.py\", line 391, in parse_condexpr\n expr1 = self.parse_or()\n File \"/usr/local/lib/python3.3/dist-packages/jinja2/parser.py\", line 404, in parse_or\n left = self.parse_and()\n File \"/usr/local/lib/python3.3/dist-packages/jinja2/parser.py\", line 413, in parse_and\n left = self.parse_not()\n File \"/usr/local/lib/python3.3/dist-packages/jinja2/parser.py\", line 424, in parse_not\n return self.parse_compare()\n File \"/usr/local/lib/python3.3/dist-packages/jinja2/parser.py\", line 428, in parse_compare\n expr = self.parse_add()\n File \"/usr/local/lib/python3.3/dist-packages/jinja2/parser.py\", line 450, in parse_add\n left = self.parse_sub()\n File \"/usr/local/lib/python3.3/dist-packages/jinja2/parser.py\", line 460, in parse_sub\n left = self.parse_concat()\n File \"/usr/local/lib/python3.3/dist-packages/jinja2/parser.py\", line 470, in parse_concat\n args = [self.parse_mul()]\n File \"/usr/local/lib/python3.3/dist-packages/jinja2/parser.py\", line 480, in parse_mul\n left = self.parse_div()\n File 
\"/usr/local/lib/python3.3/dist-packages/jinja2/parser.py\", line 490, in parse_div\n left = self.parse_floordiv()\n File \"/usr/local/lib/python3.3/dist-packages/jinja2/parser.py\", line 500, in parse_floordiv\n left = self.parse_mod()\n File \"/usr/local/lib/python3.3/dist-packages/jinja2/parser.py\", line 510, in parse_mod\n left = self.parse_pow()\n File \"/usr/local/lib/python3.3/dist-packages/jinja2/parser.py\", line 520, in parse_pow\n left = self.parse_unary()\n File \"/usr/local/lib/python3.3/dist-packages/jinja2/parser.py\", line 539, in parse_unary\n node = self.parse_postfix(node)\n File \"/usr/local/lib/python3.3/dist-packages/jinja2/parser.py\", line 664, in parse_postfix\n node = self.parse_subscript(node)\n File \"/usr/local/lib/python3.3/dist-packages/jinja2/parser.py\", line 692, in parse_subscript\n next(self.stream)\n File \"/usr/local/lib/python3.3/dist-packages/jinja2/lexer.py\", line 349, in __next__\n self.current = next(self._iter)\n File \"/usr/local/lib/python3.3/dist-packages/jinja2/lexer.py\", line 552, in wrap\n for lineno, token, value in stream:\n File \"/usr/local/lib/python3.3/dist-packages/jinja2/lexer.py\", line 677, in tokeniter\n filename)\njinja2.exceptions.TemplateSyntaxError: unexpected '}'\n```\n\nI'm using Python 3.3 on Ubuntu 13.04.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.generate\n---------------------\n\nFunctions for generating a project from a project template.\n\"\"\"\nfrom __future__ import unicode_literals\nimport logging\nimport os\nimport shutil\nimport sys\n\nfrom jinja2 import FileSystemLoader, Template\nfrom jinja2.environment import Environment\nfrom binaryornot.check import is_binary\n\nfrom .exceptions import NonTemplatedInputDirException\nfrom .utils import make_sure_path_exists, unicode_open\n\n\nif sys.version_info[:2] < (2, 7):\n import simplejson as json\n from ordereddict import OrderedDict\nelse:\n import json\n from collections import OrderedDict\n\n\ndef generate_context(config_file='cookiecutter.json'):\n \"\"\"\n Generates the context for a Cookiecutter project template.\n Loads the JSON file as a Python object, with key being the JSON filename.\n\n :param config_file: JSON file containing project config values.\n :paramtype config_file: filename\n \"\"\"\n\n context = {}\n\n file_handle = open(config_file)\n obj = json.load(file_handle, encoding='utf-8', object_pairs_hook=OrderedDict)\n\n # Add the Python object to the context dictionary\n file_name = os.path.split(config_file)[1]\n file_stem = file_name.split('.')[0]\n context[file_stem] = obj\n\n logging.debug('Context generated is {0}'.format(context))\n return context\n\n\ndef generate_file(infile, context, env):\n \"\"\"\n 1. Render the contents of infile.\n 2. Render the filename of infile as the name of outfile.\n 3. Write the rendered infile to outfile.\n :param infile: Input file to generate the file from.\n \"\"\"\n logging.debug(\"Generating file {0}\".format(infile))\n\n # Render the path to the output file (but don't include the filename)\n outdir_tmpl = Template(os.path.dirname(os.path.abspath(infile)))\n outdir = outdir_tmpl.render(**context)\n fname = os.path.basename(os.path.abspath(infile)) # input/output filename\n # Write it to the corresponding place in output_dir\n outfile = os.path.join(outdir, fname)\n logging.debug(\"outfile is {0}\".format(outfile))\n\n # Just copy over binary files. 
Don't render.\n logging.debug(\"Check {0} to see if it's a binary\".format(infile))\n if is_binary(infile):\n logging.debug(\"Copying binary {0} to {1} without rendering\"\n .format(infile, outfile))\n shutil.copyfile(infile, outfile)\n\n else:\n # Force fwd slashes on Windows for get_template\n # This is a by-design Jinja issue\n infile_fwd_slashes = infile.replace(os.path.sep, '/')\n\n # Render the file\n tmpl = env.get_template(infile_fwd_slashes)\n rendered_file = tmpl.render(**context)\n\n # Render the output filename before writing\n name_tmpl = Template(outfile)\n rendered_name = name_tmpl.render(**context)\n logging.debug(\"Writing {0}\".format(rendered_name))\n\n with unicode_open(rendered_name, 'w') as fh:\n fh.write(rendered_file)\n\ndef generate_files(template_dir, context=None):\n \"\"\"\n Renders the templates and saves them to files.\n :param input_dir: Project template input directory.\n :paramtype input_dir: directory\n \"\"\"\n\n # Always use utf-8\n template_dir = template_dir\n\n logging.debug('Generating project from {0}...'.format(template_dir))\n\n context = context or {}\n env = Environment()\n env.loader = FileSystemLoader('.')\n\n # Render dirname before writing\n name_tmpl = Template(template_dir)\n output_dir = name_tmpl.render(**context)\n if output_dir == template_dir:\n raise NonTemplatedInputDirException\n\n logging.debug(\"output_dir is {0}\".format(output_dir))\n make_sure_path_exists(output_dir)\n\n for root, dirs, files in os.walk(template_dir):\n for d in dirs:\n indir = os.path.join(root, d)\n outdir = indir.replace(template_dir, output_dir, 1)\n\n # Render dirname before writing\n name_tmpl = Template(outdir)\n rendered_dirname = name_tmpl.render(**context)\n\n make_sure_path_exists(rendered_dirname)\n\n for f in files:\n logging.debug(\"f is {0}\".format(f))\n infile = os.path.join(root, f)\n generate_file(infile, context, env)\n", "path": "cookiecutter/generate.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.generate\n---------------------\n\nFunctions for generating a project from a project template.\n\"\"\"\nfrom __future__ import unicode_literals\nimport logging\nimport os\nimport shutil\nimport sys\n\nfrom jinja2 import FileSystemLoader, Template\nfrom jinja2.environment import Environment\nfrom jinja2.exceptions import TemplateSyntaxError\nfrom binaryornot.check import is_binary\n\nfrom .exceptions import NonTemplatedInputDirException\nfrom .utils import make_sure_path_exists, unicode_open\n\n\nif sys.version_info[:2] < (2, 7):\n import simplejson as json\n from ordereddict import OrderedDict\nelse:\n import json\n from collections import OrderedDict\n\n\ndef generate_context(config_file='cookiecutter.json'):\n \"\"\"\n Generates the context for a Cookiecutter project template.\n Loads the JSON file as a Python object, with key being the JSON filename.\n\n :param config_file: JSON file containing project config values.\n :paramtype config_file: filename\n \"\"\"\n\n context = {}\n\n file_handle = open(config_file)\n obj = json.load(file_handle, encoding='utf-8', object_pairs_hook=OrderedDict)\n\n # Add the Python object to the context dictionary\n file_name = os.path.split(config_file)[1]\n file_stem = file_name.split('.')[0]\n context[file_stem] = obj\n\n logging.debug('Context generated is {0}'.format(context))\n return context\n\n\ndef generate_file(infile, context, env):\n \"\"\"\n 1. Render the contents of infile.\n 2. Render the filename of infile as the name of outfile.\n 3. 
Write the rendered infile to outfile.\n :param infile: Input file to generate the file from.\n \"\"\"\n logging.debug(\"Generating file {0}\".format(infile))\n\n # Render the path to the output file (but don't include the filename)\n outdir_tmpl = Template(os.path.dirname(os.path.abspath(infile)))\n outdir = outdir_tmpl.render(**context)\n fname = os.path.basename(os.path.abspath(infile)) # input/output filename\n # Write it to the corresponding place in output_dir\n outfile = os.path.join(outdir, fname)\n logging.debug(\"outfile is {0}\".format(outfile))\n\n # Just copy over binary files. Don't render.\n logging.debug(\"Check {0} to see if it's a binary\".format(infile))\n if is_binary(infile):\n logging.debug(\"Copying binary {0} to {1} without rendering\"\n .format(infile, outfile))\n shutil.copyfile(infile, outfile)\n\n else:\n # Force fwd slashes on Windows for get_template\n # This is a by-design Jinja issue\n infile_fwd_slashes = infile.replace(os.path.sep, '/')\n\n # Render the file\n try:\n tmpl = env.get_template(infile_fwd_slashes)\n except TemplateSyntaxError as exception:\n # Disable translated so that printed exception contains verbose\n # information about syntax error location\n exception.translated = False\n raise\n rendered_file = tmpl.render(**context)\n\n # Render the output filename before writing\n name_tmpl = Template(outfile)\n rendered_name = name_tmpl.render(**context)\n logging.debug(\"Writing {0}\".format(rendered_name))\n\n with unicode_open(rendered_name, 'w') as fh:\n fh.write(rendered_file)\n\ndef generate_files(template_dir, context=None):\n \"\"\"\n Renders the templates and saves them to files.\n :param input_dir: Project template input directory.\n :paramtype input_dir: directory\n \"\"\"\n\n # Always use utf-8\n template_dir = template_dir\n\n logging.debug('Generating project from {0}...'.format(template_dir))\n\n context = context or {}\n env = Environment()\n env.loader = FileSystemLoader('.')\n\n # Render dirname before writing\n name_tmpl = Template(template_dir)\n output_dir = name_tmpl.render(**context)\n if output_dir == template_dir:\n raise NonTemplatedInputDirException\n\n logging.debug(\"output_dir is {0}\".format(output_dir))\n make_sure_path_exists(output_dir)\n\n for root, dirs, files in os.walk(template_dir):\n for d in dirs:\n indir = os.path.join(root, d)\n outdir = indir.replace(template_dir, output_dir, 1)\n\n # Render dirname before writing\n name_tmpl = Template(outdir)\n rendered_dirname = name_tmpl.render(**context)\n\n make_sure_path_exists(rendered_dirname)\n\n for f in files:\n logging.debug(\"f is {0}\".format(f))\n infile = os.path.join(root, f)\n generate_file(infile, context, env)\n", "path": "cookiecutter/generate.py"}]}
| 3,301 | 225 |
gh_patches_debug_4500
|
rasdani/github-patches
|
git_diff
|
meltano__meltano-6643
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bug: Pytest warnings when using mssql DB backend
### Meltano Version
2.4.0
### Python Version
NA
### Bug scope
Other
### Operating System
Linux
### Description
Pytest is catching warnings emitted during testing with the mssql DB backend. Example:
https://github.com/meltano/meltano/runs/7813948143
```
/home/runner/work/meltano/meltano/.nox/tests-3-9/lib/python3.9/site-packages/alembic/ddl/mssql.py:125: UserWarning: MS-SQL ALTER COLUMN operations that specify type_= should also specify a nullable= or existing_nullable= argument to avoid implicit conversion of NOT NULL columns to NULL.
util.warn(
```
It's the same warning being emitted 90 times.
### Code
Running a test that raises the warning with `python -Werror pytest ...` results in the following traceback:
```
Traceback (most recent call last):
File "/home/will/meltano/meltano/src/meltano/core/migration_service.py", line 96, in upgrade
command.upgrade(cfg, head)
File "/home/will/.pyenv/versions/meltano3/lib/python3.7/site-packages/alembic/command.py", line 322, in upgrade
script.run_env()
File "/home/will/.pyenv/versions/meltano3/lib/python3.7/site-packages/alembic/script/base.py", line 569, in run_env
util.load_python_file(self.dir, "env.py")
File "/home/will/.pyenv/versions/meltano3/lib/python3.7/site-packages/alembic/util/pyfiles.py", line 94, in load_python_file
module = load_module_py(module_id, path)
File "/home/will/.pyenv/versions/meltano3/lib/python3.7/site-packages/alembic/util/pyfiles.py", line 110, in load_module_py
spec.loader.exec_module(module) # type: ignore
File "<frozen importlib._bootstrap_external>", line 728, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/home/will/meltano/meltano/src/meltano/migrations/env.py", line 60, in <module>
run_migrations_online()
File "/home/will/meltano/meltano/src/meltano/migrations/env.py", line 37, in run_migrations_online
_run_migrations_online(connection)
File "/home/will/meltano/meltano/src/meltano/migrations/env.py", line 54, in _run_migrations_online
context.run_migrations()
File "<string>", line 8, in run_migrations
File "/home/will/.pyenv/versions/meltano3/lib/python3.7/site-packages/alembic/runtime/environment.py", line 853, in run_migrations
self.get_context().run_migrations(**kw)
File "/home/will/.pyenv/versions/meltano3/lib/python3.7/site-packages/alembic/runtime/migration.py", line 623, in run_migrations
step.migration_fn(**kw)
File "/home/will/meltano/meltano/src/meltano/migrations/versions/13e8639c6d2b_add_state_edit_to_job_state_enum.py", line 49, in upgrade
existing_type=sa.Enum(State, name="job_state"),
File "<string>", line 8, in alter_column
File "<string>", line 3, in alter_column
File "/home/will/.pyenv/versions/meltano3/lib/python3.7/site-packages/alembic/operations/ops.py", line 1880, in alter_column
return operations.invoke(alt)
File "/home/will/.pyenv/versions/meltano3/lib/python3.7/site-packages/alembic/operations/base.py", line 399, in invoke
return fn(self, operation)
File "/home/will/.pyenv/versions/meltano3/lib/python3.7/site-packages/alembic/operations/toimpl.py", line 63, in alter_column
**operation.kw
File "/home/will/.pyenv/versions/meltano3/lib/python3.7/site-packages/alembic/ddl/mssql.py", line 126, in alter_column
"MS-SQL ALTER COLUMN operations that specify type_= "
File "/home/will/.pyenv/versions/meltano3/lib/python3.7/site-packages/alembic/util/messaging.py", line 81, in warn
warnings.warn(msg, UserWarning, stacklevel=stacklevel)
UserWarning: MS-SQL ALTER COLUMN operations that specify type_= should also specify a nullable= or existing_nullable= argument to avoid implicit conversion of NOT NULL columns to NULL.
```
--- END ISSUE ---
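For reference, a minimal sketch of the warning-free form of the call (it mirrors the one-line fix in the diff further down this record): on the mssql dialect, alembic wants nullability stated explicitly whenever `type_=` is passed. The table, column, and string length here are illustrative.

```python
import sqlalchemy as sa
from alembic import op

def upgrade():
    # Passing existing_nullable alongside type_ is what avoids the
    # "implicit conversion of NOT NULL columns to NULL" warning on mssql.
    op.alter_column(
        table_name="job",              # illustrative table/column
        column_name="state",
        type_=sa.types.String(5000),   # illustrative target type
        existing_nullable=True,
    )
```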
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/meltano/migrations/versions/13e8639c6d2b_add_state_edit_to_job_state_enum.py`
Content:
```
1 """add_state_edit_to_job_state_enum
2
3 Revision ID: 13e8639c6d2b
4 Revises: d135f52a6f49
5 Create Date: 2022-04-21 09:35:35.435614
6
7 """
8 from enum import Enum
9
10 import sqlalchemy as sa
11 from alembic import op
12
13 from meltano.migrations.utils.dialect_typing import (
14 get_dialect_name,
15 max_string_length_for_dialect,
16 )
17
18 # revision identifiers, used by Alembic.
19 revision = "13e8639c6d2b"
20 down_revision = "d135f52a6f49"
21 branch_labels = None
22 depends_on = None
23
24
25 # from core/job/job.py
26 class State(Enum):
27 """Represents status of a Job."""
28
29 IDLE = (0, ("RUNNING", "FAIL"))
30 RUNNING = (1, ("SUCCESS", "FAIL"))
31 SUCCESS = (2, ())
32 FAIL = (3, ("RUNNING",))
33 DEAD = (4, ())
34 STATE_EDIT = (5, ())
35
36
37 def upgrade():
38 dialect_name = get_dialect_name()
39 max_string_length = max_string_length_for_dialect(dialect_name)
40
41 conn = op.get_bind()
42 # In sqlite, the field is already a varchar.
43 # "ALTER COLUMN" statements are also not supported.
44 if conn.dialect.name != "sqlite":
45 op.alter_column(
46 table_name="job",
47 column_name="state",
48 type_=sa.types.String(max_string_length),
49 existing_type=sa.Enum(State, name="job_state"),
50 )
51
52 # In postgresql, drop the created Enum type so that
53 # downgrade() can re-create it.
54 if conn.dialect.name == "postgresql":
55 conn.execute("DROP TYPE job_state;")
56
57
58 def downgrade():
59 conn = op.get_bind()
60 # In sqlite, the field is already a varchar.
61 # "ALTER COLUMN" statements are also not supported.
62 if conn.dialect.name != "sqlite":
63 op.alter_column(
64 table_name="job",
65 column_name="state",
66 _type=sa.Enum(State, name="job_state"),
67 existing_type=sa.types.String,
68 )
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/meltano/migrations/versions/13e8639c6d2b_add_state_edit_to_job_state_enum.py b/src/meltano/migrations/versions/13e8639c6d2b_add_state_edit_to_job_state_enum.py
--- a/src/meltano/migrations/versions/13e8639c6d2b_add_state_edit_to_job_state_enum.py
+++ b/src/meltano/migrations/versions/13e8639c6d2b_add_state_edit_to_job_state_enum.py
@@ -47,6 +47,7 @@
column_name="state",
type_=sa.types.String(max_string_length),
existing_type=sa.Enum(State, name="job_state"),
+ existing_nullable=True,
)
# In postgresql, drop the created Enum type so that
|
{"golden_diff": "diff --git a/src/meltano/migrations/versions/13e8639c6d2b_add_state_edit_to_job_state_enum.py b/src/meltano/migrations/versions/13e8639c6d2b_add_state_edit_to_job_state_enum.py\n--- a/src/meltano/migrations/versions/13e8639c6d2b_add_state_edit_to_job_state_enum.py\n+++ b/src/meltano/migrations/versions/13e8639c6d2b_add_state_edit_to_job_state_enum.py\n@@ -47,6 +47,7 @@\n column_name=\"state\",\n type_=sa.types.String(max_string_length),\n existing_type=sa.Enum(State, name=\"job_state\"),\n+ existing_nullable=True,\n )\n \n # In postgresql, drop the created Enum type so that\n", "issue": "bug: Pytest warnings when using mssql DB backend\n### Meltano Version\r\n\r\n2.4.0\r\n\r\n### Python Version\r\n\r\nNA\r\n\r\n### Bug scope\r\n\r\nOther\r\n\r\n### Operating System\r\n\r\nLinux\r\n\r\n### Description\r\n\r\nPytest is catching warnings emitted during testing with the mssql DB backend. Example:\r\n\r\nhttps://github.com/meltano/meltano/runs/7813948143\r\n\r\n```\r\n/home/runner/work/meltano/meltano/.nox/tests-3-9/lib/python3.9/site-packages/alembic/ddl/mssql.py:125: UserWarning: MS-SQL ALTER COLUMN operations that specify type_= should also specify a nullable= or existing_nullable= argument to avoid implicit conversion of NOT NULL columns to NULL.\r\n util.warn(\r\n```\r\n\r\nIt's the same warning being emitted 90 times.\r\n\r\n### Code\r\n\r\nRunning a test that raises the warning with `python -Werror pytest ...` results in the following traceback:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/will/meltano/meltano/src/meltano/core/migration_service.py\", line 96, in upgrade\r\n command.upgrade(cfg, head)\r\n File \"/home/will/.pyenv/versions/meltano3/lib/python3.7/site-packages/alembic/command.py\", line 322, in upgrade\r\n script.run_env()\r\n File \"/home/will/.pyenv/versions/meltano3/lib/python3.7/site-packages/alembic/script/base.py\", line 569, in run_env\r\n util.load_python_file(self.dir, \"env.py\")\r\n File \"/home/will/.pyenv/versions/meltano3/lib/python3.7/site-packages/alembic/util/pyfiles.py\", line 94, in load_python_file\r\n module = load_module_py(module_id, path)\r\n File \"/home/will/.pyenv/versions/meltano3/lib/python3.7/site-packages/alembic/util/pyfiles.py\", line 110, in load_module_py\r\n spec.loader.exec_module(module) # type: ignore\r\n File \"<frozen importlib._bootstrap_external>\", line 728, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 219, in _call_with_frames_removed\r\n File \"/home/will/meltano/meltano/src/meltano/migrations/env.py\", line 60, in <module>\r\n run_migrations_online()\r\n File \"/home/will/meltano/meltano/src/meltano/migrations/env.py\", line 37, in run_migrations_online\r\n _run_migrations_online(connection)\r\n File \"/home/will/meltano/meltano/src/meltano/migrations/env.py\", line 54, in _run_migrations_online\r\n context.run_migrations()\r\n File \"<string>\", line 8, in run_migrations\r\n File \"/home/will/.pyenv/versions/meltano3/lib/python3.7/site-packages/alembic/runtime/environment.py\", line 853, in run_migrations\r\n self.get_context().run_migrations(**kw)\r\n File \"/home/will/.pyenv/versions/meltano3/lib/python3.7/site-packages/alembic/runtime/migration.py\", line 623, in run_migrations\r\n step.migration_fn(**kw)\r\n File \"/home/will/meltano/meltano/src/meltano/migrations/versions/13e8639c6d2b_add_state_edit_to_job_state_enum.py\", line 49, in upgrade\r\n existing_type=sa.Enum(State, name=\"job_state\"),\r\n File \"<string>\", line 8, in alter_column\r\n File \"<string>\", 
line 3, in alter_column\r\n File \"/home/will/.pyenv/versions/meltano3/lib/python3.7/site-packages/alembic/operations/ops.py\", line 1880, in alter_column\r\n return operations.invoke(alt)\r\n File \"/home/will/.pyenv/versions/meltano3/lib/python3.7/site-packages/alembic/operations/base.py\", line 399, in invoke\r\n return fn(self, operation)\r\n File \"/home/will/.pyenv/versions/meltano3/lib/python3.7/site-packages/alembic/operations/toimpl.py\", line 63, in alter_column\r\n **operation.kw\r\n File \"/home/will/.pyenv/versions/meltano3/lib/python3.7/site-packages/alembic/ddl/mssql.py\", line 126, in alter_column\r\n \"MS-SQL ALTER COLUMN operations that specify type_= \"\r\n File \"/home/will/.pyenv/versions/meltano3/lib/python3.7/site-packages/alembic/util/messaging.py\", line 81, in warn\r\n warnings.warn(msg, UserWarning, stacklevel=stacklevel)\r\nUserWarning: MS-SQL ALTER COLUMN operations that specify type_= should also specify a nullable= or existing_nullable= argument to avoid implicit conversion of NOT NULL columns to NULL.\r\n```\n", "before_files": [{"content": "\"\"\"add_state_edit_to_job_state_enum\n\nRevision ID: 13e8639c6d2b\nRevises: d135f52a6f49\nCreate Date: 2022-04-21 09:35:35.435614\n\n\"\"\"\nfrom enum import Enum\n\nimport sqlalchemy as sa\nfrom alembic import op\n\nfrom meltano.migrations.utils.dialect_typing import (\n get_dialect_name,\n max_string_length_for_dialect,\n)\n\n# revision identifiers, used by Alembic.\nrevision = \"13e8639c6d2b\"\ndown_revision = \"d135f52a6f49\"\nbranch_labels = None\ndepends_on = None\n\n\n# from core/job/job.py\nclass State(Enum):\n \"\"\"Represents status of a Job.\"\"\"\n\n IDLE = (0, (\"RUNNING\", \"FAIL\"))\n RUNNING = (1, (\"SUCCESS\", \"FAIL\"))\n SUCCESS = (2, ())\n FAIL = (3, (\"RUNNING\",))\n DEAD = (4, ())\n STATE_EDIT = (5, ())\n\n\ndef upgrade():\n dialect_name = get_dialect_name()\n max_string_length = max_string_length_for_dialect(dialect_name)\n\n conn = op.get_bind()\n # In sqlite, the field is already a varchar.\n # \"ALTER COLUMN\" statements are also not supported.\n if conn.dialect.name != \"sqlite\":\n op.alter_column(\n table_name=\"job\",\n column_name=\"state\",\n type_=sa.types.String(max_string_length),\n existing_type=sa.Enum(State, name=\"job_state\"),\n )\n\n # In postgresql, drop the created Enum type so that\n # downgrade() can re-create it.\n if conn.dialect.name == \"postgresql\":\n conn.execute(\"DROP TYPE job_state;\")\n\n\ndef downgrade():\n conn = op.get_bind()\n # In sqlite, the field is already a varchar.\n # \"ALTER COLUMN\" statements are also not supported.\n if conn.dialect.name != \"sqlite\":\n op.alter_column(\n table_name=\"job\",\n column_name=\"state\",\n _type=sa.Enum(State, name=\"job_state\"),\n existing_type=sa.types.String,\n )\n", "path": "src/meltano/migrations/versions/13e8639c6d2b_add_state_edit_to_job_state_enum.py"}], "after_files": [{"content": "\"\"\"add_state_edit_to_job_state_enum\n\nRevision ID: 13e8639c6d2b\nRevises: d135f52a6f49\nCreate Date: 2022-04-21 09:35:35.435614\n\n\"\"\"\nfrom enum import Enum\n\nimport sqlalchemy as sa\nfrom alembic import op\n\nfrom meltano.migrations.utils.dialect_typing import (\n get_dialect_name,\n max_string_length_for_dialect,\n)\n\n# revision identifiers, used by Alembic.\nrevision = \"13e8639c6d2b\"\ndown_revision = \"d135f52a6f49\"\nbranch_labels = None\ndepends_on = None\n\n\n# from core/job/job.py\nclass State(Enum):\n \"\"\"Represents status of a Job.\"\"\"\n\n IDLE = (0, (\"RUNNING\", \"FAIL\"))\n RUNNING = (1, (\"SUCCESS\", 
\"FAIL\"))\n SUCCESS = (2, ())\n FAIL = (3, (\"RUNNING\",))\n DEAD = (4, ())\n STATE_EDIT = (5, ())\n\n\ndef upgrade():\n dialect_name = get_dialect_name()\n max_string_length = max_string_length_for_dialect(dialect_name)\n\n conn = op.get_bind()\n # In sqlite, the field is already a varchar.\n # \"ALTER COLUMN\" statements are also not supported.\n if conn.dialect.name != \"sqlite\":\n op.alter_column(\n table_name=\"job\",\n column_name=\"state\",\n type_=sa.types.String(max_string_length),\n existing_type=sa.Enum(State, name=\"job_state\"),\n existing_nullable=True,\n )\n\n # In postgresql, drop the created Enum type so that\n # downgrade() can re-create it.\n if conn.dialect.name == \"postgresql\":\n conn.execute(\"DROP TYPE job_state;\")\n\n\ndef downgrade():\n conn = op.get_bind()\n # In sqlite, the field is already a varchar.\n # \"ALTER COLUMN\" statements are also not supported.\n if conn.dialect.name != \"sqlite\":\n op.alter_column(\n table_name=\"job\",\n column_name=\"state\",\n _type=sa.Enum(State, name=\"job_state\"),\n existing_type=sa.types.String,\n )\n", "path": "src/meltano/migrations/versions/13e8639c6d2b_add_state_edit_to_job_state_enum.py"}]}
| 2,022 | 192 |
gh_patches_debug_33918
|
rasdani/github-patches
|
git_diff
|
ansible__ansible-modules-extras-1572
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Elasticsearch: Add proxy support during plugin installation
We should be able to install Elasticsearch plugins through a proxy.
--- END ISSUE ---
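A minimal sketch of how proxy settings could be threaded into the command the module builds; `-DproxyHost`/`-DproxyPort` are the JVM proxy properties the Elasticsearch plugin script reads for HTTP downloads, and the helper name and parameters are illustrative rather than the module's actual code.

```python
def build_plugin_command(plugin_bin, action, name,
                         proxy_host=None, proxy_port=None,
                         url=None, timeout=None):
    """Assemble the plugin CLI call, optionally routed through an HTTP proxy."""
    cmd_args = [plugin_bin, action, name]
    if proxy_host and proxy_port:
        # JVM proxy properties picked up by the plugin downloader.
        cmd_args.append("-DproxyHost=%s -DproxyPort=%s" % (proxy_host, proxy_port))
    if url:
        cmd_args.append("--url %s" % url)
    if timeout:
        cmd_args.append("--timeout %s" % timeout)
    return " ".join(cmd_args)

# e.g. build_plugin_command("/usr/share/elasticsearch/bin/plugin", "--install",
#                           "mobz/elasticsearch-head", "proxy.example.com", "3128")
```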
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `packaging/elasticsearch_plugin.py`
Content:
```
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 import os
5
6 """
7 Ansible module to manage elasticsearch plugins
8 (c) 2015, Mathew Davies <[email protected]>
9
10 This file is part of Ansible
11
12 Ansible is free software: you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation, either version 3 of the License, or
15 (at your option) any later version.
16
17 Ansible is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with Ansible. If not, see <http://www.gnu.org/licenses/>.
23 """
24
25 DOCUMENTATION = '''
26 ---
27 module: elasticsearch_plugin
28 short_description: Manage Elasticsearch plugins
29 description:
30 - Manages Elasticsearch plugins.
31 version_added: "2.0"
32 author: Mathew Davies (@ThePixelDeveloper)
33 options:
34 name:
35 description:
36 - Name of the plugin to install
37 required: True
38 state:
39 description:
40 - Desired state of a plugin.
41 required: False
42 choices: [present, absent]
43 default: present
44 url:
45 description:
46 - Set exact URL to download the plugin from
47 required: False
48 default: None
49 timeout:
50 description:
51 - "Timeout setting: 30s, 1m, 1h..."
52 required: False
53 default: 1m
54 plugin_bin:
55 description:
56 - Location of the plugin binary
57 required: False
58 default: /usr/share/elasticsearch/bin/plugin
59 plugin_dir:
60 description:
61 - Your configured plugin directory specified in Elasticsearch
62 required: False
63 default: /usr/share/elasticsearch/plugins/
64 version:
65 description:
66 - Version of the plugin to be installed.
67 If plugin exists with previous version, it will NOT be updated
68 required: False
69 default: None
70 '''
71
72 EXAMPLES = '''
73 # Install Elasticsearch head plugin
74 - elasticsearch_plugin: state=present name="mobz/elasticsearch-head"
75
76 # Install specific version of a plugin
77 - elasticsearch_plugin: state=present name="com.github.kzwang/elasticsearch-image" version="1.2.0"
78
79 # Uninstall Elasticsearch head plugin
80 - elasticsearch_plugin: state=absent name="mobz/elasticsearch-head"
81 '''
82
83
84 def parse_plugin_repo(string):
85 elements = string.split("/")
86
87 # We first consider the simplest form: pluginname
88 repo = elements[0]
89
90 # We consider the form: username/pluginname
91 if len(elements) > 1:
92 repo = elements[1]
93
94 # remove elasticsearch- prefix
95 # remove es- prefix
96 for string in ("elasticsearch-", "es-"):
97 if repo.startswith(string):
98 return repo[len(string):]
99
100 return repo
101
102
103 def is_plugin_present(plugin_dir, working_dir):
104 return os.path.isdir(os.path.join(working_dir, plugin_dir))
105
106
107 def parse_error(string):
108 reason = "reason: "
109 return string[string.index(reason) + len(reason):].strip()
110
111
112 def main():
113
114 package_state_map = dict(
115 present="--install",
116 absent="--remove"
117 )
118
119 module = AnsibleModule(
120 argument_spec=dict(
121 name=dict(required=True),
122 state=dict(default="present", choices=package_state_map.keys()),
123 url=dict(default=None),
124 timeout=dict(default="1m"),
125 plugin_bin=dict(default="/usr/share/elasticsearch/bin/plugin"),
126 plugin_dir=dict(default="/usr/share/elasticsearch/plugins/"),
127 version=dict(default=None)
128 )
129 )
130
131 plugin_bin = module.params["plugin_bin"]
132 plugin_dir = module.params["plugin_dir"]
133 name = module.params["name"]
134 state = module.params["state"]
135 url = module.params["url"]
136 timeout = module.params["timeout"]
137 version = module.params["version"]
138
139 present = is_plugin_present(parse_plugin_repo(name), plugin_dir)
140
141 # skip if the state is correct
142 if (present and state == "present") or (state == "absent" and not present):
143 module.exit_json(changed=False, name=name)
144
145 if (version):
146 name = name + '/' + version
147
148 cmd_args = [plugin_bin, package_state_map[state], name]
149
150 if url:
151 cmd_args.append("--url %s" % url)
152
153 if timeout:
154 cmd_args.append("--timeout %s" % timeout)
155
156 cmd = " ".join(cmd_args)
157
158 rc, out, err = module.run_command(cmd)
159
160 if rc != 0:
161 reason = parse_error(out)
162 module.fail_json(msg=reason)
163
164 module.exit_json(changed=True, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err)
165
166 from ansible.module_utils.basic import *
167
168 main()
169
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/packaging/elasticsearch_plugin.py b/packaging/elasticsearch_plugin.py
--- a/packaging/elasticsearch_plugin.py
+++ b/packaging/elasticsearch_plugin.py
@@ -61,6 +61,16 @@
- Your configured plugin directory specified in Elasticsearch
required: False
default: /usr/share/elasticsearch/plugins/
+ proxy_host:
+ description:
+ - Proxy host to use during plugin installation
+ required: False
+ default: None
+ proxy_port:
+ description:
+ - Proxy port to use during plugin installation
+ required: False
+ default: None
version:
description:
- Version of the plugin to be installed.
@@ -124,16 +134,20 @@
timeout=dict(default="1m"),
plugin_bin=dict(default="/usr/share/elasticsearch/bin/plugin"),
plugin_dir=dict(default="/usr/share/elasticsearch/plugins/"),
+ proxy_host=dict(default=None),
+ proxy_port=dict(default=None),
version=dict(default=None)
)
)
- plugin_bin = module.params["plugin_bin"]
- plugin_dir = module.params["plugin_dir"]
name = module.params["name"]
state = module.params["state"]
url = module.params["url"]
timeout = module.params["timeout"]
+ plugin_bin = module.params["plugin_bin"]
+ plugin_dir = module.params["plugin_dir"]
+ proxy_host = module.params["proxy_host"]
+ proxy_port = module.params["proxy_port"]
version = module.params["version"]
present = is_plugin_present(parse_plugin_repo(name), plugin_dir)
@@ -147,6 +161,9 @@
cmd_args = [plugin_bin, package_state_map[state], name]
+ if proxy_host and proxy_port:
+ cmd_args.append("-DproxyHost=%s -DproxyPort=%s" % proxy_host, proxy_port)
+
if url:
cmd_args.append("--url %s" % url)
|
{"golden_diff": "diff --git a/packaging/elasticsearch_plugin.py b/packaging/elasticsearch_plugin.py\n--- a/packaging/elasticsearch_plugin.py\n+++ b/packaging/elasticsearch_plugin.py\n@@ -61,6 +61,16 @@\n - Your configured plugin directory specified in Elasticsearch\n required: False\n default: /usr/share/elasticsearch/plugins/\n+ proxy_host:\n+ description:\n+ - Proxy host to use during plugin installation\n+ required: False\n+ default: None\n+ proxy_port:\n+ description:\n+ - Proxy port to use during plugin installation\n+ required: False\n+ default: None \n version:\n description:\n - Version of the plugin to be installed.\n@@ -124,16 +134,20 @@\n timeout=dict(default=\"1m\"),\n plugin_bin=dict(default=\"/usr/share/elasticsearch/bin/plugin\"),\n plugin_dir=dict(default=\"/usr/share/elasticsearch/plugins/\"),\n+ proxy_host=dict(default=None),\n+ proxy_port=dict(default=None),\n version=dict(default=None)\n )\n )\n \n- plugin_bin = module.params[\"plugin_bin\"]\n- plugin_dir = module.params[\"plugin_dir\"]\n name = module.params[\"name\"]\n state = module.params[\"state\"]\n url = module.params[\"url\"]\n timeout = module.params[\"timeout\"]\n+ plugin_bin = module.params[\"plugin_bin\"]\n+ plugin_dir = module.params[\"plugin_dir\"]\n+ proxy_host = module.params[\"proxy_host\"]\n+ proxy_port = module.params[\"proxy_port\"]\n version = module.params[\"version\"]\n \n present = is_plugin_present(parse_plugin_repo(name), plugin_dir)\n@@ -147,6 +161,9 @@\n \n cmd_args = [plugin_bin, package_state_map[state], name]\n \n+ if proxy_host and proxy_port:\n+ cmd_args.append(\"-DproxyHost=%s -DproxyPort=%s\" % proxy_host, proxy_port)\n+\n if url:\n cmd_args.append(\"--url %s\" % url)\n", "issue": "Elasticsearch: Add proxy support during plugins installation\nWe should be able to install Elasticsearch plugin though proxy.\n\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport os\n\n\"\"\"\nAnsible module to manage elasticsearch plugins\n(c) 2015, Mathew Davies <[email protected]>\n\nThis file is part of Ansible\n\nAnsible is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nAnsible is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\nYou should have received a copy of the GNU General Public License\nalong with Ansible. 
If not, see <http://www.gnu.org/licenses/>.\n\"\"\"\n\nDOCUMENTATION = '''\n---\nmodule: elasticsearch_plugin\nshort_description: Manage Elasticsearch plugins\ndescription:\n - Manages Elasticsearch plugins.\nversion_added: \"2.0\"\nauthor: Mathew Davies (@ThePixelDeveloper)\noptions:\n name:\n description:\n - Name of the plugin to install\n required: True\n state:\n description:\n - Desired state of a plugin.\n required: False\n choices: [present, absent]\n default: present\n url:\n description:\n - Set exact URL to download the plugin from\n required: False\n default: None\n timeout:\n description:\n - \"Timeout setting: 30s, 1m, 1h...\"\n required: False\n default: 1m\n plugin_bin:\n description:\n - Location of the plugin binary\n required: False\n default: /usr/share/elasticsearch/bin/plugin\n plugin_dir:\n description:\n - Your configured plugin directory specified in Elasticsearch\n required: False\n default: /usr/share/elasticsearch/plugins/\n version:\n description:\n - Version of the plugin to be installed.\n If plugin exists with previous version, it will NOT be updated\n required: False\n default: None\n'''\n\nEXAMPLES = '''\n# Install Elasticsearch head plugin\n- elasticsearch_plugin: state=present name=\"mobz/elasticsearch-head\"\n\n# Install specific version of a plugin\n- elasticsearch_plugin: state=present name=\"com.github.kzwang/elasticsearch-image\" version=\"1.2.0\"\n\n# Uninstall Elasticsearch head plugin\n- elasticsearch_plugin: state=absent name=\"mobz/elasticsearch-head\"\n'''\n\n\ndef parse_plugin_repo(string):\n elements = string.split(\"/\")\n\n # We first consider the simplest form: pluginname\n repo = elements[0]\n\n # We consider the form: username/pluginname\n if len(elements) > 1:\n repo = elements[1]\n\n # remove elasticsearch- prefix\n # remove es- prefix\n for string in (\"elasticsearch-\", \"es-\"):\n if repo.startswith(string):\n return repo[len(string):]\n\n return repo\n\n\ndef is_plugin_present(plugin_dir, working_dir):\n return os.path.isdir(os.path.join(working_dir, plugin_dir))\n\n\ndef parse_error(string):\n reason = \"reason: \"\n return string[string.index(reason) + len(reason):].strip()\n\n\ndef main():\n\n package_state_map = dict(\n present=\"--install\",\n absent=\"--remove\"\n )\n\n module = AnsibleModule(\n argument_spec=dict(\n name=dict(required=True),\n state=dict(default=\"present\", choices=package_state_map.keys()),\n url=dict(default=None),\n timeout=dict(default=\"1m\"),\n plugin_bin=dict(default=\"/usr/share/elasticsearch/bin/plugin\"),\n plugin_dir=dict(default=\"/usr/share/elasticsearch/plugins/\"),\n version=dict(default=None)\n )\n )\n\n plugin_bin = module.params[\"plugin_bin\"]\n plugin_dir = module.params[\"plugin_dir\"]\n name = module.params[\"name\"]\n state = module.params[\"state\"]\n url = module.params[\"url\"]\n timeout = module.params[\"timeout\"]\n version = module.params[\"version\"]\n\n present = is_plugin_present(parse_plugin_repo(name), plugin_dir)\n\n # skip if the state is correct\n if (present and state == \"present\") or (state == \"absent\" and not present):\n module.exit_json(changed=False, name=name)\n\n if (version):\n name = name + '/' + version\n\n cmd_args = [plugin_bin, package_state_map[state], name]\n\n if url:\n cmd_args.append(\"--url %s\" % url)\n\n if timeout:\n cmd_args.append(\"--timeout %s\" % timeout)\n\n cmd = \" \".join(cmd_args)\n\n rc, out, err = module.run_command(cmd)\n\n if rc != 0:\n reason = parse_error(out)\n module.fail_json(msg=reason)\n\n module.exit_json(changed=True, 
cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err)\n\nfrom ansible.module_utils.basic import *\n\nmain()\n", "path": "packaging/elasticsearch_plugin.py"}], "after_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport os\n\n\"\"\"\nAnsible module to manage elasticsearch plugins\n(c) 2015, Mathew Davies <[email protected]>\n\nThis file is part of Ansible\n\nAnsible is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nAnsible is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\nYou should have received a copy of the GNU General Public License\nalong with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\"\"\"\n\nDOCUMENTATION = '''\n---\nmodule: elasticsearch_plugin\nshort_description: Manage Elasticsearch plugins\ndescription:\n - Manages Elasticsearch plugins.\nversion_added: \"2.0\"\nauthor: Mathew Davies (@ThePixelDeveloper)\noptions:\n name:\n description:\n - Name of the plugin to install\n required: True\n state:\n description:\n - Desired state of a plugin.\n required: False\n choices: [present, absent]\n default: present\n url:\n description:\n - Set exact URL to download the plugin from\n required: False\n default: None\n timeout:\n description:\n - \"Timeout setting: 30s, 1m, 1h...\"\n required: False\n default: 1m\n plugin_bin:\n description:\n - Location of the plugin binary\n required: False\n default: /usr/share/elasticsearch/bin/plugin\n plugin_dir:\n description:\n - Your configured plugin directory specified in Elasticsearch\n required: False\n default: /usr/share/elasticsearch/plugins/\n proxy_host:\n description:\n - Proxy host to use during plugin installation\n required: False\n default: None\n proxy_port:\n description:\n - Proxy port to use during plugin installation\n required: False\n default: None \n version:\n description:\n - Version of the plugin to be installed.\n If plugin exists with previous version, it will NOT be updated\n required: False\n default: None\n'''\n\nEXAMPLES = '''\n# Install Elasticsearch head plugin\n- elasticsearch_plugin: state=present name=\"mobz/elasticsearch-head\"\n\n# Install specific version of a plugin\n- elasticsearch_plugin: state=present name=\"com.github.kzwang/elasticsearch-image\" version=\"1.2.0\"\n\n# Uninstall Elasticsearch head plugin\n- elasticsearch_plugin: state=absent name=\"mobz/elasticsearch-head\"\n'''\n\n\ndef parse_plugin_repo(string):\n elements = string.split(\"/\")\n\n # We first consider the simplest form: pluginname\n repo = elements[0]\n\n # We consider the form: username/pluginname\n if len(elements) > 1:\n repo = elements[1]\n\n # remove elasticsearch- prefix\n # remove es- prefix\n for string in (\"elasticsearch-\", \"es-\"):\n if repo.startswith(string):\n return repo[len(string):]\n\n return repo\n\n\ndef is_plugin_present(plugin_dir, working_dir):\n return os.path.isdir(os.path.join(working_dir, plugin_dir))\n\n\ndef parse_error(string):\n reason = \"reason: \"\n return string[string.index(reason) + len(reason):].strip()\n\n\ndef main():\n\n package_state_map = dict(\n present=\"--install\",\n absent=\"--remove\"\n )\n\n module = AnsibleModule(\n argument_spec=dict(\n name=dict(required=True),\n 
state=dict(default=\"present\", choices=package_state_map.keys()),\n url=dict(default=None),\n timeout=dict(default=\"1m\"),\n plugin_bin=dict(default=\"/usr/share/elasticsearch/bin/plugin\"),\n plugin_dir=dict(default=\"/usr/share/elasticsearch/plugins/\"),\n proxy_host=dict(default=None),\n proxy_port=dict(default=None),\n version=dict(default=None)\n )\n )\n\n name = module.params[\"name\"]\n state = module.params[\"state\"]\n url = module.params[\"url\"]\n timeout = module.params[\"timeout\"]\n plugin_bin = module.params[\"plugin_bin\"]\n plugin_dir = module.params[\"plugin_dir\"]\n proxy_host = module.params[\"proxy_host\"]\n proxy_port = module.params[\"proxy_port\"]\n version = module.params[\"version\"]\n\n present = is_plugin_present(parse_plugin_repo(name), plugin_dir)\n\n # skip if the state is correct\n if (present and state == \"present\") or (state == \"absent\" and not present):\n module.exit_json(changed=False, name=name)\n\n if (version):\n name = name + '/' + version\n\n cmd_args = [plugin_bin, package_state_map[state], name]\n\n if proxy_host and proxy_port:\n cmd_args.append(\"-DproxyHost=%s -DproxyPort=%s\" % proxy_host, proxy_port)\n\n if url:\n cmd_args.append(\"--url %s\" % url)\n\n if timeout:\n cmd_args.append(\"--timeout %s\" % timeout)\n\n cmd = \" \".join(cmd_args)\n\n rc, out, err = module.run_command(cmd)\n\n if rc != 0:\n reason = parse_error(out)\n module.fail_json(msg=reason)\n\n module.exit_json(changed=True, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err)\n\nfrom ansible.module_utils.basic import *\n\nmain()\n", "path": "packaging/elasticsearch_plugin.py"}]}
| 1,775 | 438 |
gh_patches_debug_15210
|
rasdani/github-patches
|
git_diff
|
scikit-image__scikit-image-1183
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
type error for morphology.remove_small_objects()
Is it better to add a type error for non-labeled int input?
``` python
a = np.array([[0, 0, 0, 1, 0],
[1, 1, 1, 0, 0],
[1, 1, 1, 0, 1]], int)
b = morphology.remove_small_objects(a, 6)
print b
[[0 0 0 1 0]
[1 1 1 0 0]
[1 1 1 0 1]]
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `skimage/morphology/misc.py`
Content:
```
1 import numpy as np
2 import functools
3 import scipy.ndimage as nd
4 from .selem import _default_selem
5
6 # Our function names don't exactly correspond to ndimages.
7 # This dictionary translates from our names to scipy's.
8 funcs = ('erosion', 'dilation', 'opening', 'closing')
9 skimage2ndimage = dict((x, 'grey_' + x) for x in funcs)
10
11 # These function names are the same in ndimage.
12 funcs = ('binary_erosion', 'binary_dilation', 'binary_opening',
13 'binary_closing', 'black_tophat', 'white_tophat')
14 skimage2ndimage.update(dict((x, x) for x in funcs))
15
16
17 def default_fallback(func):
18 """Decorator to fall back on ndimage for images with more than 2 dimensions
19
20 Decorator also provides a default structuring element, `selem`, with the
21 appropriate dimensionality if none is specified.
22
23 Parameters
24 ----------
25 func : function
26 A morphology function such as erosion, dilation, opening, closing,
27 white_tophat, or black_tophat.
28
29 Returns
30 -------
31 func_out : function
32 If the image dimentionality is greater than 2D, the ndimage
33 function is returned, otherwise skimage function is used.
34 """
35 @functools.wraps(func)
36 def func_out(image, selem=None, out=None, **kwargs):
37 # Default structure element
38 if selem is None:
39 selem = _default_selem(image.ndim)
40
41 # If image has more than 2 dimensions, use scipy.ndimage
42 if image.ndim > 2:
43 function = getattr(nd, skimage2ndimage[func.__name__])
44 try:
45 return function(image, footprint=selem, output=out, **kwargs)
46 except TypeError:
47 # nd.binary_* take structure instead of footprint
48 return function(image, structure=selem, output=out, **kwargs)
49 else:
50 return func(image, selem=selem, out=out, **kwargs)
51
52 return func_out
53
54
55 def remove_small_objects(ar, min_size=64, connectivity=1, in_place=False):
56 """Remove connected components smaller than the specified size.
57
58 Parameters
59 ----------
60 ar : ndarray (arbitrary shape, int or bool type)
61 The array containing the connected components of interest. If the array
62 type is int, it is assumed that it contains already-labeled objects.
63 The ints must be non-negative.
64 min_size : int, optional (default: 64)
65 The smallest allowable connected component size.
66 connectivity : int, {1, 2, ..., ar.ndim}, optional (default: 1)
67 The connectivity defining the neighborhood of a pixel.
68 in_place : bool, optional (default: False)
69 If `True`, remove the connected components in the input array itself.
70 Otherwise, make a copy.
71
72 Raises
73 ------
74 TypeError
75 If the input array is of an invalid type, such as float or string.
76 ValueError
77 If the input array contains negative values.
78
79 Returns
80 -------
81 out : ndarray, same shape and type as input `ar`
82 The input array with small connected components removed.
83
84 Examples
85 --------
86 >>> from skimage import morphology
87 >>> a = np.array([[0, 0, 0, 1, 0],
88 ... [1, 1, 1, 0, 0],
89 ... [1, 1, 1, 0, 1]], bool)
90 >>> b = morphology.remove_small_objects(a, 6)
91 >>> b
92 array([[False, False, False, False, False],
93 [ True, True, True, False, False],
94 [ True, True, True, False, False]], dtype=bool)
95 >>> c = morphology.remove_small_objects(a, 7, connectivity=2)
96 >>> c
97 array([[False, False, False, True, False],
98 [ True, True, True, False, False],
99 [ True, True, True, False, False]], dtype=bool)
100 >>> d = morphology.remove_small_objects(a, 6, in_place=True)
101 >>> d is a
102 True
103 """
104 # Should use `issubdtype` for bool below, but there's a bug in numpy 1.7
105 if not (ar.dtype == bool or np.issubdtype(ar.dtype, np.integer)):
106 raise TypeError("Only bool or integer image types are supported. "
107 "Got %s." % ar.dtype)
108
109 if in_place:
110 out = ar
111 else:
112 out = ar.copy()
113
114 if min_size == 0: # shortcut for efficiency
115 return out
116
117 if out.dtype == bool:
118 selem = nd.generate_binary_structure(ar.ndim, connectivity)
119 ccs = np.zeros_like(ar, dtype=np.int32)
120 nd.label(ar, selem, output=ccs)
121 else:
122 ccs = out
123
124 try:
125 component_sizes = np.bincount(ccs.ravel())
126 except ValueError:
127 raise ValueError("Negative value labels are not supported. Try "
128 "relabeling the input with `scipy.ndimage.label` or "
129 "`skimage.morphology.label`.")
130
131 too_small = component_sizes < min_size
132 too_small_mask = too_small[ccs]
133 out[too_small_mask] = 0
134
135 return out
136
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/skimage/morphology/misc.py b/skimage/morphology/misc.py
--- a/skimage/morphology/misc.py
+++ b/skimage/morphology/misc.py
@@ -1,5 +1,6 @@
import numpy as np
import functools
+import warnings
import scipy.ndimage as nd
from .selem import _default_selem
@@ -128,6 +129,10 @@
"relabeling the input with `scipy.ndimage.label` or "
"`skimage.morphology.label`.")
+ if len(component_sizes) == 2:
+ warnings.warn("Only one label was provided to `remove_small_objects`. "
+ "Did you mean to use a boolean array?")
+
too_small = component_sizes < min_size
too_small_mask = too_small[ccs]
out[too_small_mask] = 0
|
{"golden_diff": "diff --git a/skimage/morphology/misc.py b/skimage/morphology/misc.py\n--- a/skimage/morphology/misc.py\n+++ b/skimage/morphology/misc.py\n@@ -1,5 +1,6 @@\n import numpy as np\n import functools\n+import warnings\n import scipy.ndimage as nd\n from .selem import _default_selem\n \n@@ -128,6 +129,10 @@\n \"relabeling the input with `scipy.ndimage.label` or \"\n \"`skimage.morphology.label`.\")\n \n+ if len(component_sizes) == 2:\n+ warnings.warn(\"Only one label was provided to `remove_small_objects`. \"\n+ \"Did you mean to use a boolean array?\")\n+\n too_small = component_sizes < min_size\n too_small_mask = too_small[ccs]\n out[too_small_mask] = 0\n", "issue": "type error for morphology.remove_small_objects()\nIs it better to add type error for non-labeled int? \n\n``` python\na = np.array([[0, 0, 0, 1, 0],\n [1, 1, 1, 0, 0],\n [1, 1, 1, 0, 1]], int)\n\nb = morphology.remove_small_objects(a, 6)\n\nprint b\n\n[[0 0 0 1 0]\n [1 1 1 0 0]\n [1 1 1 0 1]]\n```\n\n", "before_files": [{"content": "import numpy as np\nimport functools\nimport scipy.ndimage as nd\nfrom .selem import _default_selem\n\n# Our function names don't exactly correspond to ndimages.\n# This dictionary translates from our names to scipy's.\nfuncs = ('erosion', 'dilation', 'opening', 'closing')\nskimage2ndimage = dict((x, 'grey_' + x) for x in funcs)\n\n# These function names are the same in ndimage.\nfuncs = ('binary_erosion', 'binary_dilation', 'binary_opening',\n 'binary_closing', 'black_tophat', 'white_tophat')\nskimage2ndimage.update(dict((x, x) for x in funcs))\n\n\ndef default_fallback(func):\n \"\"\"Decorator to fall back on ndimage for images with more than 2 dimensions\n\n Decorator also provides a default structuring element, `selem`, with the\n appropriate dimensionality if none is specified.\n\n Parameters\n ----------\n func : function\n A morphology function such as erosion, dilation, opening, closing,\n white_tophat, or black_tophat.\n\n Returns\n -------\n func_out : function\n If the image dimentionality is greater than 2D, the ndimage\n function is returned, otherwise skimage function is used.\n \"\"\"\n @functools.wraps(func)\n def func_out(image, selem=None, out=None, **kwargs):\n # Default structure element\n if selem is None:\n selem = _default_selem(image.ndim)\n\n # If image has more than 2 dimensions, use scipy.ndimage\n if image.ndim > 2:\n function = getattr(nd, skimage2ndimage[func.__name__])\n try:\n return function(image, footprint=selem, output=out, **kwargs)\n except TypeError:\n # nd.binary_* take structure instead of footprint\n return function(image, structure=selem, output=out, **kwargs)\n else:\n return func(image, selem=selem, out=out, **kwargs)\n\n return func_out\n\n\ndef remove_small_objects(ar, min_size=64, connectivity=1, in_place=False):\n \"\"\"Remove connected components smaller than the specified size.\n\n Parameters\n ----------\n ar : ndarray (arbitrary shape, int or bool type)\n The array containing the connected components of interest. 
If the array\n type is int, it is assumed that it contains already-labeled objects.\n The ints must be non-negative.\n min_size : int, optional (default: 64)\n The smallest allowable connected component size.\n connectivity : int, {1, 2, ..., ar.ndim}, optional (default: 1)\n The connectivity defining the neighborhood of a pixel.\n in_place : bool, optional (default: False)\n If `True`, remove the connected components in the input array itself.\n Otherwise, make a copy.\n\n Raises\n ------\n TypeError\n If the input array is of an invalid type, such as float or string.\n ValueError\n If the input array contains negative values.\n\n Returns\n -------\n out : ndarray, same shape and type as input `ar`\n The input array with small connected components removed.\n\n Examples\n --------\n >>> from skimage import morphology\n >>> a = np.array([[0, 0, 0, 1, 0],\n ... [1, 1, 1, 0, 0],\n ... [1, 1, 1, 0, 1]], bool)\n >>> b = morphology.remove_small_objects(a, 6)\n >>> b\n array([[False, False, False, False, False],\n [ True, True, True, False, False],\n [ True, True, True, False, False]], dtype=bool)\n >>> c = morphology.remove_small_objects(a, 7, connectivity=2)\n >>> c\n array([[False, False, False, True, False],\n [ True, True, True, False, False],\n [ True, True, True, False, False]], dtype=bool)\n >>> d = morphology.remove_small_objects(a, 6, in_place=True)\n >>> d is a\n True\n \"\"\"\n # Should use `issubdtype` for bool below, but there's a bug in numpy 1.7\n if not (ar.dtype == bool or np.issubdtype(ar.dtype, np.integer)):\n raise TypeError(\"Only bool or integer image types are supported. \"\n \"Got %s.\" % ar.dtype)\n\n if in_place:\n out = ar\n else:\n out = ar.copy()\n\n if min_size == 0: # shortcut for efficiency\n return out\n\n if out.dtype == bool:\n selem = nd.generate_binary_structure(ar.ndim, connectivity)\n ccs = np.zeros_like(ar, dtype=np.int32)\n nd.label(ar, selem, output=ccs)\n else:\n ccs = out\n\n try:\n component_sizes = np.bincount(ccs.ravel())\n except ValueError:\n raise ValueError(\"Negative value labels are not supported. 
Try \"\n \"relabeling the input with `scipy.ndimage.label` or \"\n \"`skimage.morphology.label`.\")\n\n too_small = component_sizes < min_size\n too_small_mask = too_small[ccs]\n out[too_small_mask] = 0\n\n return out\n", "path": "skimage/morphology/misc.py"}], "after_files": [{"content": "import numpy as np\nimport functools\nimport warnings\nimport scipy.ndimage as nd\nfrom .selem import _default_selem\n\n# Our function names don't exactly correspond to ndimages.\n# This dictionary translates from our names to scipy's.\nfuncs = ('erosion', 'dilation', 'opening', 'closing')\nskimage2ndimage = dict((x, 'grey_' + x) for x in funcs)\n\n# These function names are the same in ndimage.\nfuncs = ('binary_erosion', 'binary_dilation', 'binary_opening',\n 'binary_closing', 'black_tophat', 'white_tophat')\nskimage2ndimage.update(dict((x, x) for x in funcs))\n\n\ndef default_fallback(func):\n \"\"\"Decorator to fall back on ndimage for images with more than 2 dimensions\n\n Decorator also provides a default structuring element, `selem`, with the\n appropriate dimensionality if none is specified.\n\n Parameters\n ----------\n func : function\n A morphology function such as erosion, dilation, opening, closing,\n white_tophat, or black_tophat.\n\n Returns\n -------\n func_out : function\n If the image dimentionality is greater than 2D, the ndimage\n function is returned, otherwise skimage function is used.\n \"\"\"\n @functools.wraps(func)\n def func_out(image, selem=None, out=None, **kwargs):\n # Default structure element\n if selem is None:\n selem = _default_selem(image.ndim)\n\n # If image has more than 2 dimensions, use scipy.ndimage\n if image.ndim > 2:\n function = getattr(nd, skimage2ndimage[func.__name__])\n try:\n return function(image, footprint=selem, output=out, **kwargs)\n except TypeError:\n # nd.binary_* take structure instead of footprint\n return function(image, structure=selem, output=out, **kwargs)\n else:\n return func(image, selem=selem, out=out, **kwargs)\n\n return func_out\n\n\ndef remove_small_objects(ar, min_size=64, connectivity=1, in_place=False):\n \"\"\"Remove connected components smaller than the specified size.\n\n Parameters\n ----------\n ar : ndarray (arbitrary shape, int or bool type)\n The array containing the connected components of interest. If the array\n type is int, it is assumed that it contains already-labeled objects.\n The ints must be non-negative.\n min_size : int, optional (default: 64)\n The smallest allowable connected component size.\n connectivity : int, {1, 2, ..., ar.ndim}, optional (default: 1)\n The connectivity defining the neighborhood of a pixel.\n in_place : bool, optional (default: False)\n If `True`, remove the connected components in the input array itself.\n Otherwise, make a copy.\n\n Raises\n ------\n TypeError\n If the input array is of an invalid type, such as float or string.\n ValueError\n If the input array contains negative values.\n\n Returns\n -------\n out : ndarray, same shape and type as input `ar`\n The input array with small connected components removed.\n\n Examples\n --------\n >>> from skimage import morphology\n >>> a = np.array([[0, 0, 0, 1, 0],\n ... [1, 1, 1, 0, 0],\n ... 
[1, 1, 1, 0, 1]], bool)\n >>> b = morphology.remove_small_objects(a, 6)\n >>> b\n array([[False, False, False, False, False],\n [ True, True, True, False, False],\n [ True, True, True, False, False]], dtype=bool)\n >>> c = morphology.remove_small_objects(a, 7, connectivity=2)\n >>> c\n array([[False, False, False, True, False],\n [ True, True, True, False, False],\n [ True, True, True, False, False]], dtype=bool)\n >>> d = morphology.remove_small_objects(a, 6, in_place=True)\n >>> d is a\n True\n \"\"\"\n # Should use `issubdtype` for bool below, but there's a bug in numpy 1.7\n if not (ar.dtype == bool or np.issubdtype(ar.dtype, np.integer)):\n raise TypeError(\"Only bool or integer image types are supported. \"\n \"Got %s.\" % ar.dtype)\n\n if in_place:\n out = ar\n else:\n out = ar.copy()\n\n if min_size == 0: # shortcut for efficiency\n return out\n\n if out.dtype == bool:\n selem = nd.generate_binary_structure(ar.ndim, connectivity)\n ccs = np.zeros_like(ar, dtype=np.int32)\n nd.label(ar, selem, output=ccs)\n else:\n ccs = out\n\n try:\n component_sizes = np.bincount(ccs.ravel())\n except ValueError:\n raise ValueError(\"Negative value labels are not supported. Try \"\n \"relabeling the input with `scipy.ndimage.label` or \"\n \"`skimage.morphology.label`.\")\n\n if len(component_sizes) == 2:\n warnings.warn(\"Only one label was provided to `remove_small_objects`. \"\n \"Did you mean to use a boolean array?\")\n\n too_small = component_sizes < min_size\n too_small_mask = too_small[ccs]\n out[too_small_mask] = 0\n\n return out\n", "path": "skimage/morphology/misc.py"}]}
| 1,875 | 199 |
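A short usage sketch for the fix above, assuming the golden diff has been applied to a local scikit-image checkout; the array mirrors the one in the issue, and the captured warning text is the one added by the patch.

```python
import warnings

import numpy as np
from skimage import morphology

a = np.array([[0, 0, 0, 1, 0],
              [1, 1, 1, 0, 0],
              [1, 1, 1, 0, 1]], int)

# An int array holding only 0s and 1s is treated as already-labeled, so
# np.bincount sees exactly two label counts and the patched code warns that
# the caller probably meant to pass a boolean mask.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    morphology.remove_small_objects(a, 6)
for w in caught:
    print(w.message)

# Converting to bool gives the intended behaviour: the 6-pixel component
# survives and the two isolated pixels are removed.
print(morphology.remove_small_objects(a.astype(bool), 6))
```

The int call still returns the input unchanged (all of label 1's pixels count as a single 8-pixel object), which is exactly the silent surprise from the issue that the new warning flags.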
gh_patches_debug_715
|
rasdani/github-patches
|
git_diff
|
xorbitsai__inference-1092
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DOC: Add doc about installing from source code
Note that the issue tracker is NOT the place for general support.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `doc/source/conf.py`
Content:
```
1 # Configuration file for the Sphinx documentation builder.
2 #
3 # This file only contains a selection of the most common options. For a full
4 # list see the documentation:
5 # https://www.sphinx-doc.org/en/master/usage/configuration.html
6
7 # -- Path setup --------------------------------------------------------------
8
9 # If extensions (or modules to document with autodoc) are in another directory,
10 # add these directories to sys.path here. If the directory is relative to the
11 # documentation root, use os.path.abspath to make it absolute, like shown here.
12 #
13 import os
14 # import sys
15 # sys.path.insert(0, os.path.abspath('.'))
16
17
18 # -- Project information -----------------------------------------------------
19
20 project = 'Xinference'
21 copyright = '2023, Xorbits Inc.'
22 author = 'xorbitsai'
23
24
25 # -- General configuration ---------------------------------------------------
26
27 # Add any Sphinx extension module names here, as strings. They can be
28 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
29 # ones.
30 extensions = [
31 "sphinx.ext.mathjax",
32 "sphinx.ext.ifconfig",
33 "sphinx.ext.intersphinx",
34 "sphinx.ext.viewcode",
35 "sphinx.ext.githubpages",
36 "sphinx.ext.autosummary",
37 "sphinx.ext.napoleon",
38 "sphinx_tabs.tabs",
39 "sphinx_design",
40 "IPython.sphinxext.ipython_directive",
41 "IPython.sphinxext.ipython_console_highlighting",
42 ]
43
44 # Add any paths that contain templates here, relative to this directory.
45 templates_path = ['_templates']
46
47 # List of patterns, relative to source directory, that match files and
48 # directories to ignore when looking for source files.
49 # This pattern also affects html_static_path and html_extra_path.
50 exclude_patterns = []
51
52 # i18n
53 locale_dirs = ["locale/"] # path is example but recommended.
54 gettext_compact = False # optional
55
56
57 # -- Options for HTML output -------------------------------------------------
58
59 # The theme to use for HTML and HTML Help pages. See the documentation for
60 # a list of builtin themes.
61 #
62 html_theme = 'pydata_sphinx_theme'
63 html_title = "Xinference"
64
65 # Add any paths that contain custom static files (such as style sheets) here,
66 # relative to this directory. They are copied after the builtin static files,
67 # so a file named "default.css" will overwrite the builtin "default.css".
68 html_static_path = ['_static']
69
70 # Define the json_url for our version switcher.
71 version_match = os.environ.get("READTHEDOCS_LANGUAGE")
72 json_url = "https://inference.readthedocs.io/en/latest/_static/switcher.json"
73 if not version_match:
74 version_match = 'en'
75
76 html_theme_options = {
77 "show_toc_level": 2,
78 "header_links_before_dropdown": 6,
79 "icon_links": [
80 {
81 "name": "GitHub",
82 "url": "https://github.com/xorbitsai/inference",
83 "icon": "fa-brands fa-github",
84 "type": "fontawesome",
85 },
86 ],
87 "navbar_align": "content", # [left, content, right] For testing that the navbar items align properly
88 "navbar_start": ["navbar-logo", "version-switcher"],
89 "navbar_center": ["navbar-nav"],
90 "switcher": {
91 "json_url": json_url,
92 "version_match": version_match,
93 },
94 }
95
96
97 if version_match != 'zh-cn':
98 html_theme_options['icon_links'].extend([{
99 "name": "Slack",
100 "url": "https://join.slack.com/t/xorbitsio/shared_invite/zt-1o3z9ucdh-RbfhbPVpx7prOVdM1CAuxg",
101 "icon": "fa-brands fa-slack",
102 "type": "fontawesome",
103 },
104 {
105 "name": "Twitter",
106 "url": "https://twitter.com/xorbitsio",
107 "icon": "fa-brands fa-twitter",
108 "type": "fontawesome",
109 }])
110 else:
111 html_theme_options['icon_links'].extend([{
112 "name": "WeChat",
113 "url": "https://xorbits.cn/assets/images/wechat_work_qr.png",
114 "icon": "fa-brands fa-weixin",
115 "type": "fontawesome",
116 },
117 {
118 "name": "Zhihu",
119 "url": "https://zhihu.com/org/xorbits",
120 "icon": "fa-brands fa-zhihu",
121 "type": "fontawesome",
122 }])
123 html_theme_options["external_links"] = [
124 {"name": "产品官网", "url": "https://xorbits.cn/inference"},
125 ]
126
127 html_favicon = "_static/favicon.svg"
128
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/doc/source/conf.py b/doc/source/conf.py
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -75,7 +75,7 @@
html_theme_options = {
"show_toc_level": 2,
- "header_links_before_dropdown": 6,
+ "header_links_before_dropdown": 7,
"icon_links": [
{
"name": "GitHub",
|
{"golden_diff": "diff --git a/doc/source/conf.py b/doc/source/conf.py\n--- a/doc/source/conf.py\n+++ b/doc/source/conf.py\n@@ -75,7 +75,7 @@\n \n html_theme_options = {\n \"show_toc_level\": 2,\n- \"header_links_before_dropdown\": 6,\n+ \"header_links_before_dropdown\": 7,\n \"icon_links\": [\n {\n \"name\": \"GitHub\",\n", "issue": "DOC: Add doc about installing from source code\nNote that the issue tracker is NOT the place for general support.\r\n\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\n\n# -- Project information -----------------------------------------------------\n\nproject = 'Xinference'\ncopyright = '2023, Xorbits Inc.'\nauthor = 'xorbitsai'\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.ifconfig\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.githubpages\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.napoleon\",\n \"sphinx_tabs.tabs\",\n \"sphinx_design\",\n \"IPython.sphinxext.ipython_directive\",\n \"IPython.sphinxext.ipython_console_highlighting\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\n# i18n\nlocale_dirs = [\"locale/\"] # path is example but recommended.\ngettext_compact = False # optional\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'pydata_sphinx_theme'\nhtml_title = \"Xinference\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Define the json_url for our version switcher.\nversion_match = os.environ.get(\"READTHEDOCS_LANGUAGE\")\njson_url = \"https://inference.readthedocs.io/en/latest/_static/switcher.json\"\nif not version_match:\n version_match = 'en'\n\nhtml_theme_options = {\n \"show_toc_level\": 2,\n \"header_links_before_dropdown\": 6,\n \"icon_links\": [\n {\n \"name\": \"GitHub\",\n \"url\": \"https://github.com/xorbitsai/inference\",\n \"icon\": \"fa-brands fa-github\",\n \"type\": \"fontawesome\",\n },\n ],\n \"navbar_align\": \"content\", # [left, content, right] For testing that the navbar items align properly\n \"navbar_start\": [\"navbar-logo\", \"version-switcher\"],\n \"navbar_center\": [\"navbar-nav\"],\n \"switcher\": {\n \"json_url\": json_url,\n \"version_match\": version_match,\n },\n}\n\n\nif version_match != 'zh-cn':\n html_theme_options['icon_links'].extend([{\n \"name\": \"Slack\",\n \"url\": \"https://join.slack.com/t/xorbitsio/shared_invite/zt-1o3z9ucdh-RbfhbPVpx7prOVdM1CAuxg\",\n \"icon\": \"fa-brands fa-slack\",\n \"type\": \"fontawesome\",\n },\n {\n \"name\": \"Twitter\",\n \"url\": \"https://twitter.com/xorbitsio\",\n \"icon\": \"fa-brands fa-twitter\",\n \"type\": \"fontawesome\",\n }])\nelse:\n html_theme_options['icon_links'].extend([{\n \"name\": \"WeChat\",\n \"url\": \"https://xorbits.cn/assets/images/wechat_work_qr.png\",\n \"icon\": \"fa-brands fa-weixin\",\n \"type\": \"fontawesome\",\n },\n {\n \"name\": \"Zhihu\",\n \"url\": \"https://zhihu.com/org/xorbits\",\n \"icon\": \"fa-brands fa-zhihu\",\n \"type\": \"fontawesome\",\n }])\n html_theme_options[\"external_links\"] = [\n {\"name\": \"\u4ea7\u54c1\u5b98\u7f51\", \"url\": \"https://xorbits.cn/inference\"},\n ]\n\nhtml_favicon = \"_static/favicon.svg\"\n", "path": "doc/source/conf.py"}], "after_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\n\n# -- Project information -----------------------------------------------------\n\nproject = 'Xinference'\ncopyright = '2023, Xorbits Inc.'\nauthor = 'xorbitsai'\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.ifconfig\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.githubpages\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.napoleon\",\n \"sphinx_tabs.tabs\",\n \"sphinx_design\",\n \"IPython.sphinxext.ipython_directive\",\n \"IPython.sphinxext.ipython_console_highlighting\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\n# i18n\nlocale_dirs = [\"locale/\"] # path is example but recommended.\ngettext_compact = False # optional\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'pydata_sphinx_theme'\nhtml_title = \"Xinference\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Define the json_url for our version switcher.\nversion_match = os.environ.get(\"READTHEDOCS_LANGUAGE\")\njson_url = \"https://inference.readthedocs.io/en/latest/_static/switcher.json\"\nif not version_match:\n version_match = 'en'\n\nhtml_theme_options = {\n \"show_toc_level\": 2,\n \"header_links_before_dropdown\": 7,\n \"icon_links\": [\n {\n \"name\": \"GitHub\",\n \"url\": \"https://github.com/xorbitsai/inference\",\n \"icon\": \"fa-brands fa-github\",\n \"type\": \"fontawesome\",\n },\n ],\n \"navbar_align\": \"content\", # [left, content, right] For testing that the navbar items align properly\n \"navbar_start\": [\"navbar-logo\", \"version-switcher\"],\n \"navbar_center\": [\"navbar-nav\"],\n \"switcher\": {\n \"json_url\": json_url,\n \"version_match\": version_match,\n },\n}\n\n\nif version_match != 'zh-cn':\n html_theme_options['icon_links'].extend([{\n \"name\": \"Slack\",\n \"url\": \"https://join.slack.com/t/xorbitsio/shared_invite/zt-1o3z9ucdh-RbfhbPVpx7prOVdM1CAuxg\",\n \"icon\": \"fa-brands fa-slack\",\n \"type\": \"fontawesome\",\n },\n {\n \"name\": \"Twitter\",\n \"url\": \"https://twitter.com/xorbitsio\",\n \"icon\": \"fa-brands fa-twitter\",\n \"type\": \"fontawesome\",\n }])\nelse:\n html_theme_options['icon_links'].extend([{\n \"name\": \"WeChat\",\n \"url\": \"https://xorbits.cn/assets/images/wechat_work_qr.png\",\n \"icon\": \"fa-brands fa-weixin\",\n \"type\": \"fontawesome\",\n },\n {\n \"name\": \"Zhihu\",\n \"url\": \"https://zhihu.com/org/xorbits\",\n \"icon\": \"fa-brands fa-zhihu\",\n \"type\": \"fontawesome\",\n }])\n html_theme_options[\"external_links\"] = [\n {\"name\": \"\u4ea7\u54c1\u5b98\u7f51\", \"url\": \"https://xorbits.cn/inference\"},\n ]\n\nhtml_favicon = \"_static/favicon.svg\"\n", "path": "doc/source/conf.py"}]}
| 1,587 | 93 |
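The one-line fix above is easier to follow with a little context: in pydata-sphinx-theme, `header_links_before_dropdown` caps how many top-level links appear in the navbar before the rest are folded into a "More" dropdown. The issue asks for a new installation-from-source page, which presumably brings the section count to seven, so the threshold is bumped to match. A hedged illustration of the relevant `conf.py` fragment follows; the comment is mine, not part of the record.

```python
# doc/source/conf.py (fragment) -- sketch, not the full file from the record
html_theme_options = {
    "show_toc_level": 2,
    # With seven top-level sections after the new "installation from source"
    # page, a value of 6 would push the last link into the "More" dropdown.
    "header_links_before_dropdown": 7,
}
```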
gh_patches_debug_14687
|
rasdani/github-patches
|
git_diff
|
ultrabug__py3status-239
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Module "xrandr_rotate" makes i3 unusable
We have seen a case (see #217) when i3 becomes unusable after rotating screen using `xrandr_rotate` module. The symptoms are: `xrandr` process is stalled and only sending SIGCONT to it makes i3 usable again.
This issue is not reproducible for everyone. If you experience a similar behavior, please post here as many details as you can to help us investigate and fix this issue.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `py3status/modules/xrandr_rotate.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 """
4 Switch between horizontal and vertical screen rotation on a single click.
5
6 Configuration parameters:
7 cache_timeout: how often to refresh this module.
8 (default is 10)
9 format: a string that formats the output, can include placeholders.
10 (default is '{icon}')
11 hide_if_disconnected: a boolean flag to hide icon when `screen` is disconnected.
12 it has no effect unless `screen` option is also configured.
13 (default: None)
14 horizontal_icon: a character to represent horizontal rotation.
15 (default is 'H')
16 horizontal_rotation: a horizontal rotation for xrandr to use.
17 available options: 'normal' or 'inverted'.
18 (default is 'normal')
19 screen: display output name to rotate, as detected by xrandr.
20 if not provided, all enabled screens will be rotated.
21 (default: None)
22 vertical_icon: a character to represent vertical rotation.
23 (default is 'V')
24 vertical_rotation: a vertical rotation for xrandr to use.
25 available options: 'left' or 'right'.
26 (default is 'left')
27
28 Available placeholders for formatting the output:
29 {icon} a rotation icon, specified by `horizontal_icon` or `vertical_icon`.
30 {screen} a screen name, specified by `screen` option or detected automatically
31 if only one screen is connected, otherwise 'ALL'.
32
33
34 Remarks:
35 There have been cases when rotating a screen using this module made i3 unusabe.
36 If you experience a similar behavior, please report as many details as you can:
37 https://github.com/ultrabug/py3status/issues/227
38
39
40 @author Maxim Baz (https://github.com/maximbaz)
41 @license BSD
42 """
43
44 from subprocess import Popen, PIPE
45 from time import sleep, time
46
47
48 class Py3status:
49 """
50 """
51 # available configuration parameters
52 cache_timeout = 10
53 format = '{icon}'
54 hide_if_disconnected = False
55 horizontal_icon = 'H'
56 horizontal_rotation = 'normal'
57 screen = None
58 vertical_icon = 'V'
59 vertical_rotation = 'left'
60
61 def _call(self, cmd):
62 process = Popen(cmd, stdout=PIPE, shell=True)
63 output = process.communicate()[0] or ""
64 try:
65 # python3
66 output = output.decode()
67 except:
68 pass
69 return output.strip()
70
71 def _get_all_outputs(self):
72 cmd = 'xrandr -q | grep " connected [^(]" | cut -d " " -f1'
73 return self._call(cmd).split()
74
75 def _get_current_rotation_icon(self, all_outputs):
76 output = self.screen or all_outputs[0]
77 cmd = 'xrandr -q | grep "^' + output + '" | cut -d " " -f4'
78 output = self._call(cmd)
79 # xrandr may skip printing the 'normal', in which case the output would start from '('
80 is_horizontal = output.startswith('(') or output in ['normal', 'inverted']
81 return self.horizontal_icon if is_horizontal else self.vertical_icon
82
83 def _apply(self):
84 rotation = self.horizontal_rotation if self.displayed == self.horizontal_icon else self.vertical_rotation
85 outputs = [self.screen] if self.screen else self._get_all_outputs()
86 for output in outputs:
87 cmd = 'xrandr --output ' + output + ' --rotate ' + rotation
88 self._call(cmd)
89
90 def _switch_selection(self):
91 self.displayed = self.vertical_icon if self.displayed == self.horizontal_icon else self.horizontal_icon
92
93 def on_click(self, i3s_output_list, i3s_config, event):
94 """
95 Click events
96 - left click & scroll up/down: switch between rotations
97 - right click: apply selected rotation
98 """
99 button = event['button']
100 if button in [1, 4, 5]:
101 self._switch_selection()
102 elif button == 3:
103 self._apply()
104
105 def xrandr_rotate(self, i3s_output_list, i3s_config):
106 all_outputs = self._get_all_outputs()
107 selected_screen_disconnected = self.screen is not None and self.screen not in all_outputs
108 if selected_screen_disconnected and self.hide_if_disconnected:
109 full_text = ''
110 else:
111 if not hasattr(self, 'displayed'):
112 self.displayed = self._get_current_rotation_icon(all_outputs)
113
114 screen = self.screen or all_outputs[0] if len(all_outputs) == 1 else 'ALL'
115 full_text = self.format.format(icon=self.displayed or '?', screen=screen)
116
117 response = {
118 'cached_until': time() + self.cache_timeout,
119 'full_text': full_text
120 }
121
122 # coloration
123 if selected_screen_disconnected and not self.hide_if_disconnected:
124 response['color'] = i3s_config['color_degraded']
125 elif self.displayed == self._get_current_rotation_icon(all_outputs):
126 response['color'] = i3s_config['color_good']
127
128 return response
129
130
131 if __name__ == "__main__":
132 """
133 Test this module by calling it directly.
134 """
135 x = Py3status()
136 config = {
137 'color_bad': '#FF0000',
138 'color_degraded': '#FFFF00',
139 'color_good': '#00FF00'
140 }
141 while True:
142 print(x.xrandr_rotate([], config))
143 sleep(1)
144
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/py3status/modules/xrandr_rotate.py b/py3status/modules/xrandr_rotate.py
--- a/py3status/modules/xrandr_rotate.py
+++ b/py3status/modules/xrandr_rotate.py
@@ -84,8 +84,8 @@
rotation = self.horizontal_rotation if self.displayed == self.horizontal_icon else self.vertical_rotation
outputs = [self.screen] if self.screen else self._get_all_outputs()
for output in outputs:
- cmd = 'xrandr --output ' + output + ' --rotate ' + rotation
- self._call(cmd)
+ cmd = 'exec xrandr --output ' + output + ' --rotate ' + rotation
+ Popen(['i3-msg', cmd], stdout=PIPE)
def _switch_selection(self):
self.displayed = self.vertical_icon if self.displayed == self.horizontal_icon else self.horizontal_icon
|
{"golden_diff": "diff --git a/py3status/modules/xrandr_rotate.py b/py3status/modules/xrandr_rotate.py\n--- a/py3status/modules/xrandr_rotate.py\n+++ b/py3status/modules/xrandr_rotate.py\n@@ -84,8 +84,8 @@\n rotation = self.horizontal_rotation if self.displayed == self.horizontal_icon else self.vertical_rotation\n outputs = [self.screen] if self.screen else self._get_all_outputs()\n for output in outputs:\n- cmd = 'xrandr --output ' + output + ' --rotate ' + rotation\n- self._call(cmd)\n+ cmd = 'exec xrandr --output ' + output + ' --rotate ' + rotation\n+ Popen(['i3-msg', cmd], stdout=PIPE)\n \n def _switch_selection(self):\n self.displayed = self.vertical_icon if self.displayed == self.horizontal_icon else self.horizontal_icon\n", "issue": "Module \"xrandr_rotate\" makes i3 unusable\nWe have seen a case (see #217) when i3 becomes unusable after rotating screen using `xrandr_rotate` module. The symptoms are: `xrandr` process is stalled and only sending SIGCONT to it makes i3 usable again.\n\nThis issue is not reproducible for everyone. If you experience a similar behavior, please post here as many details as you can to help us investigate and fix this issue.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nSwitch between horizontal and vertical screen rotation on a single click.\n\nConfiguration parameters:\n cache_timeout: how often to refresh this module.\n (default is 10)\n format: a string that formats the output, can include placeholders.\n (default is '{icon}')\n hide_if_disconnected: a boolean flag to hide icon when `screen` is disconnected.\n it has no effect unless `screen` option is also configured.\n (default: None)\n horizontal_icon: a character to represent horizontal rotation.\n (default is 'H')\n horizontal_rotation: a horizontal rotation for xrandr to use.\n available options: 'normal' or 'inverted'.\n (default is 'normal')\n screen: display output name to rotate, as detected by xrandr.\n if not provided, all enabled screens will be rotated.\n (default: None)\n vertical_icon: a character to represent vertical rotation.\n (default is 'V')\n vertical_rotation: a vertical rotation for xrandr to use.\n available options: 'left' or 'right'.\n (default is 'left')\n\nAvailable placeholders for formatting the output:\n {icon} a rotation icon, specified by `horizontal_icon` or `vertical_icon`.\n {screen} a screen name, specified by `screen` option or detected automatically\n if only one screen is connected, otherwise 'ALL'.\n\n\nRemarks:\n There have been cases when rotating a screen using this module made i3 unusabe.\n If you experience a similar behavior, please report as many details as you can:\n https://github.com/ultrabug/py3status/issues/227\n\n\n@author Maxim Baz (https://github.com/maximbaz)\n@license BSD\n\"\"\"\n\nfrom subprocess import Popen, PIPE\nfrom time import sleep, time\n\n\nclass Py3status:\n \"\"\"\n \"\"\"\n # available configuration parameters\n cache_timeout = 10\n format = '{icon}'\n hide_if_disconnected = False\n horizontal_icon = 'H'\n horizontal_rotation = 'normal'\n screen = None\n vertical_icon = 'V'\n vertical_rotation = 'left'\n\n def _call(self, cmd):\n process = Popen(cmd, stdout=PIPE, shell=True)\n output = process.communicate()[0] or \"\"\n try:\n # python3\n output = output.decode()\n except:\n pass\n return output.strip()\n\n def _get_all_outputs(self):\n cmd = 'xrandr -q | grep \" connected [^(]\" | cut -d \" \" -f1'\n return self._call(cmd).split()\n\n def _get_current_rotation_icon(self, all_outputs):\n output = 
self.screen or all_outputs[0]\n cmd = 'xrandr -q | grep \"^' + output + '\" | cut -d \" \" -f4'\n output = self._call(cmd)\n # xrandr may skip printing the 'normal', in which case the output would start from '('\n is_horizontal = output.startswith('(') or output in ['normal', 'inverted']\n return self.horizontal_icon if is_horizontal else self.vertical_icon\n\n def _apply(self):\n rotation = self.horizontal_rotation if self.displayed == self.horizontal_icon else self.vertical_rotation\n outputs = [self.screen] if self.screen else self._get_all_outputs()\n for output in outputs:\n cmd = 'xrandr --output ' + output + ' --rotate ' + rotation\n self._call(cmd)\n\n def _switch_selection(self):\n self.displayed = self.vertical_icon if self.displayed == self.horizontal_icon else self.horizontal_icon\n\n def on_click(self, i3s_output_list, i3s_config, event):\n \"\"\"\n Click events\n - left click & scroll up/down: switch between rotations\n - right click: apply selected rotation\n \"\"\"\n button = event['button']\n if button in [1, 4, 5]:\n self._switch_selection()\n elif button == 3:\n self._apply()\n\n def xrandr_rotate(self, i3s_output_list, i3s_config):\n all_outputs = self._get_all_outputs()\n selected_screen_disconnected = self.screen is not None and self.screen not in all_outputs\n if selected_screen_disconnected and self.hide_if_disconnected:\n full_text = ''\n else:\n if not hasattr(self, 'displayed'):\n self.displayed = self._get_current_rotation_icon(all_outputs)\n\n screen = self.screen or all_outputs[0] if len(all_outputs) == 1 else 'ALL'\n full_text = self.format.format(icon=self.displayed or '?', screen=screen)\n\n response = {\n 'cached_until': time() + self.cache_timeout,\n 'full_text': full_text\n }\n\n # coloration\n if selected_screen_disconnected and not self.hide_if_disconnected:\n response['color'] = i3s_config['color_degraded']\n elif self.displayed == self._get_current_rotation_icon(all_outputs):\n response['color'] = i3s_config['color_good']\n\n return response\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Test this module by calling it directly.\n \"\"\"\n x = Py3status()\n config = {\n 'color_bad': '#FF0000',\n 'color_degraded': '#FFFF00',\n 'color_good': '#00FF00'\n }\n while True:\n print(x.xrandr_rotate([], config))\n sleep(1)\n", "path": "py3status/modules/xrandr_rotate.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nSwitch between horizontal and vertical screen rotation on a single click.\n\nConfiguration parameters:\n cache_timeout: how often to refresh this module.\n (default is 10)\n format: a string that formats the output, can include placeholders.\n (default is '{icon}')\n hide_if_disconnected: a boolean flag to hide icon when `screen` is disconnected.\n it has no effect unless `screen` option is also configured.\n (default: None)\n horizontal_icon: a character to represent horizontal rotation.\n (default is 'H')\n horizontal_rotation: a horizontal rotation for xrandr to use.\n available options: 'normal' or 'inverted'.\n (default is 'normal')\n screen: display output name to rotate, as detected by xrandr.\n if not provided, all enabled screens will be rotated.\n (default: None)\n vertical_icon: a character to represent vertical rotation.\n (default is 'V')\n vertical_rotation: a vertical rotation for xrandr to use.\n available options: 'left' or 'right'.\n (default is 'left')\n\nAvailable placeholders for formatting the output:\n {icon} a rotation icon, specified by `horizontal_icon` or `vertical_icon`.\n {screen} a screen name, 
specified by `screen` option or detected automatically\n if only one screen is connected, otherwise 'ALL'.\n\n\nRemarks:\n There have been cases when rotating a screen using this module made i3 unusabe.\n If you experience a similar behavior, please report as many details as you can:\n https://github.com/ultrabug/py3status/issues/227\n\n\n@author Maxim Baz (https://github.com/maximbaz)\n@license BSD\n\"\"\"\n\nfrom subprocess import Popen, PIPE\nfrom time import sleep, time\n\n\nclass Py3status:\n \"\"\"\n \"\"\"\n # available configuration parameters\n cache_timeout = 10\n format = '{icon}'\n hide_if_disconnected = False\n horizontal_icon = 'H'\n horizontal_rotation = 'normal'\n screen = None\n vertical_icon = 'V'\n vertical_rotation = 'left'\n\n def _call(self, cmd):\n process = Popen(cmd, stdout=PIPE, shell=True)\n output = process.communicate()[0] or \"\"\n try:\n # python3\n output = output.decode()\n except:\n pass\n return output.strip()\n\n def _get_all_outputs(self):\n cmd = 'xrandr -q | grep \" connected [^(]\" | cut -d \" \" -f1'\n return self._call(cmd).split()\n\n def _get_current_rotation_icon(self, all_outputs):\n output = self.screen or all_outputs[0]\n cmd = 'xrandr -q | grep \"^' + output + '\" | cut -d \" \" -f4'\n output = self._call(cmd)\n # xrandr may skip printing the 'normal', in which case the output would start from '('\n is_horizontal = output.startswith('(') or output in ['normal', 'inverted']\n return self.horizontal_icon if is_horizontal else self.vertical_icon\n\n def _apply(self):\n rotation = self.horizontal_rotation if self.displayed == self.horizontal_icon else self.vertical_rotation\n outputs = [self.screen] if self.screen else self._get_all_outputs()\n for output in outputs:\n cmd = 'exec xrandr --output ' + output + ' --rotate ' + rotation\n Popen(['i3-msg', cmd], stdout=PIPE)\n\n def _switch_selection(self):\n self.displayed = self.vertical_icon if self.displayed == self.horizontal_icon else self.horizontal_icon\n\n def on_click(self, i3s_output_list, i3s_config, event):\n \"\"\"\n Click events\n - left click & scroll up/down: switch between rotations\n - right click: apply selected rotation\n \"\"\"\n button = event['button']\n if button in [1, 4, 5]:\n self._switch_selection()\n elif button == 3:\n self._apply()\n\n def xrandr_rotate(self, i3s_output_list, i3s_config):\n all_outputs = self._get_all_outputs()\n selected_screen_disconnected = self.screen is not None and self.screen not in all_outputs\n if selected_screen_disconnected and self.hide_if_disconnected:\n full_text = ''\n else:\n if not hasattr(self, 'displayed'):\n self.displayed = self._get_current_rotation_icon(all_outputs)\n\n screen = self.screen or all_outputs[0] if len(all_outputs) == 1 else 'ALL'\n full_text = self.format.format(icon=self.displayed or '?', screen=screen)\n\n response = {\n 'cached_until': time() + self.cache_timeout,\n 'full_text': full_text\n }\n\n # coloration\n if selected_screen_disconnected and not self.hide_if_disconnected:\n response['color'] = i3s_config['color_degraded']\n elif self.displayed == self._get_current_rotation_icon(all_outputs):\n response['color'] = i3s_config['color_good']\n\n return response\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Test this module by calling it directly.\n \"\"\"\n x = Py3status()\n config = {\n 'color_bad': '#FF0000',\n 'color_degraded': '#FFFF00',\n 'color_good': '#00FF00'\n }\n while True:\n print(x.xrandr_rotate([], config))\n sleep(1)\n", "path": "py3status/modules/xrandr_rotate.py"}]}
| 1,881 | 196 |
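To make the intent of the patch above concrete, here is a minimal sketch of the new rotation call: instead of running xrandr through a shell and waiting on `communicate()`, the module now hands the command to i3 via `i3-msg exec`, which returns immediately and leaves the xrandr process's lifetime to i3. The output name `'eDP1'` is an illustrative assumption.

```python
from subprocess import PIPE, Popen


def rotate_output(output, rotation):
    # 'i3-msg exec ...' asks i3 itself to spawn xrandr and returns right away,
    # so the status-bar thread never blocks on (or gets wedged by) xrandr.
    cmd = 'exec xrandr --output ' + output + ' --rotate ' + rotation
    Popen(['i3-msg', cmd], stdout=PIPE)


rotate_output('eDP1', 'left')
```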
gh_patches_debug_24055
|
rasdani/github-patches
|
git_diff
|
conda__conda-3625
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
conda update icu (54.1-0 --> 56.1-4 conda-forge)
In a new installation, it appears that going from icu 54 to 56 will fail unless the following is done (at least on linux):
bash Anaconda2-4.2.0-Linux-x86_64.sh
conda remove icu
rm -r $HOME/anaconda2/lib/icu
conda install -c conda-forge icu=56.1
In other words, using the first and fourth lines alone fails with:
CondaOSError: OS error: failed to link (src=u'/home/anaconda2/pkgs/icu-56.1-4/lib/icu/current', dst='/home/anaconda2/lib/icu/current', type=3, error=OSError(17, 'File exists'))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda/common/disk.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import sys
5 from errno import EACCES, EEXIST, ENOENT, EPERM
6 from itertools import chain
7 from logging import getLogger
8 from os import W_OK, access, chmod, getpid, listdir, lstat, makedirs, rename, unlink, walk
9 from os.path import abspath, basename, dirname, isdir, join, lexists
10 from shutil import rmtree
11 from stat import S_IEXEC, S_IMODE, S_ISDIR, S_ISLNK, S_ISREG, S_IWRITE
12 from time import sleep
13 from uuid import uuid4
14
15 from ..compat import lchmod, text_type
16 from ..utils import on_win
17
18 __all__ = ["rm_rf", "exp_backoff_fn", "try_write"]
19
20 log = getLogger(__name__)
21
22
23 def try_write(dir_path, heavy=False):
24 """Test write access to a directory.
25
26 Args:
27 dir_path (str): directory to test write access
28 heavy (bool): Actually create and delete a file, or do a faster os.access test.
29 https://docs.python.org/dev/library/os.html?highlight=xattr#os.access
30
31 Returns:
32 bool
33
34 """
35 if not isdir(dir_path):
36 return False
37 if on_win or heavy:
38 # try to create a file to see if `dir_path` is writable, see #2151
39 temp_filename = join(dir_path, '.conda-try-write-%d' % getpid())
40 try:
41 with open(temp_filename, mode='wb') as fo:
42 fo.write(b'This is a test file.\n')
43 backoff_unlink(temp_filename)
44 return True
45 except (IOError, OSError):
46 return False
47 finally:
48 backoff_unlink(temp_filename)
49 else:
50 return access(dir_path, W_OK)
51
52
53 def backoff_unlink(file_or_symlink_path):
54 def _unlink(path):
55 make_writable(path)
56 unlink(path)
57
58 try:
59 exp_backoff_fn(lambda f: lexists(f) and _unlink(f), file_or_symlink_path)
60 except (IOError, OSError) as e:
61 if e.errno not in (ENOENT,):
62 # errno.ENOENT File not found error / No such file or directory
63 raise
64
65
66 def backoff_rmdir(dirpath):
67 if not isdir(dirpath):
68 return
69
70 # shutil.rmtree:
71 # if onerror is set, it is called to handle the error with arguments (func, path, exc_info)
72 # where func is os.listdir, os.remove, or os.rmdir;
73 # path is the argument to that function that caused it to fail; and
74 # exc_info is a tuple returned by sys.exc_info() ==> (type, value, traceback).
75 def retry(func, path, exc_info):
76 if getattr(exc_info[1], 'errno', None) == ENOENT:
77 return
78 recursive_make_writable(dirname(path))
79 func(path)
80
81 def _rmdir(path):
82 try:
83 recursive_make_writable(path)
84 exp_backoff_fn(rmtree, path, onerror=retry)
85 except (IOError, OSError) as e:
86 if e.errno == ENOENT:
87 log.debug("no such file or directory: %s", path)
88 else:
89 raise
90
91 for root, dirs, files in walk(dirpath, topdown=False):
92 for file in files:
93 backoff_unlink(join(root, file))
94 for dir in dirs:
95 _rmdir(join(root, dir))
96
97 _rmdir(dirpath)
98
99
100 def make_writable(path):
101 try:
102 mode = lstat(path).st_mode
103 if S_ISDIR(mode):
104 chmod(path, S_IMODE(mode) | S_IWRITE | S_IEXEC)
105 elif S_ISREG(mode):
106 chmod(path, S_IMODE(mode) | S_IWRITE)
107 elif S_ISLNK(mode):
108 lchmod(path, S_IMODE(mode) | S_IWRITE)
109 else:
110 log.debug("path cannot be made writable: %s", path)
111 except Exception as e:
112 eno = getattr(e, 'errno', None)
113 if eno in (ENOENT,):
114 log.debug("tried to make writable, but didn't exist: %s", path)
115 raise
116 elif eno in (EACCES, EPERM):
117 log.debug("tried make writable but failed: %s\n%r", path, e)
118 else:
119 log.warn("Error making path writable: %s\n%r", path, e)
120 raise
121
122
123 def recursive_make_writable(path):
124 # The need for this function was pointed out at
125 # https://github.com/conda/conda/issues/3266#issuecomment-239241915
126 # Especially on windows, file removal will often fail because it is marked read-only
127 if isdir(path):
128 for root, dirs, files in walk(path):
129 for path in chain.from_iterable((files, dirs)):
130 try:
131 exp_backoff_fn(make_writable, join(root, path))
132 except (IOError, OSError) as e:
133 if e.errno == ENOENT:
134 log.debug("no such file or directory: %s", path)
135 else:
136 raise
137 else:
138 exp_backoff_fn(make_writable, path)
139
140
141 def exp_backoff_fn(fn, *args, **kwargs):
142 """Mostly for retrying file operations that fail on Windows due to virus scanners"""
143 if not on_win:
144 return fn(*args, **kwargs)
145
146 import random
147 # with max_tries = 6, max total time ~= 3.2 sec
148 # with max_tries = 7, max total time ~= 6.5 sec
149 max_tries = 7
150 for n in range(max_tries):
151 try:
152 result = fn(*args, **kwargs)
153 except (OSError, IOError) as e:
154 log.debug(repr(e))
155 if e.errno in (EPERM, EACCES):
156 if n == max_tries-1:
157 raise
158 sleep_time = ((2 ** n) + random.random()) * 0.1
159 caller_frame = sys._getframe(1)
160 log.debug("retrying %s/%s %s() in %g sec",
161 basename(caller_frame.f_code.co_filename),
162 caller_frame.f_lineno, fn.__name__,
163 sleep_time)
164 sleep(sleep_time)
165 elif e.errno in (ENOENT,):
166 # errno.ENOENT File not found error / No such file or directory
167 raise
168 else:
169 log.warn("Uncaught backoff with errno %d", e.errno)
170 raise
171 else:
172 return result
173
174
175 def rm_rf(path, max_retries=5, trash=True):
176 """
177 Completely delete path
178 max_retries is the number of times to retry on failure. The default is 5. This only applies
179 to deleting a directory.
180 If removing path fails and trash is True, files will be moved to the trash directory.
181 """
182 try:
183 path = abspath(path)
184 log.debug("rm_rf %s", path)
185 if isdir(path):
186 try:
187 # On Windows, always move to trash first.
188 if trash and on_win:
189 move_result = move_path_to_trash(path, preclean=False)
190 if move_result:
191 return True
192 backoff_rmdir(path)
193 finally:
194 # If path was removed, ensure it's not in linked_data_
195 if not isdir(path):
196 from conda.install import delete_linked_data_any
197 delete_linked_data_any(path)
198 elif lexists(path):
199 try:
200 backoff_unlink(path)
201 return True
202 except (OSError, IOError) as e:
203 log.debug("%r errno %d\nCannot unlink %s.", e, e.errno, path)
204 if trash:
205 move_result = move_path_to_trash(path)
206 if move_result:
207 return True
208 log.info("Failed to remove %s.", path)
209
210 else:
211 log.debug("rm_rf failed. Not a link, file, or directory: %s", path)
212 return True
213 finally:
214 if lexists(path):
215 log.info("rm_rf failed for %s", path)
216 return False
217
218
219 def delete_trash(prefix=None):
220 from ..base.context import context
221 for pkg_dir in context.pkgs_dirs:
222 trash_dir = join(pkg_dir, '.trash')
223 if not lexists(trash_dir):
224 log.debug("Trash directory %s doesn't exist. Moving on.", trash_dir)
225 continue
226 log.debug("removing trash for %s", trash_dir)
227 for p in listdir(trash_dir):
228 path = join(trash_dir, p)
229 try:
230 if isdir(path):
231 backoff_rmdir(path)
232 else:
233 backoff_unlink(path)
234 except (IOError, OSError) as e:
235 log.info("Could not delete path in trash dir %s\n%r", path, e)
236 if listdir(trash_dir):
237 log.info("Unable to clean trash directory %s", trash_dir)
238
239
240 def move_to_trash(prefix, f, tempdir=None):
241 """
242 Move a file or folder f from prefix to the trash
243
244 tempdir is a deprecated parameter, and will be ignored.
245
246 This function is deprecated in favor of `move_path_to_trash`.
247 """
248 return move_path_to_trash(join(prefix, f) if f else prefix)
249
250
251 def move_path_to_trash(path, preclean=True):
252 """
253 Move a path to the trash
254 """
255 from ..base.context import context
256 for pkg_dir in context.pkgs_dirs:
257 trash_dir = join(pkg_dir, '.trash')
258
259 try:
260 makedirs(trash_dir)
261 except (IOError, OSError) as e1:
262 if e1.errno != EEXIST:
263 continue
264
265 trash_file = join(trash_dir, text_type(uuid4()))
266
267 try:
268 rename(path, trash_file)
269 except (IOError, OSError) as e:
270 log.debug("Could not move %s to %s.\n%r", path, trash_file, e)
271 else:
272 log.debug("Moved to trash: %s", path)
273 from ..install import delete_linked_data_any
274 delete_linked_data_any(path)
275 return True
276
277 return False
278
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/conda/common/disk.py b/conda/common/disk.py
--- a/conda/common/disk.py
+++ b/conda/common/disk.py
@@ -6,7 +6,7 @@
from itertools import chain
from logging import getLogger
from os import W_OK, access, chmod, getpid, listdir, lstat, makedirs, rename, unlink, walk
-from os.path import abspath, basename, dirname, isdir, join, lexists
+from os.path import abspath, basename, dirname, isdir, isfile, islink, join, lexists
from shutil import rmtree
from stat import S_IEXEC, S_IMODE, S_ISDIR, S_ISLNK, S_ISREG, S_IWRITE
from time import sleep
@@ -192,10 +192,10 @@
backoff_rmdir(path)
finally:
# If path was removed, ensure it's not in linked_data_
- if not isdir(path):
+ if islink(path) or isfile(path):
from conda.install import delete_linked_data_any
delete_linked_data_any(path)
- elif lexists(path):
+ if lexists(path):
try:
backoff_unlink(path)
return True
|
{"golden_diff": "diff --git a/conda/common/disk.py b/conda/common/disk.py\n--- a/conda/common/disk.py\n+++ b/conda/common/disk.py\n@@ -6,7 +6,7 @@\n from itertools import chain\n from logging import getLogger\n from os import W_OK, access, chmod, getpid, listdir, lstat, makedirs, rename, unlink, walk\n-from os.path import abspath, basename, dirname, isdir, join, lexists\n+from os.path import abspath, basename, dirname, isdir, isfile, islink, join, lexists\n from shutil import rmtree\n from stat import S_IEXEC, S_IMODE, S_ISDIR, S_ISLNK, S_ISREG, S_IWRITE\n from time import sleep\n@@ -192,10 +192,10 @@\n backoff_rmdir(path)\n finally:\n # If path was removed, ensure it's not in linked_data_\n- if not isdir(path):\n+ if islink(path) or isfile(path):\n from conda.install import delete_linked_data_any\n delete_linked_data_any(path)\n- elif lexists(path):\n+ if lexists(path):\n try:\n backoff_unlink(path)\n return True\n", "issue": "conda update icu (54.1-0 --> 56.1-4 conda-forge)\nIn a new installation, it appears that going from icu 54 to 56 will fail unless the following is done (at least on linux):\n\nbash Anaconda2-4.2.0-Linux-x86_64.sh\nconda remove icu\nrm -r $HOME/anaconda2/lib/icu\nconda install -c conda-forge icu=56.1\n\nIn other words, using the first and fourth lines alone fails with:\n\nCondaOSError: OS error: failed to link (src=u'/home/anaconda2/pkgs/icu-56.1-4/lib/icu/current', dst='/home/anaconda2/lib/icu/current', type=3, error=OSError(17, 'File exists'))\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport sys\nfrom errno import EACCES, EEXIST, ENOENT, EPERM\nfrom itertools import chain\nfrom logging import getLogger\nfrom os import W_OK, access, chmod, getpid, listdir, lstat, makedirs, rename, unlink, walk\nfrom os.path import abspath, basename, dirname, isdir, join, lexists\nfrom shutil import rmtree\nfrom stat import S_IEXEC, S_IMODE, S_ISDIR, S_ISLNK, S_ISREG, S_IWRITE\nfrom time import sleep\nfrom uuid import uuid4\n\nfrom ..compat import lchmod, text_type\nfrom ..utils import on_win\n\n__all__ = [\"rm_rf\", \"exp_backoff_fn\", \"try_write\"]\n\nlog = getLogger(__name__)\n\n\ndef try_write(dir_path, heavy=False):\n \"\"\"Test write access to a directory.\n\n Args:\n dir_path (str): directory to test write access\n heavy (bool): Actually create and delete a file, or do a faster os.access test.\n https://docs.python.org/dev/library/os.html?highlight=xattr#os.access\n\n Returns:\n bool\n\n \"\"\"\n if not isdir(dir_path):\n return False\n if on_win or heavy:\n # try to create a file to see if `dir_path` is writable, see #2151\n temp_filename = join(dir_path, '.conda-try-write-%d' % getpid())\n try:\n with open(temp_filename, mode='wb') as fo:\n fo.write(b'This is a test file.\\n')\n backoff_unlink(temp_filename)\n return True\n except (IOError, OSError):\n return False\n finally:\n backoff_unlink(temp_filename)\n else:\n return access(dir_path, W_OK)\n\n\ndef backoff_unlink(file_or_symlink_path):\n def _unlink(path):\n make_writable(path)\n unlink(path)\n\n try:\n exp_backoff_fn(lambda f: lexists(f) and _unlink(f), file_or_symlink_path)\n except (IOError, OSError) as e:\n if e.errno not in (ENOENT,):\n # errno.ENOENT File not found error / No such file or directory\n raise\n\n\ndef backoff_rmdir(dirpath):\n if not isdir(dirpath):\n return\n\n # shutil.rmtree:\n # if onerror is set, it is called to handle the error with arguments (func, path, exc_info)\n # where func is 
os.listdir, os.remove, or os.rmdir;\n # path is the argument to that function that caused it to fail; and\n # exc_info is a tuple returned by sys.exc_info() ==> (type, value, traceback).\n def retry(func, path, exc_info):\n if getattr(exc_info[1], 'errno', None) == ENOENT:\n return\n recursive_make_writable(dirname(path))\n func(path)\n\n def _rmdir(path):\n try:\n recursive_make_writable(path)\n exp_backoff_fn(rmtree, path, onerror=retry)\n except (IOError, OSError) as e:\n if e.errno == ENOENT:\n log.debug(\"no such file or directory: %s\", path)\n else:\n raise\n\n for root, dirs, files in walk(dirpath, topdown=False):\n for file in files:\n backoff_unlink(join(root, file))\n for dir in dirs:\n _rmdir(join(root, dir))\n\n _rmdir(dirpath)\n\n\ndef make_writable(path):\n try:\n mode = lstat(path).st_mode\n if S_ISDIR(mode):\n chmod(path, S_IMODE(mode) | S_IWRITE | S_IEXEC)\n elif S_ISREG(mode):\n chmod(path, S_IMODE(mode) | S_IWRITE)\n elif S_ISLNK(mode):\n lchmod(path, S_IMODE(mode) | S_IWRITE)\n else:\n log.debug(\"path cannot be made writable: %s\", path)\n except Exception as e:\n eno = getattr(e, 'errno', None)\n if eno in (ENOENT,):\n log.debug(\"tried to make writable, but didn't exist: %s\", path)\n raise\n elif eno in (EACCES, EPERM):\n log.debug(\"tried make writable but failed: %s\\n%r\", path, e)\n else:\n log.warn(\"Error making path writable: %s\\n%r\", path, e)\n raise\n\n\ndef recursive_make_writable(path):\n # The need for this function was pointed out at\n # https://github.com/conda/conda/issues/3266#issuecomment-239241915\n # Especially on windows, file removal will often fail because it is marked read-only\n if isdir(path):\n for root, dirs, files in walk(path):\n for path in chain.from_iterable((files, dirs)):\n try:\n exp_backoff_fn(make_writable, join(root, path))\n except (IOError, OSError) as e:\n if e.errno == ENOENT:\n log.debug(\"no such file or directory: %s\", path)\n else:\n raise\n else:\n exp_backoff_fn(make_writable, path)\n\n\ndef exp_backoff_fn(fn, *args, **kwargs):\n \"\"\"Mostly for retrying file operations that fail on Windows due to virus scanners\"\"\"\n if not on_win:\n return fn(*args, **kwargs)\n\n import random\n # with max_tries = 6, max total time ~= 3.2 sec\n # with max_tries = 7, max total time ~= 6.5 sec\n max_tries = 7\n for n in range(max_tries):\n try:\n result = fn(*args, **kwargs)\n except (OSError, IOError) as e:\n log.debug(repr(e))\n if e.errno in (EPERM, EACCES):\n if n == max_tries-1:\n raise\n sleep_time = ((2 ** n) + random.random()) * 0.1\n caller_frame = sys._getframe(1)\n log.debug(\"retrying %s/%s %s() in %g sec\",\n basename(caller_frame.f_code.co_filename),\n caller_frame.f_lineno, fn.__name__,\n sleep_time)\n sleep(sleep_time)\n elif e.errno in (ENOENT,):\n # errno.ENOENT File not found error / No such file or directory\n raise\n else:\n log.warn(\"Uncaught backoff with errno %d\", e.errno)\n raise\n else:\n return result\n\n\ndef rm_rf(path, max_retries=5, trash=True):\n \"\"\"\n Completely delete path\n max_retries is the number of times to retry on failure. The default is 5. 
This only applies\n to deleting a directory.\n If removing path fails and trash is True, files will be moved to the trash directory.\n \"\"\"\n try:\n path = abspath(path)\n log.debug(\"rm_rf %s\", path)\n if isdir(path):\n try:\n # On Windows, always move to trash first.\n if trash and on_win:\n move_result = move_path_to_trash(path, preclean=False)\n if move_result:\n return True\n backoff_rmdir(path)\n finally:\n # If path was removed, ensure it's not in linked_data_\n if not isdir(path):\n from conda.install import delete_linked_data_any\n delete_linked_data_any(path)\n elif lexists(path):\n try:\n backoff_unlink(path)\n return True\n except (OSError, IOError) as e:\n log.debug(\"%r errno %d\\nCannot unlink %s.\", e, e.errno, path)\n if trash:\n move_result = move_path_to_trash(path)\n if move_result:\n return True\n log.info(\"Failed to remove %s.\", path)\n\n else:\n log.debug(\"rm_rf failed. Not a link, file, or directory: %s\", path)\n return True\n finally:\n if lexists(path):\n log.info(\"rm_rf failed for %s\", path)\n return False\n\n\ndef delete_trash(prefix=None):\n from ..base.context import context\n for pkg_dir in context.pkgs_dirs:\n trash_dir = join(pkg_dir, '.trash')\n if not lexists(trash_dir):\n log.debug(\"Trash directory %s doesn't exist. Moving on.\", trash_dir)\n continue\n log.debug(\"removing trash for %s\", trash_dir)\n for p in listdir(trash_dir):\n path = join(trash_dir, p)\n try:\n if isdir(path):\n backoff_rmdir(path)\n else:\n backoff_unlink(path)\n except (IOError, OSError) as e:\n log.info(\"Could not delete path in trash dir %s\\n%r\", path, e)\n if listdir(trash_dir):\n log.info(\"Unable to clean trash directory %s\", trash_dir)\n\n\ndef move_to_trash(prefix, f, tempdir=None):\n \"\"\"\n Move a file or folder f from prefix to the trash\n\n tempdir is a deprecated parameter, and will be ignored.\n\n This function is deprecated in favor of `move_path_to_trash`.\n \"\"\"\n return move_path_to_trash(join(prefix, f) if f else prefix)\n\n\ndef move_path_to_trash(path, preclean=True):\n \"\"\"\n Move a path to the trash\n \"\"\"\n from ..base.context import context\n for pkg_dir in context.pkgs_dirs:\n trash_dir = join(pkg_dir, '.trash')\n\n try:\n makedirs(trash_dir)\n except (IOError, OSError) as e1:\n if e1.errno != EEXIST:\n continue\n\n trash_file = join(trash_dir, text_type(uuid4()))\n\n try:\n rename(path, trash_file)\n except (IOError, OSError) as e:\n log.debug(\"Could not move %s to %s.\\n%r\", path, trash_file, e)\n else:\n log.debug(\"Moved to trash: %s\", path)\n from ..install import delete_linked_data_any\n delete_linked_data_any(path)\n return True\n\n return False\n", "path": "conda/common/disk.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport sys\nfrom errno import EACCES, EEXIST, ENOENT, EPERM\nfrom itertools import chain\nfrom logging import getLogger\nfrom os import W_OK, access, chmod, getpid, listdir, lstat, makedirs, rename, unlink, walk\nfrom os.path import abspath, basename, dirname, isdir, isfile, islink, join, lexists\nfrom shutil import rmtree\nfrom stat import S_IEXEC, S_IMODE, S_ISDIR, S_ISLNK, S_ISREG, S_IWRITE\nfrom time import sleep\nfrom uuid import uuid4\n\nfrom ..compat import lchmod, text_type\nfrom ..utils import on_win\n\n__all__ = [\"rm_rf\", \"exp_backoff_fn\", \"try_write\"]\n\nlog = getLogger(__name__)\n\n\ndef try_write(dir_path, heavy=False):\n \"\"\"Test write access to a directory.\n\n Args:\n dir_path (str): 
directory to test write access\n heavy (bool): Actually create and delete a file, or do a faster os.access test.\n https://docs.python.org/dev/library/os.html?highlight=xattr#os.access\n\n Returns:\n bool\n\n \"\"\"\n if not isdir(dir_path):\n return False\n if on_win or heavy:\n # try to create a file to see if `dir_path` is writable, see #2151\n temp_filename = join(dir_path, '.conda-try-write-%d' % getpid())\n try:\n with open(temp_filename, mode='wb') as fo:\n fo.write(b'This is a test file.\\n')\n backoff_unlink(temp_filename)\n return True\n except (IOError, OSError):\n return False\n finally:\n backoff_unlink(temp_filename)\n else:\n return access(dir_path, W_OK)\n\n\ndef backoff_unlink(file_or_symlink_path):\n def _unlink(path):\n make_writable(path)\n unlink(path)\n\n try:\n exp_backoff_fn(lambda f: lexists(f) and _unlink(f), file_or_symlink_path)\n except (IOError, OSError) as e:\n if e.errno not in (ENOENT,):\n # errno.ENOENT File not found error / No such file or directory\n raise\n\n\ndef backoff_rmdir(dirpath):\n if not isdir(dirpath):\n return\n\n # shutil.rmtree:\n # if onerror is set, it is called to handle the error with arguments (func, path, exc_info)\n # where func is os.listdir, os.remove, or os.rmdir;\n # path is the argument to that function that caused it to fail; and\n # exc_info is a tuple returned by sys.exc_info() ==> (type, value, traceback).\n def retry(func, path, exc_info):\n if getattr(exc_info[1], 'errno', None) == ENOENT:\n return\n recursive_make_writable(dirname(path))\n func(path)\n\n def _rmdir(path):\n try:\n recursive_make_writable(path)\n exp_backoff_fn(rmtree, path, onerror=retry)\n except (IOError, OSError) as e:\n if e.errno == ENOENT:\n log.debug(\"no such file or directory: %s\", path)\n else:\n raise\n\n for root, dirs, files in walk(dirpath, topdown=False):\n for file in files:\n backoff_unlink(join(root, file))\n for dir in dirs:\n _rmdir(join(root, dir))\n\n _rmdir(dirpath)\n\n\ndef make_writable(path):\n try:\n mode = lstat(path).st_mode\n if S_ISDIR(mode):\n chmod(path, S_IMODE(mode) | S_IWRITE | S_IEXEC)\n elif S_ISREG(mode):\n chmod(path, S_IMODE(mode) | S_IWRITE)\n elif S_ISLNK(mode):\n lchmod(path, S_IMODE(mode) | S_IWRITE)\n else:\n log.debug(\"path cannot be made writable: %s\", path)\n except Exception as e:\n eno = getattr(e, 'errno', None)\n if eno in (ENOENT,):\n log.debug(\"tried to make writable, but didn't exist: %s\", path)\n raise\n elif eno in (EACCES, EPERM):\n log.debug(\"tried make writable but failed: %s\\n%r\", path, e)\n else:\n log.warn(\"Error making path writable: %s\\n%r\", path, e)\n raise\n\n\ndef recursive_make_writable(path):\n # The need for this function was pointed out at\n # https://github.com/conda/conda/issues/3266#issuecomment-239241915\n # Especially on windows, file removal will often fail because it is marked read-only\n if isdir(path):\n for root, dirs, files in walk(path):\n for path in chain.from_iterable((files, dirs)):\n try:\n exp_backoff_fn(make_writable, join(root, path))\n except (IOError, OSError) as e:\n if e.errno == ENOENT:\n log.debug(\"no such file or directory: %s\", path)\n else:\n raise\n else:\n exp_backoff_fn(make_writable, path)\n\n\ndef exp_backoff_fn(fn, *args, **kwargs):\n \"\"\"Mostly for retrying file operations that fail on Windows due to virus scanners\"\"\"\n if not on_win:\n return fn(*args, **kwargs)\n\n import random\n # with max_tries = 6, max total time ~= 3.2 sec\n # with max_tries = 7, max total time ~= 6.5 sec\n max_tries = 7\n for n in range(max_tries):\n 
try:\n result = fn(*args, **kwargs)\n except (OSError, IOError) as e:\n log.debug(repr(e))\n if e.errno in (EPERM, EACCES):\n if n == max_tries-1:\n raise\n sleep_time = ((2 ** n) + random.random()) * 0.1\n caller_frame = sys._getframe(1)\n log.debug(\"retrying %s/%s %s() in %g sec\",\n basename(caller_frame.f_code.co_filename),\n caller_frame.f_lineno, fn.__name__,\n sleep_time)\n sleep(sleep_time)\n elif e.errno in (ENOENT,):\n # errno.ENOENT File not found error / No such file or directory\n raise\n else:\n log.warn(\"Uncaught backoff with errno %d\", e.errno)\n raise\n else:\n return result\n\n\ndef rm_rf(path, max_retries=5, trash=True):\n \"\"\"\n Completely delete path\n max_retries is the number of times to retry on failure. The default is 5. This only applies\n to deleting a directory.\n If removing path fails and trash is True, files will be moved to the trash directory.\n \"\"\"\n try:\n path = abspath(path)\n log.debug(\"rm_rf %s\", path)\n if isdir(path):\n try:\n # On Windows, always move to trash first.\n if trash and on_win:\n move_result = move_path_to_trash(path, preclean=False)\n if move_result:\n return True\n backoff_rmdir(path)\n finally:\n # If path was removed, ensure it's not in linked_data_\n if islink(path) or isfile(path):\n from conda.install import delete_linked_data_any\n delete_linked_data_any(path)\n if lexists(path):\n try:\n backoff_unlink(path)\n return True\n except (OSError, IOError) as e:\n log.debug(\"%r errno %d\\nCannot unlink %s.\", e, e.errno, path)\n if trash:\n move_result = move_path_to_trash(path)\n if move_result:\n return True\n log.info(\"Failed to remove %s.\", path)\n\n else:\n log.debug(\"rm_rf failed. Not a link, file, or directory: %s\", path)\n return True\n finally:\n if lexists(path):\n log.info(\"rm_rf failed for %s\", path)\n return False\n\n\ndef delete_trash(prefix=None):\n from ..base.context import context\n for pkg_dir in context.pkgs_dirs:\n trash_dir = join(pkg_dir, '.trash')\n if not lexists(trash_dir):\n log.debug(\"Trash directory %s doesn't exist. Moving on.\", trash_dir)\n continue\n log.debug(\"removing trash for %s\", trash_dir)\n for p in listdir(trash_dir):\n path = join(trash_dir, p)\n try:\n if isdir(path):\n backoff_rmdir(path)\n else:\n backoff_unlink(path)\n except (IOError, OSError) as e:\n log.info(\"Could not delete path in trash dir %s\\n%r\", path, e)\n if listdir(trash_dir):\n log.info(\"Unable to clean trash directory %s\", trash_dir)\n\n\ndef move_to_trash(prefix, f, tempdir=None):\n \"\"\"\n Move a file or folder f from prefix to the trash\n\n tempdir is a deprecated parameter, and will be ignored.\n\n This function is deprecated in favor of `move_path_to_trash`.\n \"\"\"\n return move_path_to_trash(join(prefix, f) if f else prefix)\n\n\ndef move_path_to_trash(path, preclean=True):\n \"\"\"\n Move a path to the trash\n \"\"\"\n from ..base.context import context\n for pkg_dir in context.pkgs_dirs:\n trash_dir = join(pkg_dir, '.trash')\n\n try:\n makedirs(trash_dir)\n except (IOError, OSError) as e1:\n if e1.errno != EEXIST:\n continue\n\n trash_file = join(trash_dir, text_type(uuid4()))\n\n try:\n rename(path, trash_file)\n except (IOError, OSError) as e:\n log.debug(\"Could not move %s to %s.\\n%r\", path, trash_file, e)\n else:\n log.debug(\"Moved to trash: %s\", path)\n from ..install import delete_linked_data_any\n delete_linked_data_any(path)\n return True\n\n return False\n", "path": "conda/common/disk.py"}]}
| 3,466 | 274 |
gh_patches_debug_58007
|
rasdani/github-patches
|
git_diff
|
CiviWiki__OpenCiviWiki-1390
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
enable django-linear-migrations
## Task
- [ ] add `django-linear-migrations` with the following command `poetry add django-linear-migrations --group dev`
- [ ] follow the rest of the [django-linear-migrations configuration instructions](https://github.com/adamchainz/django-linear-migrations#installation)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `project/core/settings.py`
Content:
```
1 """
2 Django settings for civiwiki project.
3 Darius Calliet May 12, 2016
4
5 Production settings file to select proper environment variables.
6 """
7 import os
8
9 # False if not in os.environ
10 DEBUG = os.getenv("DEBUG", False)
11
12 # defaults to second value if not found in os.environ
13 DJANGO_HOST = os.getenv("DJANGO_HOST", "LOCALHOST")
14
15 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
16 SECRET_KEY = os.getenv("DJANGO_SECRET_KEY", "TEST_KEY_FOR_DEVELOPMENT")
17 ALLOWED_HOSTS = [".herokuapp.com", ".civiwiki.org", "127.0.0.1", "localhost", "0.0.0.0"]
18
19 INSTALLED_APPS = (
20 "django.contrib.admin",
21 "django.contrib.auth",
22 "django.contrib.contenttypes",
23 "django.contrib.sessions",
24 "django.contrib.messages",
25 "django.contrib.staticfiles",
26 "django_extensions",
27 "storages",
28 "core",
29 "rest_framework",
30 "accounts.apps.AccountsConfig",
31 "threads",
32 "notifications",
33 "corsheaders",
34 "taggit",
35 "categories",
36 "notification",
37 "debug_toolbar",
38 )
39
40 MIDDLEWARE = [
41 "debug_toolbar.middleware.DebugToolbarMiddleware",
42 "corsheaders.middleware.CorsMiddleware",
43 "django.middleware.security.SecurityMiddleware",
44 "whitenoise.middleware.WhiteNoiseMiddleware",
45 "django.contrib.sessions.middleware.SessionMiddleware",
46 "django.middleware.common.CommonMiddleware",
47 "django.middleware.csrf.CsrfViewMiddleware",
48 "django.contrib.auth.middleware.AuthenticationMiddleware",
49 # 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
50 "django.contrib.messages.middleware.MessageMiddleware",
51 "django.middleware.clickjacking.XFrameOptionsMiddleware",
52 ]
53
54 INTERNAL_IPS = [
55 "127.0.0.1",
56 ]
57
58 CSRF_USE_SESSIONS = (
59 True # Store the CSRF token in the users session instead of in a cookie
60 )
61
62 CORS_ORIGIN_ALLOW_ALL = True
63 ROOT_URLCONF = "core.urls"
64
65 # SSL Setup
66 if DJANGO_HOST != "LOCALHOST":
67 SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
68 SECURE_SSL_REDIRECT = True
69 SESSION_COOKIE_SECURE = True
70 CSRF_COOKIE_SECURE = True
71
72 # Internationalization & Localization
73 LANGUAGE_CODE = "en-us"
74 TIME_ZONE = "UTC"
75 USE_I18N = True
76 USE_L10N = True
77 USE_TZ = True
78
79 TEMPLATES = [
80 {
81 "BACKEND": "django.template.backends.django.DjangoTemplates",
82 "DIRS": [
83 os.path.join(BASE_DIR, "threads/templates/threads"),
84 os.path.join(BASE_DIR, "accounts/templates/accounts"),
85 ], # TODO: Add non-webapp template directory
86 "APP_DIRS": True,
87 "OPTIONS": {
88 "context_processors": [
89 "django.template.context_processors.debug",
90 "django.template.context_processors.request",
91 "django.contrib.auth.context_processors.auth",
92 "django.contrib.messages.context_processors.messages",
93 ],
94 },
95 },
96 ]
97
98 WSGI_APPLICATION = "core.wsgi.application"
99
100 # Apex Contact for Production Errors
101 ADMINS = [("Development Team", "[email protected]")]
102
103 STATIC_URL = "/static/"
104 STATICFILES_DIRS = (os.path.join(BASE_DIR, "core/templates/static"),)
105 STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
106
107 MEDIA_ROOT = os.path.join(BASE_DIR, "media")
108 MEDIA_URL = "/media/"
109
110 # TODO: re-organize and simplify staticfiles settings
111 if "CIVIWIKI_LOCAL_NAME" not in os.environ:
112 STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
113
114 # Use DATABASE_URL in production
115 DATABASE_URL = os.getenv("DATABASE_URL")
116
117 if DATABASE_URL is not None:
118 DATABASES = {"default": DATABASE_URL}
119 else:
120 # Default to sqlite for simplicity in development
121 DATABASES = {
122 "default": {
123 "ENGINE": "django.db.backends.sqlite3",
124 "NAME": BASE_DIR + "/" + "db.sqlite3",
125 }
126 }
127
128 # Email Backend Setup
129 if "EMAIL_HOST" not in os.environ:
130 EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
131 EMAIL_HOST_USER = "[email protected]"
132 else:
133 EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
134 EMAIL_HOST = os.getenv("EMAIL_HOST")
135 EMAIL_PORT = os.getenv("EMAIL_PORT")
136 EMAIL_HOST_USER = os.getenv("EMAIL_HOST_USER")
137 EMAIL_HOST_PASSWORD = os.getenv("EMAIL_HOST_PASSWORD")
138 EMAIL_USE_SSL = True
139 DEFAULT_FROM_EMAIL = EMAIL_HOST
140
141 # Notification API Settings
142 NOTIFICATIONS_SOFT_DELETE = True
143 NOTIFICATIONS_USE_JSONFIELD = True
144
145 # Django REST API Settings
146 DEFAULT_RENDERER_CLASSES = ("rest_framework.renderers.JSONRenderer",)
147
148 if DEBUG:
149 # Browsable HTML - Enabled only in Debug mode (dev)
150 DEFAULT_RENDERER_CLASSES = DEFAULT_RENDERER_CLASSES + (
151 "rest_framework.renderers.BrowsableAPIRenderer",
152 )
153
154 REST_FRAMEWORK = {
155 "DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.IsAuthenticated",),
156 "DEFAULT_RENDERER_CLASSES": DEFAULT_RENDERER_CLASSES,
157 "DEFAULT_AUTHENTICATION_CLASSES": (
158 "rest_framework.authentication.BasicAuthentication",
159 "rest_framework.authentication.SessionAuthentication",
160 ),
161 }
162
163 # CORS Settings
164 CORS_ORIGIN_ALLOW_ALL = True
165
166 # Custom User model
167 AUTH_USER_MODEL = "accounts.User"
168
169 DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
170
171 # Login Logout URLS
172 LOGIN_URL = "login/"
173 LOGIN_REDIRECT_URL = "/"
174 LOGOUT_REDIRECT_URL = "/"
175
176 AUTH_PASSWORD_VALIDATORS = [
177 {
178 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator", # noqa: E501
179 },
180 {
181 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
182 "OPTIONS": {
183 "min_length": 4,
184 },
185 },
186 {
187 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
188 },
189 {
190 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
191 },
192 ]
193
194 LOGGING = {
195 "version": 1,
196 "disable_existing_loggers": False,
197 "formatters": {"rich": {"datefmt": "[%X]"}},
198 "handlers": {
199 "console": {
200 "class": "rich.logging.RichHandler",
201 "formatter": "rich",
202 "level": "WARNING",
203 # "filters": ["require_debug_true"],
204 "rich_tracebacks": True,
205 "tracebacks_show_locals": True,
206 }
207 },
208 "loggers": {"django": {"handlers": ["console"]}},
209 }
210
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/project/core/settings.py b/project/core/settings.py
--- a/project/core/settings.py
+++ b/project/core/settings.py
@@ -24,6 +24,7 @@
"django.contrib.messages",
"django.contrib.staticfiles",
"django_extensions",
+ "django_linear_migrations",
"storages",
"core",
"rest_framework",
@@ -73,7 +74,6 @@
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
-USE_L10N = True
USE_TZ = True
TEMPLATES = [
|
{"golden_diff": "diff --git a/project/core/settings.py b/project/core/settings.py\n--- a/project/core/settings.py\n+++ b/project/core/settings.py\n@@ -24,6 +24,7 @@\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django_extensions\",\n+ \"django_linear_migrations\",\n \"storages\",\n \"core\",\n \"rest_framework\",\n@@ -73,7 +74,6 @@\n LANGUAGE_CODE = \"en-us\"\n TIME_ZONE = \"UTC\"\n USE_I18N = True\n-USE_L10N = True\n USE_TZ = True\n \n TEMPLATES = [\n", "issue": "enable django-linear-migrationss\n## Task\r\n\r\n- [ ] add `django-linear-migrations` with the following command `poetry add django-linear-migrations --group dev`\r\n- [ ] follow the rest of the [django-linear-migrations configuration instructions](https://github.com/adamchainz/django-linear-migrations#installation)\n", "before_files": [{"content": "\"\"\"\nDjango settings for civiwiki project.\nDarius Calliet May 12, 2016\n\nProduction settings file to select proper environment variables.\n\"\"\"\nimport os\n\n# False if not in os.environ\nDEBUG = os.getenv(\"DEBUG\", False)\n\n# defaults to second value if not found in os.environ\nDJANGO_HOST = os.getenv(\"DJANGO_HOST\", \"LOCALHOST\")\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nSECRET_KEY = os.getenv(\"DJANGO_SECRET_KEY\", \"TEST_KEY_FOR_DEVELOPMENT\")\nALLOWED_HOSTS = [\".herokuapp.com\", \".civiwiki.org\", \"127.0.0.1\", \"localhost\", \"0.0.0.0\"]\n\nINSTALLED_APPS = (\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django_extensions\",\n \"storages\",\n \"core\",\n \"rest_framework\",\n \"accounts.apps.AccountsConfig\",\n \"threads\",\n \"notifications\",\n \"corsheaders\",\n \"taggit\",\n \"categories\",\n \"notification\",\n \"debug_toolbar\",\n)\n\nMIDDLEWARE = [\n \"debug_toolbar.middleware.DebugToolbarMiddleware\",\n \"corsheaders.middleware.CorsMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n # 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nINTERNAL_IPS = [\n \"127.0.0.1\",\n]\n\nCSRF_USE_SESSIONS = (\n True # Store the CSRF token in the users session instead of in a cookie\n)\n\nCORS_ORIGIN_ALLOW_ALL = True\nROOT_URLCONF = \"core.urls\"\n\n# SSL Setup\nif DJANGO_HOST != \"LOCALHOST\":\n SECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n SECURE_SSL_REDIRECT = True\n SESSION_COOKIE_SECURE = True\n CSRF_COOKIE_SECURE = True\n\n# Internationalization & Localization\nLANGUAGE_CODE = \"en-us\"\nTIME_ZONE = \"UTC\"\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\n os.path.join(BASE_DIR, \"threads/templates/threads\"),\n os.path.join(BASE_DIR, \"accounts/templates/accounts\"),\n ], # TODO: Add non-webapp template directory\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n 
\"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"core.wsgi.application\"\n\n# Apex Contact for Production Errors\nADMINS = [(\"Development Team\", \"[email protected]\")]\n\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = (os.path.join(BASE_DIR, \"core/templates/static\"),)\nSTATIC_ROOT = os.path.join(BASE_DIR, \"staticfiles\")\n\nMEDIA_ROOT = os.path.join(BASE_DIR, \"media\")\nMEDIA_URL = \"/media/\"\n\n# TODO: re-organize and simplify staticfiles settings\nif \"CIVIWIKI_LOCAL_NAME\" not in os.environ:\n STATICFILES_STORAGE = \"whitenoise.storage.CompressedManifestStaticFilesStorage\"\n\n# Use DATABASE_URL in production\nDATABASE_URL = os.getenv(\"DATABASE_URL\")\n\nif DATABASE_URL is not None:\n DATABASES = {\"default\": DATABASE_URL}\nelse:\n # Default to sqlite for simplicity in development\n DATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": BASE_DIR + \"/\" + \"db.sqlite3\",\n }\n }\n\n# Email Backend Setup\nif \"EMAIL_HOST\" not in os.environ:\n EMAIL_BACKEND = \"django.core.mail.backends.console.EmailBackend\"\n EMAIL_HOST_USER = \"[email protected]\"\nelse:\n EMAIL_BACKEND = \"django.core.mail.backends.smtp.EmailBackend\"\n EMAIL_HOST = os.getenv(\"EMAIL_HOST\")\n EMAIL_PORT = os.getenv(\"EMAIL_PORT\")\n EMAIL_HOST_USER = os.getenv(\"EMAIL_HOST_USER\")\n EMAIL_HOST_PASSWORD = os.getenv(\"EMAIL_HOST_PASSWORD\")\n EMAIL_USE_SSL = True\n DEFAULT_FROM_EMAIL = EMAIL_HOST\n\n# Notification API Settings\nNOTIFICATIONS_SOFT_DELETE = True\nNOTIFICATIONS_USE_JSONFIELD = True\n\n# Django REST API Settings\nDEFAULT_RENDERER_CLASSES = (\"rest_framework.renderers.JSONRenderer\",)\n\nif DEBUG:\n # Browsable HTML - Enabled only in Debug mode (dev)\n DEFAULT_RENDERER_CLASSES = DEFAULT_RENDERER_CLASSES + (\n \"rest_framework.renderers.BrowsableAPIRenderer\",\n )\n\nREST_FRAMEWORK = {\n \"DEFAULT_PERMISSION_CLASSES\": (\"rest_framework.permissions.IsAuthenticated\",),\n \"DEFAULT_RENDERER_CLASSES\": DEFAULT_RENDERER_CLASSES,\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"rest_framework.authentication.BasicAuthentication\",\n \"rest_framework.authentication.SessionAuthentication\",\n ),\n}\n\n# CORS Settings\nCORS_ORIGIN_ALLOW_ALL = True\n\n# Custom User model\nAUTH_USER_MODEL = \"accounts.User\"\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\n# Login Logout URLS\nLOGIN_URL = \"login/\"\nLOGIN_REDIRECT_URL = \"/\"\nLOGOUT_REDIRECT_URL = \"/\"\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\", # noqa: E501\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n \"OPTIONS\": {\n \"min_length\": 4,\n },\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\"rich\": {\"datefmt\": \"[%X]\"}},\n \"handlers\": {\n \"console\": {\n \"class\": \"rich.logging.RichHandler\",\n \"formatter\": \"rich\",\n \"level\": \"WARNING\",\n # \"filters\": [\"require_debug_true\"],\n \"rich_tracebacks\": True,\n \"tracebacks_show_locals\": True,\n }\n },\n \"loggers\": {\"django\": {\"handlers\": [\"console\"]}},\n}\n", "path": "project/core/settings.py"}], "after_files": [{"content": "\"\"\"\nDjango settings for civiwiki project.\nDarius Calliet May 
12, 2016\n\nProduction settings file to select proper environment variables.\n\"\"\"\nimport os\n\n# False if not in os.environ\nDEBUG = os.getenv(\"DEBUG\", False)\n\n# defaults to second value if not found in os.environ\nDJANGO_HOST = os.getenv(\"DJANGO_HOST\", \"LOCALHOST\")\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nSECRET_KEY = os.getenv(\"DJANGO_SECRET_KEY\", \"TEST_KEY_FOR_DEVELOPMENT\")\nALLOWED_HOSTS = [\".herokuapp.com\", \".civiwiki.org\", \"127.0.0.1\", \"localhost\", \"0.0.0.0\"]\n\nINSTALLED_APPS = (\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django_extensions\",\n \"django_linear_migrations\",\n \"storages\",\n \"core\",\n \"rest_framework\",\n \"accounts.apps.AccountsConfig\",\n \"threads\",\n \"notifications\",\n \"corsheaders\",\n \"taggit\",\n \"categories\",\n \"notification\",\n \"debug_toolbar\",\n)\n\nMIDDLEWARE = [\n \"debug_toolbar.middleware.DebugToolbarMiddleware\",\n \"corsheaders.middleware.CorsMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n # 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nINTERNAL_IPS = [\n \"127.0.0.1\",\n]\n\nCSRF_USE_SESSIONS = (\n True # Store the CSRF token in the users session instead of in a cookie\n)\n\nCORS_ORIGIN_ALLOW_ALL = True\nROOT_URLCONF = \"core.urls\"\n\n# SSL Setup\nif DJANGO_HOST != \"LOCALHOST\":\n SECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n SECURE_SSL_REDIRECT = True\n SESSION_COOKIE_SECURE = True\n CSRF_COOKIE_SECURE = True\n\n# Internationalization & Localization\nLANGUAGE_CODE = \"en-us\"\nTIME_ZONE = \"UTC\"\nUSE_I18N = True\nUSE_TZ = True\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\n os.path.join(BASE_DIR, \"threads/templates/threads\"),\n os.path.join(BASE_DIR, \"accounts/templates/accounts\"),\n ], # TODO: Add non-webapp template directory\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"core.wsgi.application\"\n\n# Apex Contact for Production Errors\nADMINS = [(\"Development Team\", \"[email protected]\")]\n\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = (os.path.join(BASE_DIR, \"core/templates/static\"),)\nSTATIC_ROOT = os.path.join(BASE_DIR, \"staticfiles\")\n\nMEDIA_ROOT = os.path.join(BASE_DIR, \"media\")\nMEDIA_URL = \"/media/\"\n\n# TODO: re-organize and simplify staticfiles settings\nif \"CIVIWIKI_LOCAL_NAME\" not in os.environ:\n STATICFILES_STORAGE = \"whitenoise.storage.CompressedManifestStaticFilesStorage\"\n\n# Use DATABASE_URL in production\nDATABASE_URL = os.getenv(\"DATABASE_URL\")\n\nif DATABASE_URL is not None:\n DATABASES = {\"default\": DATABASE_URL}\nelse:\n # Default to sqlite for simplicity in development\n DATABASES = {\n \"default\": {\n 
\"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": BASE_DIR + \"/\" + \"db.sqlite3\",\n }\n }\n\n# Email Backend Setup\nif \"EMAIL_HOST\" not in os.environ:\n EMAIL_BACKEND = \"django.core.mail.backends.console.EmailBackend\"\n EMAIL_HOST_USER = \"[email protected]\"\nelse:\n EMAIL_BACKEND = \"django.core.mail.backends.smtp.EmailBackend\"\n EMAIL_HOST = os.getenv(\"EMAIL_HOST\")\n EMAIL_PORT = os.getenv(\"EMAIL_PORT\")\n EMAIL_HOST_USER = os.getenv(\"EMAIL_HOST_USER\")\n EMAIL_HOST_PASSWORD = os.getenv(\"EMAIL_HOST_PASSWORD\")\n EMAIL_USE_SSL = True\n DEFAULT_FROM_EMAIL = EMAIL_HOST\n\n# Notification API Settings\nNOTIFICATIONS_SOFT_DELETE = True\nNOTIFICATIONS_USE_JSONFIELD = True\n\n# Django REST API Settings\nDEFAULT_RENDERER_CLASSES = (\"rest_framework.renderers.JSONRenderer\",)\n\nif DEBUG:\n # Browsable HTML - Enabled only in Debug mode (dev)\n DEFAULT_RENDERER_CLASSES = DEFAULT_RENDERER_CLASSES + (\n \"rest_framework.renderers.BrowsableAPIRenderer\",\n )\n\nREST_FRAMEWORK = {\n \"DEFAULT_PERMISSION_CLASSES\": (\"rest_framework.permissions.IsAuthenticated\",),\n \"DEFAULT_RENDERER_CLASSES\": DEFAULT_RENDERER_CLASSES,\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"rest_framework.authentication.BasicAuthentication\",\n \"rest_framework.authentication.SessionAuthentication\",\n ),\n}\n\n# CORS Settings\nCORS_ORIGIN_ALLOW_ALL = True\n\n# Custom User model\nAUTH_USER_MODEL = \"accounts.User\"\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\n# Login Logout URLS\nLOGIN_URL = \"login/\"\nLOGIN_REDIRECT_URL = \"/\"\nLOGOUT_REDIRECT_URL = \"/\"\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\", # noqa: E501\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n \"OPTIONS\": {\n \"min_length\": 4,\n },\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\"rich\": {\"datefmt\": \"[%X]\"}},\n \"handlers\": {\n \"console\": {\n \"class\": \"rich.logging.RichHandler\",\n \"formatter\": \"rich\",\n \"level\": \"WARNING\",\n # \"filters\": [\"require_debug_true\"],\n \"rich_tracebacks\": True,\n \"tracebacks_show_locals\": True,\n }\n },\n \"loggers\": {\"django\": {\"handlers\": [\"console\"]}},\n}\n", "path": "project/core/settings.py"}]}
| 2,271 | 133 |
gh_patches_debug_17951
|
rasdani/github-patches
|
git_diff
|
wemake-services__wemake-python-styleguide-243
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Forbid to compare the identical variables
# Rule request
<!-- Thanks for coming up with a proposal! -->
## Thesis
We need to find cases like this: `if some_value == some_value:`.
We also need to find other operators: `is`, `is not`, `>`, `<`, `!=`, `>=`, `<=`.
## Reasoning
It clearly does not make any sense. It is a bug in the source code.
Related: https://help.semmle.com/wiki/display/PYTHON/Comparison+of+identical+values
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wemake_python_styleguide/visitors/ast/comparisons.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 import ast
4 from typing import ClassVar, Sequence
5
6 from wemake_python_styleguide.types import AnyNodes
7 from wemake_python_styleguide.violations.consistency import (
8 ComparisonOrderViolation,
9 ConstantComparisonViolation,
10 MultipleInComparisonViolation,
11 )
12 from wemake_python_styleguide.visitors.base import BaseNodeVisitor
13
14
15 class ConstantComparisonVisitor(BaseNodeVisitor):
16 """Restricts the comparison of literals."""
17
18 def _check_is_literal(self, node: ast.AST) -> bool:
19 """
20 Checks for nodes that contains only constants.
21
22 If the node contains only literals it will be evaluted.
23 When node relies on some other names, it won't be evaluted.
24 """
25 try:
26 ast.literal_eval(node)
27 except ValueError:
28 return False
29 else:
30 return True
31
32 def _check_literal_compare(self, node: ast.Compare) -> None:
33 last_was_literal = self._check_is_literal(node.left)
34 for comparator in node.comparators:
35 next_is_literal = self._check_is_literal(comparator)
36 if last_was_literal and next_is_literal:
37 self.add_violation(ConstantComparisonViolation(node))
38 break
39 last_was_literal = next_is_literal
40
41 def visit_Compare(self, node: ast.Compare) -> None:
42 """
43 Ensures that compares are written correctly.
44
45 Raises:
46 ConstantComparisonViolation
47
48 """
49 self._check_literal_compare(node)
50 self.generic_visit(node)
51
52
53 class WrongOrderVisitor(BaseNodeVisitor):
54 """Restricts comparision where argument doesn't come first."""
55
56 _allowed_left_nodes: ClassVar[AnyNodes] = (
57 ast.Name,
58 ast.Call,
59 ast.Attribute,
60 )
61
62 _special_cases: ClassVar[AnyNodes] = (
63 ast.In,
64 ast.NotIn,
65 )
66
67 def _is_special_case(self, node: ast.Compare) -> bool:
68 """
69 Operators ``in`` and ``not in`` are special cases.
70
71 Why? Because it is perfectly fine to use something like:
72 ``if 'key' in some_dict: ...``
73 This should not be an issue.
74
75 When there are multiple special operators it is still a separate issue.
76 """
77 return isinstance(node.ops[0], self._special_cases)
78
79 def _is_left_node_valid(self, left: ast.AST) -> bool:
80 if isinstance(left, self._allowed_left_nodes):
81 return True
82 if isinstance(left, ast.BinOp):
83 return (
84 self._is_left_node_valid(left.left) or
85 self._is_left_node_valid(left.right)
86 )
87 return False
88
89 def _has_wrong_nodes_on_the_right(
90 self,
91 comparators: Sequence[ast.AST],
92 ) -> bool:
93 for right in comparators:
94 if isinstance(right, self._allowed_left_nodes):
95 return True
96 if isinstance(right, ast.BinOp):
97 return self._has_wrong_nodes_on_the_right([
98 right.left, right.right,
99 ])
100 return False
101
102 def _check_ordering(self, node: ast.Compare) -> None:
103 if self._is_left_node_valid(node.left):
104 return
105
106 if self._is_special_case(node):
107 return
108
109 if len(node.comparators) > 1:
110 return
111
112 if not self._has_wrong_nodes_on_the_right(node.comparators):
113 return
114
115 self.add_violation(ComparisonOrderViolation(node))
116
117 def visit_Compare(self, node: ast.Compare) -> None:
118 """
119 Forbids comparision where argument doesn't come first.
120
121 Raises:
122 ComparisonOrderViolation
123
124 """
125 self._check_ordering(node)
126 self.generic_visit(node)
127
128
129 # TODO(@sobolevn): refactor to be a single visitor
130 class MultipleInVisitor(BaseNodeVisitor):
131 """Restricts comparision where multiple `in`s are used."""
132
133 def _has_multiple_in_comparisons(self, node: ast.Compare) -> bool:
134 count = 0
135 for op in node.ops:
136 if isinstance(op, ast.In):
137 count += 1
138 return count > 1
139
140 def _count_in_comparisons(self, node: ast.Compare) -> None:
141 if self._has_multiple_in_comparisons(node):
142 self.add_violation(MultipleInComparisonViolation(node))
143
144 def visit_Compare(self, node: ast.Compare) -> None:
145 """
146 Forbids comparisons including multiple 'in's in a statement.
147
148 Raise:
149 MultipleInComparisonViolation
150
151 """
152 self._count_in_comparisons(node)
153 self.generic_visit(node)
154
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/wemake_python_styleguide/visitors/ast/comparisons.py b/wemake_python_styleguide/visitors/ast/comparisons.py
--- a/wemake_python_styleguide/visitors/ast/comparisons.py
+++ b/wemake_python_styleguide/visitors/ast/comparisons.py
@@ -151,3 +151,32 @@
"""
self._count_in_comparisons(node)
self.generic_visit(node)
+
+
+class RedundantComparisonVisitor(BaseNodeVisitor):
+ """Restricts the comparison where always same result."""
+
+ def visit_Compare(self, node: ast.Compare) -> None:
+ """
+ Ensures that compares are not for same variable.
+
+ Raises:
+ ConstantComparisonViolation
+
+ """
+ self._check_redundant_compare(node)
+ self.generic_visit(node)
+
+ def _is_same_variable(self, left: ast.AST, right: ast.AST) -> bool:
+ if isinstance(left, ast.Name) and isinstance(right, ast.Name):
+ if left.id is right.id:
+ return True
+ return False
+
+ def _check_redundant_compare(self, node: ast.Compare) -> None:
+ last_variable = node.left
+ for next_variable in node.comparators:
+ if self._is_same_variable(last_variable, next_variable):
+ self.add_violation(ConstantComparisonViolation(node))
+ break
+ last_variable = next_variable
|
{"golden_diff": "diff --git a/wemake_python_styleguide/visitors/ast/comparisons.py b/wemake_python_styleguide/visitors/ast/comparisons.py\n--- a/wemake_python_styleguide/visitors/ast/comparisons.py\n+++ b/wemake_python_styleguide/visitors/ast/comparisons.py\n@@ -151,3 +151,32 @@\n \"\"\"\n self._count_in_comparisons(node)\n self.generic_visit(node)\n+\n+\n+class RedundantComparisonVisitor(BaseNodeVisitor):\n+ \"\"\"Restricts the comparison where always same result.\"\"\"\n+\n+ def visit_Compare(self, node: ast.Compare) -> None:\n+ \"\"\"\n+ Ensures that compares are not for same variable.\n+\n+ Raises:\n+ ConstantComparisonViolation\n+\n+ \"\"\"\n+ self._check_redundant_compare(node)\n+ self.generic_visit(node)\n+\n+ def _is_same_variable(self, left: ast.AST, right: ast.AST) -> bool:\n+ if isinstance(left, ast.Name) and isinstance(right, ast.Name):\n+ if left.id is right.id:\n+ return True\n+ return False\n+\n+ def _check_redundant_compare(self, node: ast.Compare) -> None:\n+ last_variable = node.left\n+ for next_variable in node.comparators:\n+ if self._is_same_variable(last_variable, next_variable):\n+ self.add_violation(ConstantComparisonViolation(node))\n+ break\n+ last_variable = next_variable\n", "issue": "Forbid to compare the identical variables\n# Rule request\r\n\r\n<!-- Thanks for coming up with a proposal! -->\r\n\r\n## Thesis\r\n\r\nWe need to find cases like this: `if some_value == some_value:`.\r\nWe also need to find other operators: `is`, `is not`, `>`, `<`, `!=`, `>=`, `<=`.\r\n\r\n## Reasoning\r\n\r\nIt clearly does not make any sense. It is a bug in the source code.\r\n\r\nRelated: https://help.semmle.com/wiki/display/PYTHON/Comparison+of+identical+values\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport ast\nfrom typing import ClassVar, Sequence\n\nfrom wemake_python_styleguide.types import AnyNodes\nfrom wemake_python_styleguide.violations.consistency import (\n ComparisonOrderViolation,\n ConstantComparisonViolation,\n MultipleInComparisonViolation,\n)\nfrom wemake_python_styleguide.visitors.base import BaseNodeVisitor\n\n\nclass ConstantComparisonVisitor(BaseNodeVisitor):\n \"\"\"Restricts the comparison of literals.\"\"\"\n\n def _check_is_literal(self, node: ast.AST) -> bool:\n \"\"\"\n Checks for nodes that contains only constants.\n\n If the node contains only literals it will be evaluted.\n When node relies on some other names, it won't be evaluted.\n \"\"\"\n try:\n ast.literal_eval(node)\n except ValueError:\n return False\n else:\n return True\n\n def _check_literal_compare(self, node: ast.Compare) -> None:\n last_was_literal = self._check_is_literal(node.left)\n for comparator in node.comparators:\n next_is_literal = self._check_is_literal(comparator)\n if last_was_literal and next_is_literal:\n self.add_violation(ConstantComparisonViolation(node))\n break\n last_was_literal = next_is_literal\n\n def visit_Compare(self, node: ast.Compare) -> None:\n \"\"\"\n Ensures that compares are written correctly.\n\n Raises:\n ConstantComparisonViolation\n\n \"\"\"\n self._check_literal_compare(node)\n self.generic_visit(node)\n\n\nclass WrongOrderVisitor(BaseNodeVisitor):\n \"\"\"Restricts comparision where argument doesn't come first.\"\"\"\n\n _allowed_left_nodes: ClassVar[AnyNodes] = (\n ast.Name,\n ast.Call,\n ast.Attribute,\n )\n\n _special_cases: ClassVar[AnyNodes] = (\n ast.In,\n ast.NotIn,\n )\n\n def _is_special_case(self, node: ast.Compare) -> bool:\n \"\"\"\n Operators ``in`` and ``not in`` are special cases.\n\n Why? 
Because it is perfectly fine to use something like:\n ``if 'key' in some_dict: ...``\n This should not be an issue.\n\n When there are multiple special operators it is still a separate issue.\n \"\"\"\n return isinstance(node.ops[0], self._special_cases)\n\n def _is_left_node_valid(self, left: ast.AST) -> bool:\n if isinstance(left, self._allowed_left_nodes):\n return True\n if isinstance(left, ast.BinOp):\n return (\n self._is_left_node_valid(left.left) or\n self._is_left_node_valid(left.right)\n )\n return False\n\n def _has_wrong_nodes_on_the_right(\n self,\n comparators: Sequence[ast.AST],\n ) -> bool:\n for right in comparators:\n if isinstance(right, self._allowed_left_nodes):\n return True\n if isinstance(right, ast.BinOp):\n return self._has_wrong_nodes_on_the_right([\n right.left, right.right,\n ])\n return False\n\n def _check_ordering(self, node: ast.Compare) -> None:\n if self._is_left_node_valid(node.left):\n return\n\n if self._is_special_case(node):\n return\n\n if len(node.comparators) > 1:\n return\n\n if not self._has_wrong_nodes_on_the_right(node.comparators):\n return\n\n self.add_violation(ComparisonOrderViolation(node))\n\n def visit_Compare(self, node: ast.Compare) -> None:\n \"\"\"\n Forbids comparision where argument doesn't come first.\n\n Raises:\n ComparisonOrderViolation\n\n \"\"\"\n self._check_ordering(node)\n self.generic_visit(node)\n\n\n# TODO(@sobolevn): refactor to be a single visitor\nclass MultipleInVisitor(BaseNodeVisitor):\n \"\"\"Restricts comparision where multiple `in`s are used.\"\"\"\n\n def _has_multiple_in_comparisons(self, node: ast.Compare) -> bool:\n count = 0\n for op in node.ops:\n if isinstance(op, ast.In):\n count += 1\n return count > 1\n\n def _count_in_comparisons(self, node: ast.Compare) -> None:\n if self._has_multiple_in_comparisons(node):\n self.add_violation(MultipleInComparisonViolation(node))\n\n def visit_Compare(self, node: ast.Compare) -> None:\n \"\"\"\n Forbids comparisons including multiple 'in's in a statement.\n\n Raise:\n MultipleInComparisonViolation\n\n \"\"\"\n self._count_in_comparisons(node)\n self.generic_visit(node)\n", "path": "wemake_python_styleguide/visitors/ast/comparisons.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport ast\nfrom typing import ClassVar, Sequence\n\nfrom wemake_python_styleguide.types import AnyNodes\nfrom wemake_python_styleguide.violations.consistency import (\n ComparisonOrderViolation,\n ConstantComparisonViolation,\n MultipleInComparisonViolation,\n)\nfrom wemake_python_styleguide.visitors.base import BaseNodeVisitor\n\n\nclass ConstantComparisonVisitor(BaseNodeVisitor):\n \"\"\"Restricts the comparison of literals.\"\"\"\n\n def _check_is_literal(self, node: ast.AST) -> bool:\n \"\"\"\n Checks for nodes that contains only constants.\n\n If the node contains only literals it will be evaluted.\n When node relies on some other names, it won't be evaluted.\n \"\"\"\n try:\n ast.literal_eval(node)\n except ValueError:\n return False\n else:\n return True\n\n def _check_literal_compare(self, node: ast.Compare) -> None:\n last_was_literal = self._check_is_literal(node.left)\n for comparator in node.comparators:\n next_is_literal = self._check_is_literal(comparator)\n if last_was_literal and next_is_literal:\n self.add_violation(ConstantComparisonViolation(node))\n break\n last_was_literal = next_is_literal\n\n def visit_Compare(self, node: ast.Compare) -> None:\n \"\"\"\n Ensures that compares are written correctly.\n\n Raises:\n ConstantComparisonViolation\n\n \"\"\"\n 
self._check_literal_compare(node)\n self.generic_visit(node)\n\n\nclass WrongOrderVisitor(BaseNodeVisitor):\n \"\"\"Restricts comparision where argument doesn't come first.\"\"\"\n\n _allowed_left_nodes: ClassVar[AnyNodes] = (\n ast.Name,\n ast.Call,\n ast.Attribute,\n )\n\n _special_cases: ClassVar[AnyNodes] = (\n ast.In,\n ast.NotIn,\n )\n\n def _is_special_case(self, node: ast.Compare) -> bool:\n \"\"\"\n Operators ``in`` and ``not in`` are special cases.\n\n Why? Because it is perfectly fine to use something like:\n ``if 'key' in some_dict: ...``\n This should not be an issue.\n\n When there are multiple special operators it is still a separate issue.\n \"\"\"\n return isinstance(node.ops[0], self._special_cases)\n\n def _is_left_node_valid(self, left: ast.AST) -> bool:\n if isinstance(left, self._allowed_left_nodes):\n return True\n if isinstance(left, ast.BinOp):\n return (\n self._is_left_node_valid(left.left) or\n self._is_left_node_valid(left.right)\n )\n return False\n\n def _has_wrong_nodes_on_the_right(\n self,\n comparators: Sequence[ast.AST],\n ) -> bool:\n for right in comparators:\n if isinstance(right, self._allowed_left_nodes):\n return True\n if isinstance(right, ast.BinOp):\n return self._has_wrong_nodes_on_the_right([\n right.left, right.right,\n ])\n return False\n\n def _check_ordering(self, node: ast.Compare) -> None:\n if self._is_left_node_valid(node.left):\n return\n\n if self._is_special_case(node):\n return\n\n if len(node.comparators) > 1:\n return\n\n if not self._has_wrong_nodes_on_the_right(node.comparators):\n return\n\n self.add_violation(ComparisonOrderViolation(node))\n\n def visit_Compare(self, node: ast.Compare) -> None:\n \"\"\"\n Forbids comparision where argument doesn't come first.\n\n Raises:\n ComparisonOrderViolation\n\n \"\"\"\n self._check_ordering(node)\n self.generic_visit(node)\n\n\n# TODO(@sobolevn): refactor to be a single visitor\nclass MultipleInVisitor(BaseNodeVisitor):\n \"\"\"Restricts comparision where multiple `in`s are used.\"\"\"\n\n def _has_multiple_in_comparisons(self, node: ast.Compare) -> bool:\n count = 0\n for op in node.ops:\n if isinstance(op, ast.In):\n count += 1\n return count > 1\n\n def _count_in_comparisons(self, node: ast.Compare) -> None:\n if self._has_multiple_in_comparisons(node):\n self.add_violation(MultipleInComparisonViolation(node))\n\n def visit_Compare(self, node: ast.Compare) -> None:\n \"\"\"\n Forbids comparisons including multiple 'in's in a statement.\n\n Raise:\n MultipleInComparisonViolation\n\n \"\"\"\n self._count_in_comparisons(node)\n self.generic_visit(node)\n\n\nclass RedundantComparisonVisitor(BaseNodeVisitor):\n \"\"\"Restricts the comparison where always same result.\"\"\"\n\n def visit_Compare(self, node: ast.Compare) -> None:\n \"\"\"\n Ensures that compares are not for same variable.\n\n Raises:\n ConstantComparisonViolation\n\n \"\"\"\n self._check_redundant_compare(node)\n self.generic_visit(node)\n\n def _is_same_variable(self, left: ast.AST, right: ast.AST) -> bool:\n if isinstance(left, ast.Name) and isinstance(right, ast.Name):\n if left.id is right.id:\n return True\n return False\n\n def _check_redundant_compare(self, node: ast.Compare) -> None:\n last_variable = node.left\n for next_variable in node.comparators:\n if self._is_same_variable(last_variable, next_variable):\n self.add_violation(ConstantComparisonViolation(node))\n break\n last_variable = next_variable\n", "path": "wemake_python_styleguide/visitors/ast/comparisons.py"}]}
num_tokens: 1,742 | num_tokens_diff: 334
problem_id: gh_patches_debug_26380
source: rasdani/github-patches
task_type: git_diff
in_source_id: conda__conda-2445
prompt:
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
conda info --json and package lookup
If you set the `--json` flag for `conda info` when searching for packages, you sometimes get nothing:
``` bash
$ conda info numpy=1.11.0=py35_0
Fetching package metadata: ....
numpy 1.11.0 py35_0
-------------------
file name : numpy-1.11.0-py35_0.tar.bz2
name : numpy
version : 1.11.0
build number: 0
build string: py35_0
channel : defaults
size : 6.1 MB
date : 2016-03-28
license : BSD
md5 : 1900998c19c5e310687013f95374bba2
installed environments:
dependencies:
mkl 11.3.1
python 3.5*
$ conda info --json numpy=1.11.0=py35_0
{}
```
Things work fine for `conda info --json numpy`, so it's something with the spec format.
conda info:
```
platform : linux-64
conda version : 4.0.5
conda-build version : not installed
python version : 2.7.11.final.0
requests version : 2.9.1
root environment : /opt/conda (writable)
default environment : /opt/conda
envs directories : /opt/conda/envs
package cache : /opt/conda/pkgs
channel URLs : https://repo.continuum.io/pkgs/free/linux-64/
https://repo.continuum.io/pkgs/free/noarch/
https://repo.continuum.io/pkgs/pro/linux-64/
https://repo.continuum.io/pkgs/pro/noarch/
config file : None
is foreign system : False
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda/cli/main_info.py`
Content:
```
1 # (c) 2012-2013 Continuum Analytics, Inc. / http://continuum.io
2 # All Rights Reserved
3 #
4 # conda is distributed under the terms of the BSD 3-clause license.
5 # Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
6
7 from __future__ import print_function, division, absolute_import
8
9 import json
10 import os
11 import re
12 import sys
13 from collections import defaultdict, OrderedDict
14 from itertools import chain
15 from os import listdir
16 from os.path import exists, expanduser, join
17
18 from conda.cli import common
19 from conda.compat import iteritems
20
21 help = "Display information about current conda install."
22
23 example = """
24
25 Examples:
26
27 conda info -a
28 """
29
30 def configure_parser(sub_parsers):
31 p = sub_parsers.add_parser(
32 'info',
33 description=help,
34 help=help,
35 epilog=example,
36 )
37 common.add_parser_json(p)
38 p.add_argument(
39 '-a', "--all",
40 action="store_true",
41 help="Show all information, (environments, license, and system "
42 "information.")
43 p.add_argument(
44 '-e', "--envs",
45 action="store_true",
46 help="List all known conda environments.",
47 )
48 p.add_argument(
49 '-l', "--license",
50 action="store_true",
51 help="Display information about the local conda licenses list.",
52 )
53 p.add_argument(
54 '-s', "--system",
55 action="store_true",
56 help="List environment variables.",
57 )
58 p.add_argument(
59 'packages',
60 action="store",
61 nargs='*',
62 help="Display information about packages.",
63 )
64 p.add_argument(
65 '--root',
66 action='store_true',
67 help='Display root environment path.',
68 )
69 p.add_argument(
70 '--unsafe-channels',
71 action='store_true',
72 help='Display list of channels with tokens exposed.',
73 )
74 p.set_defaults(func=execute)
75
76
77 def show_pkg_info(name):
78 from conda.api import get_index
79 from conda.resolve import Resolve
80
81 index = get_index()
82 r = Resolve(index)
83 print(name)
84 if name in r.groups:
85 for pkg in sorted(r.get_pkgs(name)):
86 print(' %-15s %15s %s' % (
87 pkg.version,
88 pkg.build,
89 common.disp_features(r.features(pkg.fn))))
90 else:
91 print(' not available')
92 # TODO
93
94
95 python_re = re.compile('python\d\.\d')
96 def get_user_site():
97 site_dirs = []
98 if sys.platform != 'win32':
99 if exists(expanduser('~/.local/lib')):
100 for path in listdir(expanduser('~/.local/lib/')):
101 if python_re.match(path):
102 site_dirs.append("~/.local/lib/%s" % path)
103 else:
104 if 'APPDATA' not in os.environ:
105 return site_dirs
106 APPDATA = os.environ['APPDATA']
107 if exists(join(APPDATA, 'Python')):
108 site_dirs = [join(APPDATA, 'Python', i) for i in
109 listdir(join(APPDATA, 'PYTHON'))]
110 return site_dirs
111
112
113 def pretty_package(pkg):
114 import conda.config as config
115 from conda.utils import human_bytes
116
117 d = OrderedDict([
118 ('file name', pkg.fn),
119 ('name', pkg.name),
120 ('version', pkg.version),
121 ('build number', pkg.build_number),
122 ('build string', pkg.build),
123 ('channel', config.canonical_channel_name(pkg.channel)),
124 ('size', human_bytes(pkg.info['size'])),
125 ])
126 rest = pkg.info
127 for key in sorted(rest):
128 if key in {'build', 'depends', 'requires', 'channel', 'name',
129 'version', 'build_number', 'size'}:
130 continue
131 d[key] = rest[key]
132
133 print()
134 header = "%s %s %s" % (d['name'], d['version'], d['build string'])
135 print(header)
136 print('-'*len(header))
137 for key in d:
138 print("%-12s: %s" % (key, d[key]))
139 print('dependencies:')
140 for dep in pkg.info['depends']:
141 print(' %s' % dep)
142
143 def execute(args, parser):
144 import os
145 from os.path import dirname
146
147 import conda
148 import conda.config as config
149 from conda.resolve import Resolve
150 from conda.cli.main_init import is_initialized
151 from conda.api import get_index, get_package_versions
152
153 if args.root:
154 if args.json:
155 common.stdout_json({'root_prefix': config.root_dir})
156 else:
157 print(config.root_dir)
158 return
159
160 if args.packages:
161 if args.json:
162 results = defaultdict(list)
163 for arg in args.packages:
164 for pkg in get_package_versions(arg):
165 results[arg].append(pkg._asdict())
166 common.stdout_json(results)
167 return
168 index = get_index()
169 r = Resolve(index)
170 specs = map(common.arg2spec, args.packages)
171
172 for spec in specs:
173 versions = r.get_pkgs(spec)
174 for pkg in sorted(versions):
175 pretty_package(pkg)
176 return
177
178 options = 'envs', 'system', 'license'
179
180 try:
181 import requests
182 requests_version = requests.__version__
183 except ImportError:
184 requests_version = "could not import"
185 except Exception as e:
186 requests_version = "Error %s" % e
187
188 try:
189 import conda_build
190 except ImportError:
191 conda_build_version = "not installed"
192 except Exception as e:
193 conda_build_version = "Error %s" % e
194 else:
195 conda_build_version = conda_build.__version__
196
197 # this is a hack associated with channel weight until we get the package cache reworked
198 # in a future release
199 # for now, just ordering the channels for display in a semi-plausible way
200 d = defaultdict(list)
201 any(d[v[1]].append(k) for k, v in iteritems(config.get_channel_urls()))
202 channels = list(chain.from_iterable(d[q] for q in sorted(d, reverse=True)))
203
204 info_dict = dict(
205 platform=config.subdir,
206 conda_version=conda.__version__,
207 conda_build_version=conda_build_version,
208 root_prefix=config.root_dir,
209 root_writable=config.root_writable,
210 pkgs_dirs=config.pkgs_dirs,
211 envs_dirs=config.envs_dirs,
212 default_prefix=config.default_prefix,
213 channels=channels,
214 rc_path=config.rc_path,
215 user_rc_path=config.user_rc_path,
216 sys_rc_path=config.sys_rc_path,
217 is_foreign=bool(config.foreign),
218 envs=[],
219 python_version='.'.join(map(str, sys.version_info)),
220 requests_version=requests_version,
221 )
222
223 if args.unsafe_channels:
224 if not args.json:
225 print("\n".join(info_dict["channels"]))
226 else:
227 print(json.dumps({"channels": info_dict["channels"]}))
228 return 0
229 else:
230 info_dict['channels'] = [config.hide_binstar_tokens(c) for c in
231 info_dict['channels']]
232 if args.all or args.json:
233 for option in options:
234 setattr(args, option, True)
235
236 if args.all or all(not getattr(args, opt) for opt in options):
237 for key in 'pkgs_dirs', 'envs_dirs', 'channels':
238 info_dict['_' + key] = ('\n' + 24 * ' ').join(info_dict[key])
239 info_dict['_rtwro'] = ('writable' if info_dict['root_writable'] else
240 'read only')
241 print("""\
242 Current conda install:
243
244 platform : %(platform)s
245 conda version : %(conda_version)s
246 conda-build version : %(conda_build_version)s
247 python version : %(python_version)s
248 requests version : %(requests_version)s
249 root environment : %(root_prefix)s (%(_rtwro)s)
250 default environment : %(default_prefix)s
251 envs directories : %(_envs_dirs)s
252 package cache : %(_pkgs_dirs)s
253 channel URLs : %(_channels)s
254 config file : %(rc_path)s
255 is foreign system : %(is_foreign)s
256 """ % info_dict)
257 if not is_initialized():
258 print("""\
259 # NOTE:
260 # root directory '%s' is uninitialized""" % config.root_dir)
261
262 if args.envs:
263 common.handle_envs_list(info_dict['envs'], not args.json)
264
265 if args.system and not args.json:
266 from conda.cli.find_commands import find_commands, find_executable
267
268 print("sys.version: %s..." % (sys.version[:40]))
269 print("sys.prefix: %s" % sys.prefix)
270 print("sys.executable: %s" % sys.executable)
271 print("conda location: %s" % dirname(conda.__file__))
272 for cmd in sorted(set(find_commands() + ['build'])):
273 print("conda-%s: %s" % (cmd, find_executable('conda-' + cmd)))
274 print("user site dirs: ", end='')
275 site_dirs = get_user_site()
276 if site_dirs:
277 print(site_dirs[0])
278 else:
279 print()
280 for site_dir in site_dirs[1:]:
281 print(' %s' % site_dir)
282 print()
283
284 evars = ['PATH', 'PYTHONPATH', 'PYTHONHOME', 'CONDA_DEFAULT_ENV',
285 'CIO_TEST', 'CONDA_ENVS_PATH']
286 if config.platform == 'linux':
287 evars.append('LD_LIBRARY_PATH')
288 elif config.platform == 'osx':
289 evars.append('DYLD_LIBRARY_PATH')
290 for ev in sorted(evars):
291 print("%s: %s" % (ev, os.getenv(ev, '<not set>')))
292 print()
293
294 if args.license and not args.json:
295 try:
296 from _license import show_info
297 show_info()
298 except ImportError:
299 print("""\
300 WARNING: could not import _license.show_info
301 # try:
302 # $ conda install -n root _license""")
303
304 if args.json:
305 common.stdout_json(info_dict)
306
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
golden_diff:
diff --git a/conda/cli/main_info.py b/conda/cli/main_info.py
--- a/conda/cli/main_info.py
+++ b/conda/cli/main_info.py
@@ -148,7 +148,7 @@
import conda.config as config
from conda.resolve import Resolve
from conda.cli.main_init import is_initialized
- from conda.api import get_index, get_package_versions
+ from conda.api import get_index
if args.root:
if args.json:
@@ -158,21 +158,19 @@
return
if args.packages:
- if args.json:
- results = defaultdict(list)
- for arg in args.packages:
- for pkg in get_package_versions(arg):
- results[arg].append(pkg._asdict())
- common.stdout_json(results)
- return
index = get_index()
r = Resolve(index)
- specs = map(common.arg2spec, args.packages)
-
- for spec in specs:
- versions = r.get_pkgs(spec)
- for pkg in sorted(versions):
- pretty_package(pkg)
+ if args.json:
+ common.stdout_json({
+ package: [p._asdict()
+ for p in sorted(r.get_pkgs(common.arg2spec(package)))]
+ for package in args.packages
+ })
+ else:
+ for package in args.packages:
+ versions = r.get_pkgs(common.arg2spec(package))
+ for pkg in sorted(versions):
+ pretty_package(pkg)
return
options = 'envs', 'system', 'license'
|
{"golden_diff": "diff --git a/conda/cli/main_info.py b/conda/cli/main_info.py\n--- a/conda/cli/main_info.py\n+++ b/conda/cli/main_info.py\n@@ -148,7 +148,7 @@\n import conda.config as config\n from conda.resolve import Resolve\n from conda.cli.main_init import is_initialized\n- from conda.api import get_index, get_package_versions\n+ from conda.api import get_index\n \n if args.root:\n if args.json:\n@@ -158,21 +158,19 @@\n return\n \n if args.packages:\n- if args.json:\n- results = defaultdict(list)\n- for arg in args.packages:\n- for pkg in get_package_versions(arg):\n- results[arg].append(pkg._asdict())\n- common.stdout_json(results)\n- return\n index = get_index()\n r = Resolve(index)\n- specs = map(common.arg2spec, args.packages)\n-\n- for spec in specs:\n- versions = r.get_pkgs(spec)\n- for pkg in sorted(versions):\n- pretty_package(pkg)\n+ if args.json:\n+ common.stdout_json({\n+ package: [p._asdict()\n+ for p in sorted(r.get_pkgs(common.arg2spec(package)))]\n+ for package in args.packages\n+ })\n+ else:\n+ for package in args.packages:\n+ versions = r.get_pkgs(common.arg2spec(package))\n+ for pkg in sorted(versions):\n+ pretty_package(pkg)\n return\n \n options = 'envs', 'system', 'license'\n", "issue": "conda info --json and package lookup\nIf you set the `--json` flag for `conda info` when searching for packages, you sometimes get nothing:\n\n``` bash\n$ conda info numpy=1.11.0=py35_0\nFetching package metadata: ....\n\nnumpy 1.11.0 py35_0\n-------------------\nfile name : numpy-1.11.0-py35_0.tar.bz2\nname : numpy\nversion : 1.11.0\nbuild number: 0\nbuild string: py35_0\nchannel : defaults\nsize : 6.1 MB\ndate : 2016-03-28\nlicense : BSD\nmd5 : 1900998c19c5e310687013f95374bba2\ninstalled environments:\ndependencies:\n mkl 11.3.1\n python 3.5*\n$ conda info --json numpy=1.11.0=py35_0\n{}\n```\n\nThings work fine for `conda info --json numpy`, so it's something with the spec format.\n\nconda info:\n\n```\n platform : linux-64\n conda version : 4.0.5\n conda-build version : not installed\n python version : 2.7.11.final.0\n requests version : 2.9.1\n root environment : /opt/conda (writable)\n default environment : /opt/conda\n envs directories : /opt/conda/envs\n package cache : /opt/conda/pkgs\n channel URLs : https://repo.continuum.io/pkgs/free/linux-64/\n https://repo.continuum.io/pkgs/free/noarch/\n https://repo.continuum.io/pkgs/pro/linux-64/\n https://repo.continuum.io/pkgs/pro/noarch/\n config file : None\n is foreign system : False\n```\n\n", "before_files": [{"content": "# (c) 2012-2013 Continuum Analytics, Inc. 
/ http://continuum.io\n# All Rights Reserved\n#\n# conda is distributed under the terms of the BSD 3-clause license.\n# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\n\nfrom __future__ import print_function, division, absolute_import\n\nimport json\nimport os\nimport re\nimport sys\nfrom collections import defaultdict, OrderedDict\nfrom itertools import chain\nfrom os import listdir\nfrom os.path import exists, expanduser, join\n\nfrom conda.cli import common\nfrom conda.compat import iteritems\n\nhelp = \"Display information about current conda install.\"\n\nexample = \"\"\"\n\nExamples:\n\n conda info -a\n\"\"\"\n\ndef configure_parser(sub_parsers):\n p = sub_parsers.add_parser(\n 'info',\n description=help,\n help=help,\n epilog=example,\n )\n common.add_parser_json(p)\n p.add_argument(\n '-a', \"--all\",\n action=\"store_true\",\n help=\"Show all information, (environments, license, and system \"\n \"information.\")\n p.add_argument(\n '-e', \"--envs\",\n action=\"store_true\",\n help=\"List all known conda environments.\",\n )\n p.add_argument(\n '-l', \"--license\",\n action=\"store_true\",\n help=\"Display information about the local conda licenses list.\",\n )\n p.add_argument(\n '-s', \"--system\",\n action=\"store_true\",\n help=\"List environment variables.\",\n )\n p.add_argument(\n 'packages',\n action=\"store\",\n nargs='*',\n help=\"Display information about packages.\",\n )\n p.add_argument(\n '--root',\n action='store_true',\n help='Display root environment path.',\n )\n p.add_argument(\n '--unsafe-channels',\n action='store_true',\n help='Display list of channels with tokens exposed.',\n )\n p.set_defaults(func=execute)\n\n\ndef show_pkg_info(name):\n from conda.api import get_index\n from conda.resolve import Resolve\n\n index = get_index()\n r = Resolve(index)\n print(name)\n if name in r.groups:\n for pkg in sorted(r.get_pkgs(name)):\n print(' %-15s %15s %s' % (\n pkg.version,\n pkg.build,\n common.disp_features(r.features(pkg.fn))))\n else:\n print(' not available')\n # TODO\n\n\npython_re = re.compile('python\\d\\.\\d')\ndef get_user_site():\n site_dirs = []\n if sys.platform != 'win32':\n if exists(expanduser('~/.local/lib')):\n for path in listdir(expanduser('~/.local/lib/')):\n if python_re.match(path):\n site_dirs.append(\"~/.local/lib/%s\" % path)\n else:\n if 'APPDATA' not in os.environ:\n return site_dirs\n APPDATA = os.environ['APPDATA']\n if exists(join(APPDATA, 'Python')):\n site_dirs = [join(APPDATA, 'Python', i) for i in\n listdir(join(APPDATA, 'PYTHON'))]\n return site_dirs\n\n\ndef pretty_package(pkg):\n import conda.config as config\n from conda.utils import human_bytes\n\n d = OrderedDict([\n ('file name', pkg.fn),\n ('name', pkg.name),\n ('version', pkg.version),\n ('build number', pkg.build_number),\n ('build string', pkg.build),\n ('channel', config.canonical_channel_name(pkg.channel)),\n ('size', human_bytes(pkg.info['size'])),\n ])\n rest = pkg.info\n for key in sorted(rest):\n if key in {'build', 'depends', 'requires', 'channel', 'name',\n 'version', 'build_number', 'size'}:\n continue\n d[key] = rest[key]\n\n print()\n header = \"%s %s %s\" % (d['name'], d['version'], d['build string'])\n print(header)\n print('-'*len(header))\n for key in d:\n print(\"%-12s: %s\" % (key, d[key]))\n print('dependencies:')\n for dep in pkg.info['depends']:\n print(' %s' % dep)\n\ndef execute(args, parser):\n import os\n from os.path import dirname\n\n import conda\n import conda.config as config\n from conda.resolve import Resolve\n from 
conda.cli.main_init import is_initialized\n from conda.api import get_index, get_package_versions\n\n if args.root:\n if args.json:\n common.stdout_json({'root_prefix': config.root_dir})\n else:\n print(config.root_dir)\n return\n\n if args.packages:\n if args.json:\n results = defaultdict(list)\n for arg in args.packages:\n for pkg in get_package_versions(arg):\n results[arg].append(pkg._asdict())\n common.stdout_json(results)\n return\n index = get_index()\n r = Resolve(index)\n specs = map(common.arg2spec, args.packages)\n\n for spec in specs:\n versions = r.get_pkgs(spec)\n for pkg in sorted(versions):\n pretty_package(pkg)\n return\n\n options = 'envs', 'system', 'license'\n\n try:\n import requests\n requests_version = requests.__version__\n except ImportError:\n requests_version = \"could not import\"\n except Exception as e:\n requests_version = \"Error %s\" % e\n\n try:\n import conda_build\n except ImportError:\n conda_build_version = \"not installed\"\n except Exception as e:\n conda_build_version = \"Error %s\" % e\n else:\n conda_build_version = conda_build.__version__\n\n # this is a hack associated with channel weight until we get the package cache reworked\n # in a future release\n # for now, just ordering the channels for display in a semi-plausible way\n d = defaultdict(list)\n any(d[v[1]].append(k) for k, v in iteritems(config.get_channel_urls()))\n channels = list(chain.from_iterable(d[q] for q in sorted(d, reverse=True)))\n\n info_dict = dict(\n platform=config.subdir,\n conda_version=conda.__version__,\n conda_build_version=conda_build_version,\n root_prefix=config.root_dir,\n root_writable=config.root_writable,\n pkgs_dirs=config.pkgs_dirs,\n envs_dirs=config.envs_dirs,\n default_prefix=config.default_prefix,\n channels=channels,\n rc_path=config.rc_path,\n user_rc_path=config.user_rc_path,\n sys_rc_path=config.sys_rc_path,\n is_foreign=bool(config.foreign),\n envs=[],\n python_version='.'.join(map(str, sys.version_info)),\n requests_version=requests_version,\n )\n\n if args.unsafe_channels:\n if not args.json:\n print(\"\\n\".join(info_dict[\"channels\"]))\n else:\n print(json.dumps({\"channels\": info_dict[\"channels\"]}))\n return 0\n else:\n info_dict['channels'] = [config.hide_binstar_tokens(c) for c in\n info_dict['channels']]\n if args.all or args.json:\n for option in options:\n setattr(args, option, True)\n\n if args.all or all(not getattr(args, opt) for opt in options):\n for key in 'pkgs_dirs', 'envs_dirs', 'channels':\n info_dict['_' + key] = ('\\n' + 24 * ' ').join(info_dict[key])\n info_dict['_rtwro'] = ('writable' if info_dict['root_writable'] else\n 'read only')\n print(\"\"\"\\\nCurrent conda install:\n\n platform : %(platform)s\n conda version : %(conda_version)s\n conda-build version : %(conda_build_version)s\n python version : %(python_version)s\n requests version : %(requests_version)s\n root environment : %(root_prefix)s (%(_rtwro)s)\n default environment : %(default_prefix)s\n envs directories : %(_envs_dirs)s\n package cache : %(_pkgs_dirs)s\n channel URLs : %(_channels)s\n config file : %(rc_path)s\n is foreign system : %(is_foreign)s\n\"\"\" % info_dict)\n if not is_initialized():\n print(\"\"\"\\\n# NOTE:\n# root directory '%s' is uninitialized\"\"\" % config.root_dir)\n\n if args.envs:\n common.handle_envs_list(info_dict['envs'], not args.json)\n\n if args.system and not args.json:\n from conda.cli.find_commands import find_commands, find_executable\n\n print(\"sys.version: %s...\" % (sys.version[:40]))\n print(\"sys.prefix: %s\" % 
sys.prefix)\n print(\"sys.executable: %s\" % sys.executable)\n print(\"conda location: %s\" % dirname(conda.__file__))\n for cmd in sorted(set(find_commands() + ['build'])):\n print(\"conda-%s: %s\" % (cmd, find_executable('conda-' + cmd)))\n print(\"user site dirs: \", end='')\n site_dirs = get_user_site()\n if site_dirs:\n print(site_dirs[0])\n else:\n print()\n for site_dir in site_dirs[1:]:\n print(' %s' % site_dir)\n print()\n\n evars = ['PATH', 'PYTHONPATH', 'PYTHONHOME', 'CONDA_DEFAULT_ENV',\n 'CIO_TEST', 'CONDA_ENVS_PATH']\n if config.platform == 'linux':\n evars.append('LD_LIBRARY_PATH')\n elif config.platform == 'osx':\n evars.append('DYLD_LIBRARY_PATH')\n for ev in sorted(evars):\n print(\"%s: %s\" % (ev, os.getenv(ev, '<not set>')))\n print()\n\n if args.license and not args.json:\n try:\n from _license import show_info\n show_info()\n except ImportError:\n print(\"\"\"\\\nWARNING: could not import _license.show_info\n# try:\n# $ conda install -n root _license\"\"\")\n\n if args.json:\n common.stdout_json(info_dict)\n", "path": "conda/cli/main_info.py"}], "after_files": [{"content": "# (c) 2012-2013 Continuum Analytics, Inc. / http://continuum.io\n# All Rights Reserved\n#\n# conda is distributed under the terms of the BSD 3-clause license.\n# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\n\nfrom __future__ import print_function, division, absolute_import\n\nimport json\nimport os\nimport re\nimport sys\nfrom collections import defaultdict, OrderedDict\nfrom itertools import chain\nfrom os import listdir\nfrom os.path import exists, expanduser, join\n\nfrom conda.cli import common\nfrom conda.compat import iteritems\n\nhelp = \"Display information about current conda install.\"\n\nexample = \"\"\"\n\nExamples:\n\n conda info -a\n\"\"\"\n\ndef configure_parser(sub_parsers):\n p = sub_parsers.add_parser(\n 'info',\n description=help,\n help=help,\n epilog=example,\n )\n common.add_parser_json(p)\n p.add_argument(\n '-a', \"--all\",\n action=\"store_true\",\n help=\"Show all information, (environments, license, and system \"\n \"information.\")\n p.add_argument(\n '-e', \"--envs\",\n action=\"store_true\",\n help=\"List all known conda environments.\",\n )\n p.add_argument(\n '-l', \"--license\",\n action=\"store_true\",\n help=\"Display information about the local conda licenses list.\",\n )\n p.add_argument(\n '-s', \"--system\",\n action=\"store_true\",\n help=\"List environment variables.\",\n )\n p.add_argument(\n 'packages',\n action=\"store\",\n nargs='*',\n help=\"Display information about packages.\",\n )\n p.add_argument(\n '--root',\n action='store_true',\n help='Display root environment path.',\n )\n p.add_argument(\n '--unsafe-channels',\n action='store_true',\n help='Display list of channels with tokens exposed.',\n )\n p.set_defaults(func=execute)\n\n\ndef show_pkg_info(name):\n from conda.api import get_index\n from conda.resolve import Resolve\n\n index = get_index()\n r = Resolve(index)\n print(name)\n if name in r.groups:\n for pkg in sorted(r.get_pkgs(name)):\n print(' %-15s %15s %s' % (\n pkg.version,\n pkg.build,\n common.disp_features(r.features(pkg.fn))))\n else:\n print(' not available')\n # TODO\n\n\npython_re = re.compile('python\\d\\.\\d')\ndef get_user_site():\n site_dirs = []\n if sys.platform != 'win32':\n if exists(expanduser('~/.local/lib')):\n for path in listdir(expanduser('~/.local/lib/')):\n if python_re.match(path):\n site_dirs.append(\"~/.local/lib/%s\" % path)\n else:\n if 'APPDATA' not in os.environ:\n return 
site_dirs\n APPDATA = os.environ['APPDATA']\n if exists(join(APPDATA, 'Python')):\n site_dirs = [join(APPDATA, 'Python', i) for i in\n listdir(join(APPDATA, 'PYTHON'))]\n return site_dirs\n\n\ndef pretty_package(pkg):\n import conda.config as config\n from conda.utils import human_bytes\n\n d = OrderedDict([\n ('file name', pkg.fn),\n ('name', pkg.name),\n ('version', pkg.version),\n ('build number', pkg.build_number),\n ('build string', pkg.build),\n ('channel', config.canonical_channel_name(pkg.channel)),\n ('size', human_bytes(pkg.info['size'])),\n ])\n rest = pkg.info\n for key in sorted(rest):\n if key in {'build', 'depends', 'requires', 'channel', 'name',\n 'version', 'build_number', 'size'}:\n continue\n d[key] = rest[key]\n\n print()\n header = \"%s %s %s\" % (d['name'], d['version'], d['build string'])\n print(header)\n print('-'*len(header))\n for key in d:\n print(\"%-12s: %s\" % (key, d[key]))\n print('dependencies:')\n for dep in pkg.info['depends']:\n print(' %s' % dep)\n\ndef execute(args, parser):\n import os\n from os.path import dirname\n\n import conda\n import conda.config as config\n from conda.resolve import Resolve\n from conda.cli.main_init import is_initialized\n from conda.api import get_index\n\n if args.root:\n if args.json:\n common.stdout_json({'root_prefix': config.root_dir})\n else:\n print(config.root_dir)\n return\n\n if args.packages:\n index = get_index()\n r = Resolve(index)\n if args.json:\n common.stdout_json({\n package: [p._asdict()\n for p in sorted(r.get_pkgs(common.arg2spec(package)))]\n for package in args.packages\n })\n else:\n for package in args.packages:\n versions = r.get_pkgs(common.arg2spec(package))\n for pkg in sorted(versions):\n pretty_package(pkg)\n return\n\n options = 'envs', 'system', 'license'\n\n try:\n import requests\n requests_version = requests.__version__\n except ImportError:\n requests_version = \"could not import\"\n except Exception as e:\n requests_version = \"Error %s\" % e\n\n try:\n import conda_build\n except ImportError:\n conda_build_version = \"not installed\"\n except Exception as e:\n conda_build_version = \"Error %s\" % e\n else:\n conda_build_version = conda_build.__version__\n\n # this is a hack associated with channel weight until we get the package cache reworked\n # in a future release\n # for now, just ordering the channels for display in a semi-plausible way\n d = defaultdict(list)\n any(d[v[1]].append(k) for k, v in iteritems(config.get_channel_urls()))\n channels = list(chain.from_iterable(d[q] for q in sorted(d, reverse=True)))\n\n info_dict = dict(\n platform=config.subdir,\n conda_version=conda.__version__,\n conda_build_version=conda_build_version,\n root_prefix=config.root_dir,\n root_writable=config.root_writable,\n pkgs_dirs=config.pkgs_dirs,\n envs_dirs=config.envs_dirs,\n default_prefix=config.default_prefix,\n channels=channels,\n rc_path=config.rc_path,\n user_rc_path=config.user_rc_path,\n sys_rc_path=config.sys_rc_path,\n is_foreign=bool(config.foreign),\n envs=[],\n python_version='.'.join(map(str, sys.version_info)),\n requests_version=requests_version,\n )\n\n if args.unsafe_channels:\n if not args.json:\n print(\"\\n\".join(info_dict[\"channels\"]))\n else:\n print(json.dumps({\"channels\": info_dict[\"channels\"]}))\n return 0\n else:\n info_dict['channels'] = [config.hide_binstar_tokens(c) for c in\n info_dict['channels']]\n if args.all or args.json:\n for option in options:\n setattr(args, option, True)\n\n if args.all or all(not getattr(args, opt) for opt in options):\n for key 
in 'pkgs_dirs', 'envs_dirs', 'channels':\n info_dict['_' + key] = ('\\n' + 24 * ' ').join(info_dict[key])\n info_dict['_rtwro'] = ('writable' if info_dict['root_writable'] else\n 'read only')\n print(\"\"\"\\\nCurrent conda install:\n\n platform : %(platform)s\n conda version : %(conda_version)s\n conda-build version : %(conda_build_version)s\n python version : %(python_version)s\n requests version : %(requests_version)s\n root environment : %(root_prefix)s (%(_rtwro)s)\n default environment : %(default_prefix)s\n envs directories : %(_envs_dirs)s\n package cache : %(_pkgs_dirs)s\n channel URLs : %(_channels)s\n config file : %(rc_path)s\n is foreign system : %(is_foreign)s\n\"\"\" % info_dict)\n if not is_initialized():\n print(\"\"\"\\\n# NOTE:\n# root directory '%s' is uninitialized\"\"\" % config.root_dir)\n\n if args.envs:\n common.handle_envs_list(info_dict['envs'], not args.json)\n\n if args.system and not args.json:\n from conda.cli.find_commands import find_commands, find_executable\n\n print(\"sys.version: %s...\" % (sys.version[:40]))\n print(\"sys.prefix: %s\" % sys.prefix)\n print(\"sys.executable: %s\" % sys.executable)\n print(\"conda location: %s\" % dirname(conda.__file__))\n for cmd in sorted(set(find_commands() + ['build'])):\n print(\"conda-%s: %s\" % (cmd, find_executable('conda-' + cmd)))\n print(\"user site dirs: \", end='')\n site_dirs = get_user_site()\n if site_dirs:\n print(site_dirs[0])\n else:\n print()\n for site_dir in site_dirs[1:]:\n print(' %s' % site_dir)\n print()\n\n evars = ['PATH', 'PYTHONPATH', 'PYTHONHOME', 'CONDA_DEFAULT_ENV',\n 'CIO_TEST', 'CONDA_ENVS_PATH']\n if config.platform == 'linux':\n evars.append('LD_LIBRARY_PATH')\n elif config.platform == 'osx':\n evars.append('DYLD_LIBRARY_PATH')\n for ev in sorted(evars):\n print(\"%s: %s\" % (ev, os.getenv(ev, '<not set>')))\n print()\n\n if args.license and not args.json:\n try:\n from _license import show_info\n show_info()\n except ImportError:\n print(\"\"\"\\\nWARNING: could not import _license.show_info\n# try:\n# $ conda install -n root _license\"\"\")\n\n if args.json:\n common.stdout_json(info_dict)\n", "path": "conda/cli/main_info.py"}]}
num_tokens: 3,750 | num_tokens_diff: 358
problem_id: gh_patches_debug_11521
source: rasdani/github-patches
task_type: git_diff
in_source_id: svthalia__concrexit-1204
prompt:
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Events presence registration in app creates payment
### Describe the bug
Events presence registration in app creates payment
### How to reproduce
Steps to reproduce the behaviour:
1. Create an event with registrations
2. Register
3. Mark the registration as present _in the app_
You can simulate this by going to the registration using the API on `/v1/api/registrations/` and patching the body:
```
{
"payment": "no_payment",
"present": true
}
```
4. Note that a payment with the status 'no payment' is created for this registration
### Expected behaviour
No payment should be created
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/events/services.py`
Content:
```
1 from collections import OrderedDict
2
3 from django.utils import timezone
4 from django.utils.datetime_safe import date
5 from django.utils.translation import gettext_lazy as _, get_language
6
7 from events import emails
8 from events.exceptions import RegistrationError
9 from events.models import EventRegistration, RegistrationInformationField, Event
10 from payments.api.fields import PaymentTypeField
11 from payments.models import Payment
12 from payments.services import create_payment, delete_payment
13 from utils.snippets import datetime_to_lectureyear
14
15
16 def is_user_registered(member, event):
17 """
18 Returns if the user is registered for the specified event
19
20 :param member: the user
21 :param event: the event
22 :return: None if registration is not required or no member else True/False
23 """
24 if not event.registration_required or not member.is_authenticated:
25 return None
26
27 return event.registrations.filter(member=member, date_cancelled=None).count() > 0
28
29
30 def event_permissions(member, event, name=None):
31 """
32 Returns a dictionary with the available event permissions of the user
33
34 :param member: the user
35 :param event: the event
36 :param name: the name of a non member registration
37 :return: the permission dictionary
38 """
39 perms = {
40 "create_registration": False,
41 "cancel_registration": False,
42 "update_registration": False,
43 }
44 if not member:
45 return perms
46 if not (member.is_authenticated or name):
47 return perms
48
49 registration = None
50 try:
51 registration = EventRegistration.objects.get(
52 event=event, member=member, name=name
53 )
54 except EventRegistration.DoesNotExist:
55 pass
56
57 perms["create_registration"] = (
58 (registration is None or registration.date_cancelled is not None)
59 and event.registration_allowed
60 and (name or member.can_attend_events)
61 )
62 perms["cancel_registration"] = (
63 registration is not None
64 and registration.date_cancelled is None
65 and (event.cancellation_allowed or name)
66 )
67 perms["update_registration"] = (
68 registration is not None
69 and registration.date_cancelled is None
70 and event.has_fields()
71 and event.registration_allowed
72 and (name or member.can_attend_events)
73 )
74 return perms
75
76
77 def is_organiser(member, event):
78 if member and member.is_authenticated:
79 if member.is_superuser or member.has_perm("events.override_organiser"):
80 return True
81
82 if event:
83 return member.get_member_groups().filter(pk=event.organiser.pk).count() != 0
84
85 return False
86
87
88 def create_registration(member, event):
89 """
90 Creates a new user registration for an event
91
92 :param member: the user
93 :param event: the event
94 :return: returns the registration if successful
95 """
96 if event_permissions(member, event)["create_registration"]:
97 registration = None
98 try:
99 registration = EventRegistration.objects.get(event=event, member=member)
100 except EventRegistration.DoesNotExist:
101 pass
102
103 if registration is None:
104 return EventRegistration.objects.create(event=event, member=member)
105 elif registration.date_cancelled is not None:
106 if registration.is_late_cancellation():
107 raise RegistrationError(
108 _(
109 "You cannot re-register anymore "
110 "since you've cancelled after the "
111 "deadline."
112 )
113 )
114 else:
115 registration.date = timezone.now()
116 registration.date_cancelled = None
117 registration.save()
118
119 return registration
120 elif event_permissions(member, event)["cancel_registration"]:
121 raise RegistrationError(_("You were already registered."))
122 else:
123 raise RegistrationError(_("You may not register."))
124
125
126 def cancel_registration(member, event):
127 """
128 Cancel a user registration for an event
129
130 :param member: the user
131 :param event: the event
132 """
133 registration = None
134 try:
135 registration = EventRegistration.objects.get(event=event, member=member)
136 except EventRegistration.DoesNotExist:
137 pass
138
139 if event_permissions(member, event)["cancel_registration"] and registration:
140 if registration.payment is not None:
141 delete_payment(registration)
142 if registration.queue_position == 0:
143 emails.notify_first_waiting(event)
144
145 if event.send_cancel_email and event.after_cancel_deadline:
146 emails.notify_organiser(event, registration)
147
148 # Note that this doesn"t remove the values for the
149 # information fields that the user entered upon registering.
150 # But this is regarded as a feature, not a bug. Especially
151 # since the values will still appear in the backend.
152 registration.date_cancelled = timezone.now()
153 registration.save()
154 else:
155 raise RegistrationError(_("You are not registered for this event."))
156
157
158 def pay_with_tpay(member, event):
159 """
160 Add a Thalia Pay payment to an event registration
161
162 :param member: the user
163 :param event: the event
164 """
165 try:
166 registration = EventRegistration.objects.get(event=event, member=member)
167 except EventRegistration.DoesNotExist:
168 raise RegistrationError(_("You are not registered for this event."))
169
170 if registration.payment is None:
171 registration.payment = create_payment(
172 payable=registration, processed_by=member, pay_type=Payment.TPAY
173 )
174 registration.save()
175 else:
176 raise RegistrationError(_("You have already paid for this event."))
177
178
179 def update_registration(
180 member=None, event=None, name=None, registration=None, field_values=None
181 ):
182 """
183 Updates a user registration of an event
184
185 :param request: http request
186 :param member: the user
187 :param event: the event
188 :param name: the name of a registration not associated with a user
189 :param registration: the registration
190 :param field_values: values for the information fields
191 """
192 if not registration:
193 try:
194 registration = EventRegistration.objects.get(
195 event=event, member=member, name=name
196 )
197 except EventRegistration.DoesNotExist as error:
198 raise RegistrationError(
199 _("You are not registered for this event.")
200 ) from error
201 else:
202 member = registration.member
203 event = registration.event
204 name = registration.name
205
206 if (
207 not event_permissions(member, event, name)["update_registration"]
208 or not field_values
209 ):
210 return
211
212 for field_id, field_value in field_values:
213 field = RegistrationInformationField.objects.get(
214 id=field_id.replace("info_field_", "")
215 )
216
217 if (
218 field.type == RegistrationInformationField.INTEGER_FIELD
219 and field_value is None
220 ):
221 field_value = 0
222 elif (
223 field.type == RegistrationInformationField.BOOLEAN_FIELD
224 and field_value is None
225 ):
226 field_value = False
227 elif (
228 field.type == RegistrationInformationField.TEXT_FIELD
229 and field_value is None
230 ):
231 field_value = ""
232
233 field.set_value_for(registration, field_value)
234
235
236 def registration_fields(request, member=None, event=None, registration=None, name=None):
237 """
238 Returns information about the registration fields of a registration
239
240 :param member: the user (optional if registration provided)
241 :param name: the name of a non member registration
242 (optional if registration provided)
243 :param event: the event (optional if registration provided)
244 :param registration: the registration (optional if member & event provided)
245 :return: the fields
246 """
247
248 if registration is None:
249 try:
250 registration = EventRegistration.objects.get(
251 event=event, member=member, name=name
252 )
253 except EventRegistration.DoesNotExist as error:
254 raise RegistrationError(
255 _("You are not registered for this event.")
256 ) from error
257 except EventRegistration.MultipleObjectsReturned as error:
258 raise RegistrationError(
259 _("Unable to find the right registration.")
260 ) from error
261 else:
262 member = registration.member
263 event = registration.event
264 name = registration.name
265
266 perms = event_permissions(member, event, name)[
267 "update_registration"
268 ] or is_organiser(request.member, event)
269 if perms and registration:
270 information_fields = registration.information_fields
271 fields = OrderedDict()
272
273 for information_field in information_fields:
274 field = information_field["field"]
275
276 fields["info_field_{}".format(field.id)] = {
277 "type": field.type,
278 "label": getattr(field, "{}_{}".format("name", get_language())),
279 "description": getattr(
280 field, "{}_{}".format("description", get_language())
281 ),
282 "value": information_field["value"],
283 "required": field.required,
284 }
285
286 return fields
287 else:
288 raise RegistrationError(_("You are not allowed to update this registration."))
289
290
291 def update_registration_by_organiser(registration, member, data):
292 if not is_organiser(member, registration.event):
293 raise RegistrationError(_("You are not allowed to update this registration."))
294
295 if "payment" in data:
296 if (
297 data["payment"]["type"] == PaymentTypeField.NO_PAYMENT
298 and registration.payment is not None
299 ):
300 delete_payment(registration)
301 else:
302 registration.payment = create_payment(
303 payable=registration,
304 processed_by=member,
305 pay_type=data["payment"]["type"],
306 )
307
308 if "present" in data:
309 registration.present = data["present"]
310
311 registration.save()
312
313
314 def generate_category_statistics():
315 """
316 Generate statistics about events, number of events per category
317 :return: Dict with key, value resp. being category, event count.
318 """
319 year = datetime_to_lectureyear(timezone.now())
320
321 data = {}
322 for i in range(5):
323 year_start = date(year=year - i, month=9, day=1)
324 year_end = date(year=year - i + 1, month=9, day=1)
325 data[str(year - i)] = {
326 str(display): Event.objects.filter(
327 category=key, start__gte=year_start, end__lte=year_end
328 ).count()
329 for key, display in Event.EVENT_CATEGORIES
330 }
331
332 return data
333
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
golden_diff:
diff --git a/website/events/services.py b/website/events/services.py
--- a/website/events/services.py
+++ b/website/events/services.py
@@ -293,11 +293,9 @@
raise RegistrationError(_("You are not allowed to update this registration."))
if "payment" in data:
- if (
- data["payment"]["type"] == PaymentTypeField.NO_PAYMENT
- and registration.payment is not None
- ):
- delete_payment(registration)
+ if data["payment"]["type"] == PaymentTypeField.NO_PAYMENT:
+ if registration.payment is not None:
+ delete_payment(registration)
else:
registration.payment = create_payment(
payable=registration,
|
{"golden_diff": "diff --git a/website/events/services.py b/website/events/services.py\n--- a/website/events/services.py\n+++ b/website/events/services.py\n@@ -293,11 +293,9 @@\n raise RegistrationError(_(\"You are not allowed to update this registration.\"))\n \n if \"payment\" in data:\n- if (\n- data[\"payment\"][\"type\"] == PaymentTypeField.NO_PAYMENT\n- and registration.payment is not None\n- ):\n- delete_payment(registration)\n+ if data[\"payment\"][\"type\"] == PaymentTypeField.NO_PAYMENT:\n+ if registration.payment is not None:\n+ delete_payment(registration)\n else:\n registration.payment = create_payment(\n payable=registration,\n", "issue": "Events presence registration in app creates payment\n### Describe the bug\r\n\r\nEvents presence registration in app creates payment\r\n\r\n### How to reproduce\r\nSteps to reproduce the behaviour:\r\n1. Create an event with registrations\r\n2. Register\r\n3. Mark the registration as present _in the app_\r\nYou can simulate this by going to the registration using the API on `/v1/api/registrations/` and patching the body:\r\n```\r\n{\r\n \"payment\": \"no_payment\",\r\n \"present\": true\r\n}\r\n```\r\n4. Note that a payment with the status 'no payment' is created for this registration\r\n\r\n### Expected behaviour\r\n\r\nNo payment should be created\r\n\r\n\n", "before_files": [{"content": "from collections import OrderedDict\n\nfrom django.utils import timezone\nfrom django.utils.datetime_safe import date\nfrom django.utils.translation import gettext_lazy as _, get_language\n\nfrom events import emails\nfrom events.exceptions import RegistrationError\nfrom events.models import EventRegistration, RegistrationInformationField, Event\nfrom payments.api.fields import PaymentTypeField\nfrom payments.models import Payment\nfrom payments.services import create_payment, delete_payment\nfrom utils.snippets import datetime_to_lectureyear\n\n\ndef is_user_registered(member, event):\n \"\"\"\n Returns if the user is registered for the specified event\n\n :param member: the user\n :param event: the event\n :return: None if registration is not required or no member else True/False\n \"\"\"\n if not event.registration_required or not member.is_authenticated:\n return None\n\n return event.registrations.filter(member=member, date_cancelled=None).count() > 0\n\n\ndef event_permissions(member, event, name=None):\n \"\"\"\n Returns a dictionary with the available event permissions of the user\n\n :param member: the user\n :param event: the event\n :param name: the name of a non member registration\n :return: the permission dictionary\n \"\"\"\n perms = {\n \"create_registration\": False,\n \"cancel_registration\": False,\n \"update_registration\": False,\n }\n if not member:\n return perms\n if not (member.is_authenticated or name):\n return perms\n\n registration = None\n try:\n registration = EventRegistration.objects.get(\n event=event, member=member, name=name\n )\n except EventRegistration.DoesNotExist:\n pass\n\n perms[\"create_registration\"] = (\n (registration is None or registration.date_cancelled is not None)\n and event.registration_allowed\n and (name or member.can_attend_events)\n )\n perms[\"cancel_registration\"] = (\n registration is not None\n and registration.date_cancelled is None\n and (event.cancellation_allowed or name)\n )\n perms[\"update_registration\"] = (\n registration is not None\n and registration.date_cancelled is None\n and event.has_fields()\n and event.registration_allowed\n and (name or member.can_attend_events)\n )\n 
return perms\n\n\ndef is_organiser(member, event):\n if member and member.is_authenticated:\n if member.is_superuser or member.has_perm(\"events.override_organiser\"):\n return True\n\n if event:\n return member.get_member_groups().filter(pk=event.organiser.pk).count() != 0\n\n return False\n\n\ndef create_registration(member, event):\n \"\"\"\n Creates a new user registration for an event\n\n :param member: the user\n :param event: the event\n :return: returns the registration if successful\n \"\"\"\n if event_permissions(member, event)[\"create_registration\"]:\n registration = None\n try:\n registration = EventRegistration.objects.get(event=event, member=member)\n except EventRegistration.DoesNotExist:\n pass\n\n if registration is None:\n return EventRegistration.objects.create(event=event, member=member)\n elif registration.date_cancelled is not None:\n if registration.is_late_cancellation():\n raise RegistrationError(\n _(\n \"You cannot re-register anymore \"\n \"since you've cancelled after the \"\n \"deadline.\"\n )\n )\n else:\n registration.date = timezone.now()\n registration.date_cancelled = None\n registration.save()\n\n return registration\n elif event_permissions(member, event)[\"cancel_registration\"]:\n raise RegistrationError(_(\"You were already registered.\"))\n else:\n raise RegistrationError(_(\"You may not register.\"))\n\n\ndef cancel_registration(member, event):\n \"\"\"\n Cancel a user registration for an event\n\n :param member: the user\n :param event: the event\n \"\"\"\n registration = None\n try:\n registration = EventRegistration.objects.get(event=event, member=member)\n except EventRegistration.DoesNotExist:\n pass\n\n if event_permissions(member, event)[\"cancel_registration\"] and registration:\n if registration.payment is not None:\n delete_payment(registration)\n if registration.queue_position == 0:\n emails.notify_first_waiting(event)\n\n if event.send_cancel_email and event.after_cancel_deadline:\n emails.notify_organiser(event, registration)\n\n # Note that this doesn\"t remove the values for the\n # information fields that the user entered upon registering.\n # But this is regarded as a feature, not a bug. 
Especially\n # since the values will still appear in the backend.\n registration.date_cancelled = timezone.now()\n registration.save()\n else:\n raise RegistrationError(_(\"You are not registered for this event.\"))\n\n\ndef pay_with_tpay(member, event):\n \"\"\"\n Add a Thalia Pay payment to an event registration\n\n :param member: the user\n :param event: the event\n \"\"\"\n try:\n registration = EventRegistration.objects.get(event=event, member=member)\n except EventRegistration.DoesNotExist:\n raise RegistrationError(_(\"You are not registered for this event.\"))\n\n if registration.payment is None:\n registration.payment = create_payment(\n payable=registration, processed_by=member, pay_type=Payment.TPAY\n )\n registration.save()\n else:\n raise RegistrationError(_(\"You have already paid for this event.\"))\n\n\ndef update_registration(\n member=None, event=None, name=None, registration=None, field_values=None\n):\n \"\"\"\n Updates a user registration of an event\n\n :param request: http request\n :param member: the user\n :param event: the event\n :param name: the name of a registration not associated with a user\n :param registration: the registration\n :param field_values: values for the information fields\n \"\"\"\n if not registration:\n try:\n registration = EventRegistration.objects.get(\n event=event, member=member, name=name\n )\n except EventRegistration.DoesNotExist as error:\n raise RegistrationError(\n _(\"You are not registered for this event.\")\n ) from error\n else:\n member = registration.member\n event = registration.event\n name = registration.name\n\n if (\n not event_permissions(member, event, name)[\"update_registration\"]\n or not field_values\n ):\n return\n\n for field_id, field_value in field_values:\n field = RegistrationInformationField.objects.get(\n id=field_id.replace(\"info_field_\", \"\")\n )\n\n if (\n field.type == RegistrationInformationField.INTEGER_FIELD\n and field_value is None\n ):\n field_value = 0\n elif (\n field.type == RegistrationInformationField.BOOLEAN_FIELD\n and field_value is None\n ):\n field_value = False\n elif (\n field.type == RegistrationInformationField.TEXT_FIELD\n and field_value is None\n ):\n field_value = \"\"\n\n field.set_value_for(registration, field_value)\n\n\ndef registration_fields(request, member=None, event=None, registration=None, name=None):\n \"\"\"\n Returns information about the registration fields of a registration\n\n :param member: the user (optional if registration provided)\n :param name: the name of a non member registration\n (optional if registration provided)\n :param event: the event (optional if registration provided)\n :param registration: the registration (optional if member & event provided)\n :return: the fields\n \"\"\"\n\n if registration is None:\n try:\n registration = EventRegistration.objects.get(\n event=event, member=member, name=name\n )\n except EventRegistration.DoesNotExist as error:\n raise RegistrationError(\n _(\"You are not registered for this event.\")\n ) from error\n except EventRegistration.MultipleObjectsReturned as error:\n raise RegistrationError(\n _(\"Unable to find the right registration.\")\n ) from error\n else:\n member = registration.member\n event = registration.event\n name = registration.name\n\n perms = event_permissions(member, event, name)[\n \"update_registration\"\n ] or is_organiser(request.member, event)\n if perms and registration:\n information_fields = registration.information_fields\n fields = OrderedDict()\n\n for information_field in 
information_fields:\n field = information_field[\"field\"]\n\n fields[\"info_field_{}\".format(field.id)] = {\n \"type\": field.type,\n \"label\": getattr(field, \"{}_{}\".format(\"name\", get_language())),\n \"description\": getattr(\n field, \"{}_{}\".format(\"description\", get_language())\n ),\n \"value\": information_field[\"value\"],\n \"required\": field.required,\n }\n\n return fields\n else:\n raise RegistrationError(_(\"You are not allowed to update this registration.\"))\n\n\ndef update_registration_by_organiser(registration, member, data):\n if not is_organiser(member, registration.event):\n raise RegistrationError(_(\"You are not allowed to update this registration.\"))\n\n if \"payment\" in data:\n if (\n data[\"payment\"][\"type\"] == PaymentTypeField.NO_PAYMENT\n and registration.payment is not None\n ):\n delete_payment(registration)\n else:\n registration.payment = create_payment(\n payable=registration,\n processed_by=member,\n pay_type=data[\"payment\"][\"type\"],\n )\n\n if \"present\" in data:\n registration.present = data[\"present\"]\n\n registration.save()\n\n\ndef generate_category_statistics():\n \"\"\"\n Generate statistics about events, number of events per category\n :return: Dict with key, value resp. being category, event count.\n \"\"\"\n year = datetime_to_lectureyear(timezone.now())\n\n data = {}\n for i in range(5):\n year_start = date(year=year - i, month=9, day=1)\n year_end = date(year=year - i + 1, month=9, day=1)\n data[str(year - i)] = {\n str(display): Event.objects.filter(\n category=key, start__gte=year_start, end__lte=year_end\n ).count()\n for key, display in Event.EVENT_CATEGORIES\n }\n\n return data\n", "path": "website/events/services.py"}], "after_files": [{"content": "from collections import OrderedDict\n\nfrom django.utils import timezone\nfrom django.utils.datetime_safe import date\nfrom django.utils.translation import gettext_lazy as _, get_language\n\nfrom events import emails\nfrom events.exceptions import RegistrationError\nfrom events.models import EventRegistration, RegistrationInformationField, Event\nfrom payments.api.fields import PaymentTypeField\nfrom payments.models import Payment\nfrom payments.services import create_payment, delete_payment\nfrom utils.snippets import datetime_to_lectureyear\n\n\ndef is_user_registered(member, event):\n \"\"\"\n Returns if the user is registered for the specified event\n\n :param member: the user\n :param event: the event\n :return: None if registration is not required or no member else True/False\n \"\"\"\n if not event.registration_required or not member.is_authenticated:\n return None\n\n return event.registrations.filter(member=member, date_cancelled=None).count() > 0\n\n\ndef event_permissions(member, event, name=None):\n \"\"\"\n Returns a dictionary with the available event permissions of the user\n\n :param member: the user\n :param event: the event\n :param name: the name of a non member registration\n :return: the permission dictionary\n \"\"\"\n perms = {\n \"create_registration\": False,\n \"cancel_registration\": False,\n \"update_registration\": False,\n }\n if not member:\n return perms\n if not (member.is_authenticated or name):\n return perms\n\n registration = None\n try:\n registration = EventRegistration.objects.get(\n event=event, member=member, name=name\n )\n except EventRegistration.DoesNotExist:\n pass\n\n perms[\"create_registration\"] = (\n (registration is None or registration.date_cancelled is not None)\n and event.registration_allowed\n and (name or 
member.can_attend_events)\n )\n perms[\"cancel_registration\"] = (\n registration is not None\n and registration.date_cancelled is None\n and (event.cancellation_allowed or name)\n )\n perms[\"update_registration\"] = (\n registration is not None\n and registration.date_cancelled is None\n and event.has_fields()\n and event.registration_allowed\n and (name or member.can_attend_events)\n )\n return perms\n\n\ndef is_organiser(member, event):\n if member and member.is_authenticated:\n if member.is_superuser or member.has_perm(\"events.override_organiser\"):\n return True\n\n if event:\n return member.get_member_groups().filter(pk=event.organiser.pk).count() != 0\n\n return False\n\n\ndef create_registration(member, event):\n \"\"\"\n Creates a new user registration for an event\n\n :param member: the user\n :param event: the event\n :return: returns the registration if successful\n \"\"\"\n if event_permissions(member, event)[\"create_registration\"]:\n registration = None\n try:\n registration = EventRegistration.objects.get(event=event, member=member)\n except EventRegistration.DoesNotExist:\n pass\n\n if registration is None:\n return EventRegistration.objects.create(event=event, member=member)\n elif registration.date_cancelled is not None:\n if registration.is_late_cancellation():\n raise RegistrationError(\n _(\n \"You cannot re-register anymore \"\n \"since you've cancelled after the \"\n \"deadline.\"\n )\n )\n else:\n registration.date = timezone.now()\n registration.date_cancelled = None\n registration.save()\n\n return registration\n elif event_permissions(member, event)[\"cancel_registration\"]:\n raise RegistrationError(_(\"You were already registered.\"))\n else:\n raise RegistrationError(_(\"You may not register.\"))\n\n\ndef cancel_registration(member, event):\n \"\"\"\n Cancel a user registration for an event\n\n :param member: the user\n :param event: the event\n \"\"\"\n registration = None\n try:\n registration = EventRegistration.objects.get(event=event, member=member)\n except EventRegistration.DoesNotExist:\n pass\n\n if event_permissions(member, event)[\"cancel_registration\"] and registration:\n if registration.payment is not None:\n delete_payment(registration)\n if registration.queue_position == 0:\n emails.notify_first_waiting(event)\n\n if event.send_cancel_email and event.after_cancel_deadline:\n emails.notify_organiser(event, registration)\n\n # Note that this doesn\"t remove the values for the\n # information fields that the user entered upon registering.\n # But this is regarded as a feature, not a bug. 
Especially\n # since the values will still appear in the backend.\n registration.date_cancelled = timezone.now()\n registration.save()\n else:\n raise RegistrationError(_(\"You are not registered for this event.\"))\n\n\ndef pay_with_tpay(member, event):\n \"\"\"\n Add a Thalia Pay payment to an event registration\n\n :param member: the user\n :param event: the event\n \"\"\"\n try:\n registration = EventRegistration.objects.get(event=event, member=member)\n except EventRegistration.DoesNotExist:\n raise RegistrationError(_(\"You are not registered for this event.\"))\n\n if registration.payment is None:\n registration.payment = create_payment(\n payable=registration, processed_by=member, pay_type=Payment.TPAY\n )\n registration.save()\n else:\n raise RegistrationError(_(\"You have already paid for this event.\"))\n\n\ndef update_registration(\n member=None, event=None, name=None, registration=None, field_values=None\n):\n \"\"\"\n Updates a user registration of an event\n\n :param request: http request\n :param member: the user\n :param event: the event\n :param name: the name of a registration not associated with a user\n :param registration: the registration\n :param field_values: values for the information fields\n \"\"\"\n if not registration:\n try:\n registration = EventRegistration.objects.get(\n event=event, member=member, name=name\n )\n except EventRegistration.DoesNotExist as error:\n raise RegistrationError(\n _(\"You are not registered for this event.\")\n ) from error\n else:\n member = registration.member\n event = registration.event\n name = registration.name\n\n if (\n not event_permissions(member, event, name)[\"update_registration\"]\n or not field_values\n ):\n return\n\n for field_id, field_value in field_values:\n field = RegistrationInformationField.objects.get(\n id=field_id.replace(\"info_field_\", \"\")\n )\n\n if (\n field.type == RegistrationInformationField.INTEGER_FIELD\n and field_value is None\n ):\n field_value = 0\n elif (\n field.type == RegistrationInformationField.BOOLEAN_FIELD\n and field_value is None\n ):\n field_value = False\n elif (\n field.type == RegistrationInformationField.TEXT_FIELD\n and field_value is None\n ):\n field_value = \"\"\n\n field.set_value_for(registration, field_value)\n\n\ndef registration_fields(request, member=None, event=None, registration=None, name=None):\n \"\"\"\n Returns information about the registration fields of a registration\n\n :param member: the user (optional if registration provided)\n :param name: the name of a non member registration\n (optional if registration provided)\n :param event: the event (optional if registration provided)\n :param registration: the registration (optional if member & event provided)\n :return: the fields\n \"\"\"\n\n if registration is None:\n try:\n registration = EventRegistration.objects.get(\n event=event, member=member, name=name\n )\n except EventRegistration.DoesNotExist as error:\n raise RegistrationError(\n _(\"You are not registered for this event.\")\n ) from error\n except EventRegistration.MultipleObjectsReturned as error:\n raise RegistrationError(\n _(\"Unable to find the right registration.\")\n ) from error\n else:\n member = registration.member\n event = registration.event\n name = registration.name\n\n perms = event_permissions(member, event, name)[\n \"update_registration\"\n ] or is_organiser(request.member, event)\n if perms and registration:\n information_fields = registration.information_fields\n fields = OrderedDict()\n\n for information_field in 
information_fields:\n field = information_field[\"field\"]\n\n fields[\"info_field_{}\".format(field.id)] = {\n \"type\": field.type,\n \"label\": getattr(field, \"{}_{}\".format(\"name\", get_language())),\n \"description\": getattr(\n field, \"{}_{}\".format(\"description\", get_language())\n ),\n \"value\": information_field[\"value\"],\n \"required\": field.required,\n }\n\n return fields\n else:\n raise RegistrationError(_(\"You are not allowed to update this registration.\"))\n\n\ndef update_registration_by_organiser(registration, member, data):\n if not is_organiser(member, registration.event):\n raise RegistrationError(_(\"You are not allowed to update this registration.\"))\n\n if \"payment\" in data:\n if data[\"payment\"][\"type\"] == PaymentTypeField.NO_PAYMENT:\n if registration.payment is not None:\n delete_payment(registration)\n else:\n registration.payment = create_payment(\n payable=registration,\n processed_by=member,\n pay_type=data[\"payment\"][\"type\"],\n )\n\n if \"present\" in data:\n registration.present = data[\"present\"]\n\n registration.save()\n\n\ndef generate_category_statistics():\n \"\"\"\n Generate statistics about events, number of events per category\n :return: Dict with key, value resp. being category, event count.\n \"\"\"\n year = datetime_to_lectureyear(timezone.now())\n\n data = {}\n for i in range(5):\n year_start = date(year=year - i, month=9, day=1)\n year_end = date(year=year - i + 1, month=9, day=1)\n data[str(year - i)] = {\n str(display): Event.objects.filter(\n category=key, start__gte=year_start, end__lte=year_end\n ).count()\n for key, display in Event.EVENT_CATEGORIES\n }\n\n return data\n", "path": "website/events/services.py"}]}
| 3,436 | 156 |
gh_patches_debug_8025
|
rasdani/github-patches
|
git_diff
|
cleanlab__cleanlab-477
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
fix return type of get_label_quality_scores in token classification
The function only returns a `Tuple[np.ndarray, list]`, but it is annotated with:
https://github.com/cleanlab/cleanlab/blob/fad4eb266dee8b9e2925d3f0d74fe4a81939eb8a/cleanlab/token_classification/rank.py#L36
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cleanlab/token_classification/rank.py`
Content:
```
1 # Copyright (C) 2017-2022 Cleanlab Inc.
2 # This file is part of cleanlab.
3 #
4 # cleanlab is free software: you can redistribute it and/or modify
5 # it under the terms of the GNU Affero General Public License as published
6 # by the Free Software Foundation, either version 3 of the License, or
7 # (at your option) any later version.
8 #
9 # cleanlab is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU Affero General Public License for more details.
13 #
14 # You should have received a copy of the GNU Affero General Public License
15 # along with cleanlab. If not, see <https://www.gnu.org/licenses/>.
16
17 """
18 Methods to rank and score sentences in a token classification dataset (text data), based on how likely they are to contain label errors.
19 """
20
21 import pandas as pd
22 import numpy as np
23 from typing import List, Optional, Union, Tuple
24
25 from cleanlab.rank import get_label_quality_scores as main_get_label_quality_scores
26
27
28 def get_label_quality_scores(
29 labels: list,
30 pred_probs: list,
31 *,
32 tokens: Optional[list] = None,
33 token_score_method: str = "self_confidence",
34 sentence_score_method: str = "min",
35 sentence_score_kwargs: dict = {},
36 ) -> Union[np.ndarray, Tuple[np.ndarray, list]]:
37 """
38 Returns overall quality scores for the labels in each sentence, as well as for the individual tokens' labels in a token classification dataset.
39
40 Each score is between 0 and 1.
41
42 Lower scores indicate token labels that are less likely to be correct, or sentences that are more likely to contain a mislabeled token.
43
44 Parameters
45 ----------
46 labels:
47 Nested list of given labels for all tokens, such that `labels[i]` is a list of labels, one for each token in the `i`-th sentence.
48
49 For a dataset with K classes, each label must be in 0, 1, ..., K-1.
50
51 pred_probs:
52 List of np arrays, such that `pred_probs[i]` has shape ``(T, K)`` if the `i`-th sentence contains T tokens.
53
54 Each row of `pred_probs[i]` corresponds to a token `t` in the `i`-th sentence,
55 and contains model-predicted probabilities that `t` belongs to each of the K possible classes.
56
57 Columns of each `pred_probs[i]` should be ordered such that the probabilities correspond to class 0, 1, ..., K-1.
58
59 tokens:
60 Nested list such that `tokens[i]` is a list of tokens (strings/words) that comprise the `i`-th sentence.
61
62         These strings are used to annotate the returned `token_scores` object, see its documentation for more information.
63
64 sentence_score_method: {"min", "softmin"}, default="min"
65 Method to aggregate individual token label quality scores into a single score for the sentence.
66
67 - `min`: sentence score = minimum of token scores in the sentence
68 - `softmin`: sentence score = ``<s, softmax(1-s, t)>``, where `s` denotes the token label scores of the sentence, and ``<a, b> == np.dot(a, b)``.
69 Here parameter `t` controls the softmax temperature, such that the score converges toward `min` as ``t -> 0``.
70 Unlike `min`, `softmin` is affected by the scores of all tokens in the sentence.
71
72 token_score_method: {"self_confidence", "normalized_margin", "confidence_weighted_entropy"}, default="self_confidence"
73 Label quality scoring method for each token.
74
75 See :py:func:`cleanlab.rank.get_label_quality_scores <cleanlab.rank.get_label_quality_scores>` documentation for more info.
76
77 sentence_score_kwargs:
78 Optional keyword arguments for `sentence_score_method` function (for advanced users only).
79
80 See `cleanlab.token_classification.rank._softmin_sentence_score` for more info about keyword arguments supported for that scoring method.
81
82 Returns
83 -------
84 sentence_scores:
85 Array of shape ``(N, )`` of scores between 0 and 1, one per sentence in the dataset.
86
87 Lower scores indicate sentences more likely to contain a label issue.
88
89 token_scores:
90 List of ``pd.Series``, such that `token_info[i]` contains the
91 label quality scores for individual tokens in the `i`-th sentence.
92
93 If `tokens` strings were provided, they are used as index for each ``Series``.
94
95 Examples
96 --------
97 >>> import numpy as np
98 >>> from cleanlab.token_classification.rank import get_label_quality_scores
99 >>> labels = [[0, 0, 1], [0, 1]]
100 >>> pred_probs = [
101 ... np.array([[0.9, 0.1], [0.7, 0.3], [0.05, 0.95]]),
102 ... np.array([[0.8, 0.2], [0.8, 0.2]]),
103 ... ]
104 >>> sentence_scores, token_scores = get_label_quality_scores(labels, pred_probs)
105 >>> sentence_scores
106 array([0.7, 0.2])
107 >>> token_scores
108 [0 0.90
109 1 0.70
110 2 0.95
111 dtype: float64, 0 0.8
112 1 0.2
113 dtype: float64]
114 """
115 methods = ["min", "softmin"]
116 assert sentence_score_method in methods, "Select from the following methods:\n%s" % "\n".join(
117 methods
118 )
119
120 labels_flatten = np.array([l for label in labels for l in label])
121 pred_probs_flatten = np.array([p for pred_prob in pred_probs for p in pred_prob])
122
123 sentence_length = [len(label) for label in labels]
124
125 def nested_list(x, sentence_length):
126 i = iter(x)
127 return [[next(i) for _ in range(length)] for length in sentence_length]
128
129 token_scores = main_get_label_quality_scores(
130 labels=labels_flatten, pred_probs=pred_probs_flatten, method=token_score_method
131 )
132 scores_nl = nested_list(token_scores, sentence_length)
133
134 if sentence_score_method == "min":
135 sentence_scores = np.array(list(map(np.min, scores_nl)))
136 else:
137 assert sentence_score_method == "softmin"
138 temperature = sentence_score_kwargs.get("temperature", 0.05)
139 sentence_scores = _softmin_sentence_score(scores_nl, temperature=temperature)
140
141 if tokens:
142 token_info = [pd.Series(scores, index=token) for scores, token in zip(scores_nl, tokens)]
143 else:
144 token_info = [pd.Series(scores) for scores in scores_nl]
145 return sentence_scores, token_info
146
147
148 def issues_from_scores(
149 sentence_scores: np.ndarray, *, token_scores: Optional[list] = None, threshold: float = 0.1
150 ) -> Union[list, np.ndarray]:
151 """
152 Converts scores output by :py:func:`token_classification.rank.get_label_quality_scores <cleanlab.token_classification.rank.get_label_quality_scores>`
153 to a list of issues of similar format as output by :py:func:`token_classification.filter.find_label_issues <cleanlab.token_classification.filter.find_label_issues>`.
154
155 Only considers as issues those tokens with label quality score lower than `threshold`.
156
157 Issues are sorted by label quality score, from most severe to least.
158
159 Parameters
160 ----------
161 sentence_scores:
162 Array of shape `(N, )` of overall sentence scores, where `N` is the number of sentences in the dataset.
163
164 Same format as the `sentence_scores` returned by :py:func:`token_classification.rank.get_label_quality_scores <cleanlab.token_classification.rank.get_label_quality_scores>`.
165
166 token_scores:
167 Optional list such that `token_scores[i]` contains the individual token scores for the `i`-th sentence.
168
169 Same format as the `token_scores` returned by :py:func:`token_classification.rank.get_label_quality_scores <cleanlab.token_classification.rank.get_label_quality_scores>`.
170
171 threshold:
172 Tokens (or sentences, if `token_scores` is not provided) with quality scores above the `threshold` are not
173 included in the result.
174
175 Returns
176 ---------
177 issues:
178 List of label issues identified by comparing quality scores to threshold, such that each element is a tuple ``(i, j)``, which
179 indicates that the `j`-th token of the `i`-th sentence has a label issue.
180
181 These tuples are ordered in `issues` list based on the token label quality score.
182
183 Use :py:func:`token_classification.summary.display_issues <cleanlab.token_classification.summary.display_issues>`
184 to view these issues within the original sentences.
185
186 If `token_scores` is not provided, returns array of integer indices (rather than tuples) of the sentences whose label quality score
187 falls below the `threshold` (also sorted by overall label quality score of each sentence).
188
189 Examples
190 --------
191 >>> import numpy as np
192 >>> from cleanlab.token_classification.rank import issues_from_scores
193 >>> sentence_scores = np.array([0.1, 0.3, 0.6, 0.2, 0.05, 0.9, 0.8, 0.0125, 0.5, 0.6])
194 >>> issues_from_scores(sentence_scores)
195 array([7, 4])
196
197 Changing the score threshold
198
199 >>> issues_from_scores(sentence_scores, threshold=0.5)
200 array([7, 4, 0, 3, 1])
201
202 Providing token scores along with sentence scores finds issues at the token level
203
204 >>> token_scores = [
205 ... [0.9, 0.6],
206 ... [0.0, 0.8, 0.8],
207 ... [0.8, 0.8],
208 ... [0.1, 0.02, 0.3, 0.4],
209 ... [0.1, 0.2, 0.03, 0.4],
210 ... [0.1, 0.2, 0.3, 0.04],
211 ... [0.1, 0.2, 0.4],
212 ... [0.3, 0.4],
213 ... [0.08, 0.2, 0.5, 0.4],
214 ... [0.1, 0.2, 0.3, 0.4],
215 ... ]
216 >>> issues_from_scores(sentence_scores, token_scores=token_scores)
217 [(1, 0), (3, 1), (4, 2), (5, 3), (8, 0)]
218 """
219 if token_scores:
220 issues_with_scores = []
221 for sentence_index, scores in enumerate(token_scores):
222 for token_index, score in enumerate(scores):
223 if score < threshold:
224 issues_with_scores.append((sentence_index, token_index, score))
225
226 issues_with_scores = sorted(issues_with_scores, key=lambda x: x[2])
227 issues = [(i, j) for i, j, _ in issues_with_scores]
228 return issues
229
230 else:
231 ranking = np.argsort(sentence_scores)
232 cutoff = 0
233 while sentence_scores[ranking[cutoff]] < threshold and cutoff < len(ranking):
234 cutoff += 1
235 return ranking[:cutoff]
236
237
238 def _softmin_sentence_score(
239 token_scores: List[np.ndarray], *, temperature: float = 0.05
240 ) -> np.ndarray:
241 """
242 Sentence overall label quality scoring using the "softmin" method.
243
244 Parameters
245 ----------
246 token_scores:
247 Per-token label quality scores in nested list format,
248         where `token_scores[i]` is a list of scores for each token in the i'th sentence.
249
250 temperature:
251 Temperature of the softmax function.
252
253 Lower values encourage this method to converge toward the label quality score of the token with the lowest quality label in the sentence.
254
255 Higher values encourage this method to converge toward the average label quality score of all tokens in the sentence.
256
257 Returns
258 ---------
259 sentence_scores:
260 Array of shape ``(N, )``, where N is the number of sentences in the dataset, with one overall label quality score for each sentence.
261
262 Examples
263 ---------
264 >>> from cleanlab.token_classification.rank import _softmin_sentence_score
265 >>> token_scores = [[0.9, 0.6], [0.0, 0.8, 0.8], [0.8]]
266 >>> _softmin_sentence_score(token_scores)
267 array([6.00741787e-01, 1.80056239e-07, 8.00000000e-01])
268 """
269 if temperature == 0:
270 return np.array([np.min(scores) for scores in token_scores])
271
272 if temperature == np.inf:
273 return np.array([np.mean(scores) for scores in token_scores])
274
275 def softmax(scores: np.ndarray) -> np.ndarray:
276 exp_scores = np.exp(scores / temperature)
277 return exp_scores / np.sum(exp_scores)
278
279 def fun(scores: np.ndarray) -> float:
280 return np.dot(scores, softmax(1 - np.array(scores)))
281
282 sentence_scores = list(map(fun, token_scores))
283 return np.array(sentence_scores)
284
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/cleanlab/token_classification/rank.py b/cleanlab/token_classification/rank.py
--- a/cleanlab/token_classification/rank.py
+++ b/cleanlab/token_classification/rank.py
@@ -33,7 +33,7 @@
token_score_method: str = "self_confidence",
sentence_score_method: str = "min",
sentence_score_kwargs: dict = {},
-) -> Union[np.ndarray, Tuple[np.ndarray, list]]:
+) -> Tuple[np.ndarray, list]:
"""
Returns overall quality scores for the labels in each sentence, as well as for the individual tokens' labels in a token classification dataset.
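
For illustration, here is a minimal, hypothetical sketch (reusing the toy `labels`/`pred_probs` from the docstring example, and assuming a cleanlab build with this patch applied) showing that the function always hands back a two-element tuple, which is exactly what the corrected `Tuple[np.ndarray, list]` annotation describes:

```python
import numpy as np
from cleanlab.token_classification.rank import get_label_quality_scores

labels = [[0, 0, 1], [0, 1]]
pred_probs = [
    np.array([[0.9, 0.1], [0.7, 0.3], [0.05, 0.95]]),
    np.array([[0.8, 0.2], [0.8, 0.2]]),
]

result = get_label_quality_scores(labels, pred_probs)

# The function unconditionally returns (sentence_scores, token_scores);
# no code path produces a bare np.ndarray, so the Union annotation was misleading.
assert isinstance(result, tuple) and len(result) == 2
sentence_scores, token_scores = result
assert isinstance(sentence_scores, np.ndarray)
assert isinstance(token_scores, list)  # list of pd.Series, one per sentence
```

The narrower annotation also spares callers from having to narrow a `Union` whose array branch can never occur.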
|
{"golden_diff": "diff --git a/cleanlab/token_classification/rank.py b/cleanlab/token_classification/rank.py\n--- a/cleanlab/token_classification/rank.py\n+++ b/cleanlab/token_classification/rank.py\n@@ -33,7 +33,7 @@\n token_score_method: str = \"self_confidence\",\n sentence_score_method: str = \"min\",\n sentence_score_kwargs: dict = {},\n-) -> Union[np.ndarray, Tuple[np.ndarray, list]]:\n+) -> Tuple[np.ndarray, list]:\n \"\"\"\n Returns overall quality scores for the labels in each sentence, as well as for the individual tokens' labels in a token classification dataset.\n", "issue": "fix return type of get_label_quality_scores in token classification\nThe function only returns a `Tuple[np.ndarray, list]`, but it is annotated with:\r\n\r\nhttps://github.com/cleanlab/cleanlab/blob/fad4eb266dee8b9e2925d3f0d74fe4a81939eb8a/cleanlab/token_classification/rank.py#L36\n", "before_files": [{"content": "# Copyright (C) 2017-2022 Cleanlab Inc.\n# This file is part of cleanlab.\n#\n# cleanlab is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published\n# by the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# cleanlab is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with cleanlab. If not, see <https://www.gnu.org/licenses/>.\n\n\"\"\"\nMethods to rank and score sentences in a token classification dataset (text data), based on how likely they are to contain label errors.\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom typing import List, Optional, Union, Tuple\n\nfrom cleanlab.rank import get_label_quality_scores as main_get_label_quality_scores\n\n\ndef get_label_quality_scores(\n labels: list,\n pred_probs: list,\n *,\n tokens: Optional[list] = None,\n token_score_method: str = \"self_confidence\",\n sentence_score_method: str = \"min\",\n sentence_score_kwargs: dict = {},\n) -> Union[np.ndarray, Tuple[np.ndarray, list]]:\n \"\"\"\n Returns overall quality scores for the labels in each sentence, as well as for the individual tokens' labels in a token classification dataset.\n\n Each score is between 0 and 1.\n\n Lower scores indicate token labels that are less likely to be correct, or sentences that are more likely to contain a mislabeled token.\n\n Parameters\n ----------\n labels:\n Nested list of given labels for all tokens, such that `labels[i]` is a list of labels, one for each token in the `i`-th sentence.\n\n For a dataset with K classes, each label must be in 0, 1, ..., K-1.\n\n pred_probs:\n List of np arrays, such that `pred_probs[i]` has shape ``(T, K)`` if the `i`-th sentence contains T tokens.\n\n Each row of `pred_probs[i]` corresponds to a token `t` in the `i`-th sentence,\n and contains model-predicted probabilities that `t` belongs to each of the K possible classes.\n\n Columns of each `pred_probs[i]` should be ordered such that the probabilities correspond to class 0, 1, ..., K-1.\n\n tokens:\n Nested list such that `tokens[i]` is a list of tokens (strings/words) that comprise the `i`-th sentence.\n\n These strings are used to annotated the returned `token_scores` object, see its documentation for more information.\n\n sentence_score_method: {\"min\", \"softmin\"}, 
default=\"min\"\n Method to aggregate individual token label quality scores into a single score for the sentence.\n\n - `min`: sentence score = minimum of token scores in the sentence\n - `softmin`: sentence score = ``<s, softmax(1-s, t)>``, where `s` denotes the token label scores of the sentence, and ``<a, b> == np.dot(a, b)``.\n Here parameter `t` controls the softmax temperature, such that the score converges toward `min` as ``t -> 0``.\n Unlike `min`, `softmin` is affected by the scores of all tokens in the sentence.\n\n token_score_method: {\"self_confidence\", \"normalized_margin\", \"confidence_weighted_entropy\"}, default=\"self_confidence\"\n Label quality scoring method for each token.\n\n See :py:func:`cleanlab.rank.get_label_quality_scores <cleanlab.rank.get_label_quality_scores>` documentation for more info.\n\n sentence_score_kwargs:\n Optional keyword arguments for `sentence_score_method` function (for advanced users only).\n\n See `cleanlab.token_classification.rank._softmin_sentence_score` for more info about keyword arguments supported for that scoring method.\n\n Returns\n -------\n sentence_scores:\n Array of shape ``(N, )`` of scores between 0 and 1, one per sentence in the dataset.\n\n Lower scores indicate sentences more likely to contain a label issue.\n\n token_scores:\n List of ``pd.Series``, such that `token_info[i]` contains the\n label quality scores for individual tokens in the `i`-th sentence.\n\n If `tokens` strings were provided, they are used as index for each ``Series``.\n\n Examples\n --------\n >>> import numpy as np\n >>> from cleanlab.token_classification.rank import get_label_quality_scores\n >>> labels = [[0, 0, 1], [0, 1]]\n >>> pred_probs = [\n ... np.array([[0.9, 0.1], [0.7, 0.3], [0.05, 0.95]]),\n ... np.array([[0.8, 0.2], [0.8, 0.2]]),\n ... 
]\n >>> sentence_scores, token_scores = get_label_quality_scores(labels, pred_probs)\n >>> sentence_scores\n array([0.7, 0.2])\n >>> token_scores\n [0 0.90\n 1 0.70\n 2 0.95\n dtype: float64, 0 0.8\n 1 0.2\n dtype: float64]\n \"\"\"\n methods = [\"min\", \"softmin\"]\n assert sentence_score_method in methods, \"Select from the following methods:\\n%s\" % \"\\n\".join(\n methods\n )\n\n labels_flatten = np.array([l for label in labels for l in label])\n pred_probs_flatten = np.array([p for pred_prob in pred_probs for p in pred_prob])\n\n sentence_length = [len(label) for label in labels]\n\n def nested_list(x, sentence_length):\n i = iter(x)\n return [[next(i) for _ in range(length)] for length in sentence_length]\n\n token_scores = main_get_label_quality_scores(\n labels=labels_flatten, pred_probs=pred_probs_flatten, method=token_score_method\n )\n scores_nl = nested_list(token_scores, sentence_length)\n\n if sentence_score_method == \"min\":\n sentence_scores = np.array(list(map(np.min, scores_nl)))\n else:\n assert sentence_score_method == \"softmin\"\n temperature = sentence_score_kwargs.get(\"temperature\", 0.05)\n sentence_scores = _softmin_sentence_score(scores_nl, temperature=temperature)\n\n if tokens:\n token_info = [pd.Series(scores, index=token) for scores, token in zip(scores_nl, tokens)]\n else:\n token_info = [pd.Series(scores) for scores in scores_nl]\n return sentence_scores, token_info\n\n\ndef issues_from_scores(\n sentence_scores: np.ndarray, *, token_scores: Optional[list] = None, threshold: float = 0.1\n) -> Union[list, np.ndarray]:\n \"\"\"\n Converts scores output by :py:func:`token_classification.rank.get_label_quality_scores <cleanlab.token_classification.rank.get_label_quality_scores>`\n to a list of issues of similar format as output by :py:func:`token_classification.filter.find_label_issues <cleanlab.token_classification.filter.find_label_issues>`.\n\n Only considers as issues those tokens with label quality score lower than `threshold`.\n\n Issues are sorted by label quality score, from most severe to least.\n\n Parameters\n ----------\n sentence_scores:\n Array of shape `(N, )` of overall sentence scores, where `N` is the number of sentences in the dataset.\n\n Same format as the `sentence_scores` returned by :py:func:`token_classification.rank.get_label_quality_scores <cleanlab.token_classification.rank.get_label_quality_scores>`.\n\n token_scores:\n Optional list such that `token_scores[i]` contains the individual token scores for the `i`-th sentence.\n\n Same format as the `token_scores` returned by :py:func:`token_classification.rank.get_label_quality_scores <cleanlab.token_classification.rank.get_label_quality_scores>`.\n\n threshold:\n Tokens (or sentences, if `token_scores` is not provided) with quality scores above the `threshold` are not\n included in the result.\n\n Returns\n ---------\n issues:\n List of label issues identified by comparing quality scores to threshold, such that each element is a tuple ``(i, j)``, which\n indicates that the `j`-th token of the `i`-th sentence has a label issue.\n\n These tuples are ordered in `issues` list based on the token label quality score.\n\n Use :py:func:`token_classification.summary.display_issues <cleanlab.token_classification.summary.display_issues>`\n to view these issues within the original sentences.\n\n If `token_scores` is not provided, returns array of integer indices (rather than tuples) of the sentences whose label quality score\n falls below the `threshold` (also sorted by overall label quality 
score of each sentence).\n\n Examples\n --------\n >>> import numpy as np\n >>> from cleanlab.token_classification.rank import issues_from_scores\n >>> sentence_scores = np.array([0.1, 0.3, 0.6, 0.2, 0.05, 0.9, 0.8, 0.0125, 0.5, 0.6])\n >>> issues_from_scores(sentence_scores)\n array([7, 4])\n\n Changing the score threshold\n\n >>> issues_from_scores(sentence_scores, threshold=0.5)\n array([7, 4, 0, 3, 1])\n\n Providing token scores along with sentence scores finds issues at the token level\n\n >>> token_scores = [\n ... [0.9, 0.6],\n ... [0.0, 0.8, 0.8],\n ... [0.8, 0.8],\n ... [0.1, 0.02, 0.3, 0.4],\n ... [0.1, 0.2, 0.03, 0.4],\n ... [0.1, 0.2, 0.3, 0.04],\n ... [0.1, 0.2, 0.4],\n ... [0.3, 0.4],\n ... [0.08, 0.2, 0.5, 0.4],\n ... [0.1, 0.2, 0.3, 0.4],\n ... ]\n >>> issues_from_scores(sentence_scores, token_scores=token_scores)\n [(1, 0), (3, 1), (4, 2), (5, 3), (8, 0)]\n \"\"\"\n if token_scores:\n issues_with_scores = []\n for sentence_index, scores in enumerate(token_scores):\n for token_index, score in enumerate(scores):\n if score < threshold:\n issues_with_scores.append((sentence_index, token_index, score))\n\n issues_with_scores = sorted(issues_with_scores, key=lambda x: x[2])\n issues = [(i, j) for i, j, _ in issues_with_scores]\n return issues\n\n else:\n ranking = np.argsort(sentence_scores)\n cutoff = 0\n while sentence_scores[ranking[cutoff]] < threshold and cutoff < len(ranking):\n cutoff += 1\n return ranking[:cutoff]\n\n\ndef _softmin_sentence_score(\n token_scores: List[np.ndarray], *, temperature: float = 0.05\n) -> np.ndarray:\n \"\"\"\n Sentence overall label quality scoring using the \"softmin\" method.\n\n Parameters\n ----------\n token_scores:\n Per-token label quality scores in nested list format,\n where `token_scores[i]` is a list of scores for each toke in the i'th sentence.\n\n temperature:\n Temperature of the softmax function.\n\n Lower values encourage this method to converge toward the label quality score of the token with the lowest quality label in the sentence.\n\n Higher values encourage this method to converge toward the average label quality score of all tokens in the sentence.\n\n Returns\n ---------\n sentence_scores:\n Array of shape ``(N, )``, where N is the number of sentences in the dataset, with one overall label quality score for each sentence.\n\n Examples\n ---------\n >>> from cleanlab.token_classification.rank import _softmin_sentence_score\n >>> token_scores = [[0.9, 0.6], [0.0, 0.8, 0.8], [0.8]]\n >>> _softmin_sentence_score(token_scores)\n array([6.00741787e-01, 1.80056239e-07, 8.00000000e-01])\n \"\"\"\n if temperature == 0:\n return np.array([np.min(scores) for scores in token_scores])\n\n if temperature == np.inf:\n return np.array([np.mean(scores) for scores in token_scores])\n\n def softmax(scores: np.ndarray) -> np.ndarray:\n exp_scores = np.exp(scores / temperature)\n return exp_scores / np.sum(exp_scores)\n\n def fun(scores: np.ndarray) -> float:\n return np.dot(scores, softmax(1 - np.array(scores)))\n\n sentence_scores = list(map(fun, token_scores))\n return np.array(sentence_scores)\n", "path": "cleanlab/token_classification/rank.py"}], "after_files": [{"content": "# Copyright (C) 2017-2022 Cleanlab Inc.\n# This file is part of cleanlab.\n#\n# cleanlab is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published\n# by the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# cleanlab is distributed in 
the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with cleanlab. If not, see <https://www.gnu.org/licenses/>.\n\n\"\"\"\nMethods to rank and score sentences in a token classification dataset (text data), based on how likely they are to contain label errors.\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom typing import List, Optional, Union, Tuple\n\nfrom cleanlab.rank import get_label_quality_scores as main_get_label_quality_scores\n\n\ndef get_label_quality_scores(\n labels: list,\n pred_probs: list,\n *,\n tokens: Optional[list] = None,\n token_score_method: str = \"self_confidence\",\n sentence_score_method: str = \"min\",\n sentence_score_kwargs: dict = {},\n) -> Tuple[np.ndarray, list]:\n \"\"\"\n Returns overall quality scores for the labels in each sentence, as well as for the individual tokens' labels in a token classification dataset.\n\n Each score is between 0 and 1.\n\n Lower scores indicate token labels that are less likely to be correct, or sentences that are more likely to contain a mislabeled token.\n\n Parameters\n ----------\n labels:\n Nested list of given labels for all tokens, such that `labels[i]` is a list of labels, one for each token in the `i`-th sentence.\n\n For a dataset with K classes, each label must be in 0, 1, ..., K-1.\n\n pred_probs:\n List of np arrays, such that `pred_probs[i]` has shape ``(T, K)`` if the `i`-th sentence contains T tokens.\n\n Each row of `pred_probs[i]` corresponds to a token `t` in the `i`-th sentence,\n and contains model-predicted probabilities that `t` belongs to each of the K possible classes.\n\n Columns of each `pred_probs[i]` should be ordered such that the probabilities correspond to class 0, 1, ..., K-1.\n\n tokens:\n Nested list such that `tokens[i]` is a list of tokens (strings/words) that comprise the `i`-th sentence.\n\n These strings are used to annotated the returned `token_scores` object, see its documentation for more information.\n\n sentence_score_method: {\"min\", \"softmin\"}, default=\"min\"\n Method to aggregate individual token label quality scores into a single score for the sentence.\n\n - `min`: sentence score = minimum of token scores in the sentence\n - `softmin`: sentence score = ``<s, softmax(1-s, t)>``, where `s` denotes the token label scores of the sentence, and ``<a, b> == np.dot(a, b)``.\n Here parameter `t` controls the softmax temperature, such that the score converges toward `min` as ``t -> 0``.\n Unlike `min`, `softmin` is affected by the scores of all tokens in the sentence.\n\n token_score_method: {\"self_confidence\", \"normalized_margin\", \"confidence_weighted_entropy\"}, default=\"self_confidence\"\n Label quality scoring method for each token.\n\n See :py:func:`cleanlab.rank.get_label_quality_scores <cleanlab.rank.get_label_quality_scores>` documentation for more info.\n\n sentence_score_kwargs:\n Optional keyword arguments for `sentence_score_method` function (for advanced users only).\n\n See `cleanlab.token_classification.rank._softmin_sentence_score` for more info about keyword arguments supported for that scoring method.\n\n Returns\n -------\n sentence_scores:\n Array of shape ``(N, )`` of scores between 0 and 1, one per sentence in the dataset.\n\n Lower scores indicate sentences more likely to contain a label issue.\n\n 
token_scores:\n List of ``pd.Series``, such that `token_info[i]` contains the\n label quality scores for individual tokens in the `i`-th sentence.\n\n If `tokens` strings were provided, they are used as index for each ``Series``.\n\n Examples\n --------\n >>> import numpy as np\n >>> from cleanlab.token_classification.rank import get_label_quality_scores\n >>> labels = [[0, 0, 1], [0, 1]]\n >>> pred_probs = [\n ... np.array([[0.9, 0.1], [0.7, 0.3], [0.05, 0.95]]),\n ... np.array([[0.8, 0.2], [0.8, 0.2]]),\n ... ]\n >>> sentence_scores, token_scores = get_label_quality_scores(labels, pred_probs)\n >>> sentence_scores\n array([0.7, 0.2])\n >>> token_scores\n [0 0.90\n 1 0.70\n 2 0.95\n dtype: float64, 0 0.8\n 1 0.2\n dtype: float64]\n \"\"\"\n methods = [\"min\", \"softmin\"]\n assert sentence_score_method in methods, \"Select from the following methods:\\n%s\" % \"\\n\".join(\n methods\n )\n\n labels_flatten = np.array([l for label in labels for l in label])\n pred_probs_flatten = np.array([p for pred_prob in pred_probs for p in pred_prob])\n\n sentence_length = [len(label) for label in labels]\n\n def nested_list(x, sentence_length):\n i = iter(x)\n return [[next(i) for _ in range(length)] for length in sentence_length]\n\n token_scores = main_get_label_quality_scores(\n labels=labels_flatten, pred_probs=pred_probs_flatten, method=token_score_method\n )\n scores_nl = nested_list(token_scores, sentence_length)\n\n if sentence_score_method == \"min\":\n sentence_scores = np.array(list(map(np.min, scores_nl)))\n else:\n assert sentence_score_method == \"softmin\"\n temperature = sentence_score_kwargs.get(\"temperature\", 0.05)\n sentence_scores = _softmin_sentence_score(scores_nl, temperature=temperature)\n\n if tokens:\n token_info = [pd.Series(scores, index=token) for scores, token in zip(scores_nl, tokens)]\n else:\n token_info = [pd.Series(scores) for scores in scores_nl]\n return sentence_scores, token_info\n\n\ndef issues_from_scores(\n sentence_scores: np.ndarray, *, token_scores: Optional[list] = None, threshold: float = 0.1\n) -> Union[list, np.ndarray]:\n \"\"\"\n Converts scores output by :py:func:`token_classification.rank.get_label_quality_scores <cleanlab.token_classification.rank.get_label_quality_scores>`\n to a list of issues of similar format as output by :py:func:`token_classification.filter.find_label_issues <cleanlab.token_classification.filter.find_label_issues>`.\n\n Only considers as issues those tokens with label quality score lower than `threshold`.\n\n Issues are sorted by label quality score, from most severe to least.\n\n Parameters\n ----------\n sentence_scores:\n Array of shape `(N, )` of overall sentence scores, where `N` is the number of sentences in the dataset.\n\n Same format as the `sentence_scores` returned by :py:func:`token_classification.rank.get_label_quality_scores <cleanlab.token_classification.rank.get_label_quality_scores>`.\n\n token_scores:\n Optional list such that `token_scores[i]` contains the individual token scores for the `i`-th sentence.\n\n Same format as the `token_scores` returned by :py:func:`token_classification.rank.get_label_quality_scores <cleanlab.token_classification.rank.get_label_quality_scores>`.\n\n threshold:\n Tokens (or sentences, if `token_scores` is not provided) with quality scores above the `threshold` are not\n included in the result.\n\n Returns\n ---------\n issues:\n List of label issues identified by comparing quality scores to threshold, such that each element is a tuple ``(i, j)``, which\n indicates that the 
`j`-th token of the `i`-th sentence has a label issue.\n\n These tuples are ordered in `issues` list based on the token label quality score.\n\n Use :py:func:`token_classification.summary.display_issues <cleanlab.token_classification.summary.display_issues>`\n to view these issues within the original sentences.\n\n If `token_scores` is not provided, returns array of integer indices (rather than tuples) of the sentences whose label quality score\n falls below the `threshold` (also sorted by overall label quality score of each sentence).\n\n Examples\n --------\n >>> import numpy as np\n >>> from cleanlab.token_classification.rank import issues_from_scores\n >>> sentence_scores = np.array([0.1, 0.3, 0.6, 0.2, 0.05, 0.9, 0.8, 0.0125, 0.5, 0.6])\n >>> issues_from_scores(sentence_scores)\n array([7, 4])\n\n Changing the score threshold\n\n >>> issues_from_scores(sentence_scores, threshold=0.5)\n array([7, 4, 0, 3, 1])\n\n Providing token scores along with sentence scores finds issues at the token level\n\n >>> token_scores = [\n ... [0.9, 0.6],\n ... [0.0, 0.8, 0.8],\n ... [0.8, 0.8],\n ... [0.1, 0.02, 0.3, 0.4],\n ... [0.1, 0.2, 0.03, 0.4],\n ... [0.1, 0.2, 0.3, 0.04],\n ... [0.1, 0.2, 0.4],\n ... [0.3, 0.4],\n ... [0.08, 0.2, 0.5, 0.4],\n ... [0.1, 0.2, 0.3, 0.4],\n ... ]\n >>> issues_from_scores(sentence_scores, token_scores=token_scores)\n [(1, 0), (3, 1), (4, 2), (5, 3), (8, 0)]\n \"\"\"\n if token_scores:\n issues_with_scores = []\n for sentence_index, scores in enumerate(token_scores):\n for token_index, score in enumerate(scores):\n if score < threshold:\n issues_with_scores.append((sentence_index, token_index, score))\n\n issues_with_scores = sorted(issues_with_scores, key=lambda x: x[2])\n issues = [(i, j) for i, j, _ in issues_with_scores]\n return issues\n\n else:\n ranking = np.argsort(sentence_scores)\n cutoff = 0\n while sentence_scores[ranking[cutoff]] < threshold and cutoff < len(ranking):\n cutoff += 1\n return ranking[:cutoff]\n\n\ndef _softmin_sentence_score(\n token_scores: List[np.ndarray], *, temperature: float = 0.05\n) -> np.ndarray:\n \"\"\"\n Sentence overall label quality scoring using the \"softmin\" method.\n\n Parameters\n ----------\n token_scores:\n Per-token label quality scores in nested list format,\n where `token_scores[i]` is a list of scores for each toke in the i'th sentence.\n\n temperature:\n Temperature of the softmax function.\n\n Lower values encourage this method to converge toward the label quality score of the token with the lowest quality label in the sentence.\n\n Higher values encourage this method to converge toward the average label quality score of all tokens in the sentence.\n\n Returns\n ---------\n sentence_scores:\n Array of shape ``(N, )``, where N is the number of sentences in the dataset, with one overall label quality score for each sentence.\n\n Examples\n ---------\n >>> from cleanlab.token_classification.rank import _softmin_sentence_score\n >>> token_scores = [[0.9, 0.6], [0.0, 0.8, 0.8], [0.8]]\n >>> _softmin_sentence_score(token_scores)\n array([6.00741787e-01, 1.80056239e-07, 8.00000000e-01])\n \"\"\"\n if temperature == 0:\n return np.array([np.min(scores) for scores in token_scores])\n\n if temperature == np.inf:\n return np.array([np.mean(scores) for scores in token_scores])\n\n def softmax(scores: np.ndarray) -> np.ndarray:\n exp_scores = np.exp(scores / temperature)\n return exp_scores / np.sum(exp_scores)\n\n def fun(scores: np.ndarray) -> float:\n return np.dot(scores, softmax(1 - np.array(scores)))\n\n sentence_scores 
= list(map(fun, token_scores))\n return np.array(sentence_scores)\n", "path": "cleanlab/token_classification/rank.py"}]}
| 4,079 | 135 |
gh_patches_debug_15473
|
rasdani/github-patches
|
git_diff
|
matrix-org__synapse-5354
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Implement rooms v5
Synapse v1.0 will default to rooms v4; however, the intention is that it also ships with rooms v5, which is envisaged to be rooms v4 + https://github.com/matrix-org/synapse/issues/4364
Rooms v5 is yet to be MSC'd, so it does not formally exist at this point; this issue is a placeholder.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `synapse/api/room_versions.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright 2019 New Vector Ltd
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 import attr
16
17
18 class EventFormatVersions(object):
19 """This is an internal enum for tracking the version of the event format,
20 independently from the room version.
21 """
22 V1 = 1 # $id:server event id format
23 V2 = 2 # MSC1659-style $hash event id format: introduced for room v3
24 V3 = 3 # MSC1884-style $hash format: introduced for room v4
25
26
27 KNOWN_EVENT_FORMAT_VERSIONS = {
28 EventFormatVersions.V1,
29 EventFormatVersions.V2,
30 EventFormatVersions.V3,
31 }
32
33
34 class StateResolutionVersions(object):
35 """Enum to identify the state resolution algorithms"""
36 V1 = 1 # room v1 state res
37 V2 = 2 # MSC1442 state res: room v2 and later
38
39
40 class RoomDisposition(object):
41 STABLE = "stable"
42 UNSTABLE = "unstable"
43
44
45 @attr.s(slots=True, frozen=True)
46 class RoomVersion(object):
47 """An object which describes the unique attributes of a room version."""
48
49 identifier = attr.ib() # str; the identifier for this version
50 disposition = attr.ib() # str; one of the RoomDispositions
51 event_format = attr.ib() # int; one of the EventFormatVersions
52 state_res = attr.ib() # int; one of the StateResolutionVersions
53 enforce_key_validity = attr.ib() # bool
54
55
56 class RoomVersions(object):
57 V1 = RoomVersion(
58 "1",
59 RoomDisposition.STABLE,
60 EventFormatVersions.V1,
61 StateResolutionVersions.V1,
62 enforce_key_validity=False,
63 )
64 V2 = RoomVersion(
65 "2",
66 RoomDisposition.STABLE,
67 EventFormatVersions.V1,
68 StateResolutionVersions.V2,
69 enforce_key_validity=False,
70 )
71 V3 = RoomVersion(
72 "3",
73 RoomDisposition.STABLE,
74 EventFormatVersions.V2,
75 StateResolutionVersions.V2,
76 enforce_key_validity=False,
77 )
78 V4 = RoomVersion(
79 "4",
80 RoomDisposition.STABLE,
81 EventFormatVersions.V3,
82 StateResolutionVersions.V2,
83 enforce_key_validity=False,
84 )
85 VDH_TEST_KEY_VALIDITY = RoomVersion(
86 "vdh-test-key-validity",
87 RoomDisposition.UNSTABLE,
88 EventFormatVersions.V3,
89 StateResolutionVersions.V2,
90 enforce_key_validity=False,
91 )
92
93
94 KNOWN_ROOM_VERSIONS = {
95 v.identifier: v for v in (
96 RoomVersions.V1,
97 RoomVersions.V2,
98 RoomVersions.V3,
99 RoomVersions.V4,
100 RoomVersions.VDH_TEST_KEY_VALIDITY,
101 )
102 } # type: dict[str, RoomVersion]
103
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py
--- a/synapse/api/room_versions.py
+++ b/synapse/api/room_versions.py
@@ -82,12 +82,12 @@
StateResolutionVersions.V2,
enforce_key_validity=False,
)
- VDH_TEST_KEY_VALIDITY = RoomVersion(
- "vdh-test-key-validity",
- RoomDisposition.UNSTABLE,
+ V5 = RoomVersion(
+ "5",
+ RoomDisposition.STABLE,
EventFormatVersions.V3,
StateResolutionVersions.V2,
- enforce_key_validity=False,
+ enforce_key_validity=True,
)
@@ -97,6 +97,6 @@
RoomVersions.V2,
RoomVersions.V3,
RoomVersions.V4,
- RoomVersions.VDH_TEST_KEY_VALIDITY,
+ RoomVersions.V5,
)
} # type: dict[str, RoomVersion]
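
A rough sketch of how the patched module could be exercised (assuming it is importable as `synapse.api.room_versions`); the observable change is that the stable identifier "5" resolves to a room version with key validity enforced, while the unstable vdh-test identifier is gone:

```python
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions

v5 = KNOWN_ROOM_VERSIONS["5"]
assert v5 is RoomVersions.V5
assert v5.enforce_key_validity is True                     # rooms v4 plus issue #4364
assert v5.event_format == RoomVersions.V4.event_format     # event format unchanged from v4
assert v5.state_res == RoomVersions.V4.state_res           # state resolution unchanged from v4
assert "vdh-test-key-validity" not in KNOWN_ROOM_VERSIONS  # unstable placeholder removed
```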
|
{"golden_diff": "diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py\n--- a/synapse/api/room_versions.py\n+++ b/synapse/api/room_versions.py\n@@ -82,12 +82,12 @@\n StateResolutionVersions.V2,\n enforce_key_validity=False,\n )\n- VDH_TEST_KEY_VALIDITY = RoomVersion(\n- \"vdh-test-key-validity\",\n- RoomDisposition.UNSTABLE,\n+ V5 = RoomVersion(\n+ \"5\",\n+ RoomDisposition.STABLE,\n EventFormatVersions.V3,\n StateResolutionVersions.V2,\n- enforce_key_validity=False,\n+ enforce_key_validity=True,\n )\n \n \n@@ -97,6 +97,6 @@\n RoomVersions.V2,\n RoomVersions.V3,\n RoomVersions.V4,\n- RoomVersions.VDH_TEST_KEY_VALIDITY,\n+ RoomVersions.V5,\n )\n } # type: dict[str, RoomVersion]\n", "issue": "Implement rooms v5\nSynapse v1.0 will default to rooms v4, however the intention is that it also ships with rooms v5 which is envisaged to be rooms v4 + https://github.com/matrix-org/synapse/issues/4364\r\n\r\nrooms v5 is yet to be MSC'd so does not formally exist at this point, this issue is a place holder.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2019 New Vector Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport attr\n\n\nclass EventFormatVersions(object):\n \"\"\"This is an internal enum for tracking the version of the event format,\n independently from the room version.\n \"\"\"\n V1 = 1 # $id:server event id format\n V2 = 2 # MSC1659-style $hash event id format: introduced for room v3\n V3 = 3 # MSC1884-style $hash format: introduced for room v4\n\n\nKNOWN_EVENT_FORMAT_VERSIONS = {\n EventFormatVersions.V1,\n EventFormatVersions.V2,\n EventFormatVersions.V3,\n}\n\n\nclass StateResolutionVersions(object):\n \"\"\"Enum to identify the state resolution algorithms\"\"\"\n V1 = 1 # room v1 state res\n V2 = 2 # MSC1442 state res: room v2 and later\n\n\nclass RoomDisposition(object):\n STABLE = \"stable\"\n UNSTABLE = \"unstable\"\n\n\[email protected](slots=True, frozen=True)\nclass RoomVersion(object):\n \"\"\"An object which describes the unique attributes of a room version.\"\"\"\n\n identifier = attr.ib() # str; the identifier for this version\n disposition = attr.ib() # str; one of the RoomDispositions\n event_format = attr.ib() # int; one of the EventFormatVersions\n state_res = attr.ib() # int; one of the StateResolutionVersions\n enforce_key_validity = attr.ib() # bool\n\n\nclass RoomVersions(object):\n V1 = RoomVersion(\n \"1\",\n RoomDisposition.STABLE,\n EventFormatVersions.V1,\n StateResolutionVersions.V1,\n enforce_key_validity=False,\n )\n V2 = RoomVersion(\n \"2\",\n RoomDisposition.STABLE,\n EventFormatVersions.V1,\n StateResolutionVersions.V2,\n enforce_key_validity=False,\n )\n V3 = RoomVersion(\n \"3\",\n RoomDisposition.STABLE,\n EventFormatVersions.V2,\n StateResolutionVersions.V2,\n enforce_key_validity=False,\n )\n V4 = RoomVersion(\n \"4\",\n RoomDisposition.STABLE,\n EventFormatVersions.V3,\n StateResolutionVersions.V2,\n enforce_key_validity=False,\n )\n VDH_TEST_KEY_VALIDITY = RoomVersion(\n \"vdh-test-key-validity\",\n 
RoomDisposition.UNSTABLE,\n EventFormatVersions.V3,\n StateResolutionVersions.V2,\n enforce_key_validity=False,\n )\n\n\nKNOWN_ROOM_VERSIONS = {\n v.identifier: v for v in (\n RoomVersions.V1,\n RoomVersions.V2,\n RoomVersions.V3,\n RoomVersions.V4,\n RoomVersions.VDH_TEST_KEY_VALIDITY,\n )\n} # type: dict[str, RoomVersion]\n", "path": "synapse/api/room_versions.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2019 New Vector Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport attr\n\n\nclass EventFormatVersions(object):\n \"\"\"This is an internal enum for tracking the version of the event format,\n independently from the room version.\n \"\"\"\n V1 = 1 # $id:server event id format\n V2 = 2 # MSC1659-style $hash event id format: introduced for room v3\n V3 = 3 # MSC1884-style $hash format: introduced for room v4\n\n\nKNOWN_EVENT_FORMAT_VERSIONS = {\n EventFormatVersions.V1,\n EventFormatVersions.V2,\n EventFormatVersions.V3,\n}\n\n\nclass StateResolutionVersions(object):\n \"\"\"Enum to identify the state resolution algorithms\"\"\"\n V1 = 1 # room v1 state res\n V2 = 2 # MSC1442 state res: room v2 and later\n\n\nclass RoomDisposition(object):\n STABLE = \"stable\"\n UNSTABLE = \"unstable\"\n\n\[email protected](slots=True, frozen=True)\nclass RoomVersion(object):\n \"\"\"An object which describes the unique attributes of a room version.\"\"\"\n\n identifier = attr.ib() # str; the identifier for this version\n disposition = attr.ib() # str; one of the RoomDispositions\n event_format = attr.ib() # int; one of the EventFormatVersions\n state_res = attr.ib() # int; one of the StateResolutionVersions\n enforce_key_validity = attr.ib() # bool\n\n\nclass RoomVersions(object):\n V1 = RoomVersion(\n \"1\",\n RoomDisposition.STABLE,\n EventFormatVersions.V1,\n StateResolutionVersions.V1,\n enforce_key_validity=False,\n )\n V2 = RoomVersion(\n \"2\",\n RoomDisposition.STABLE,\n EventFormatVersions.V1,\n StateResolutionVersions.V2,\n enforce_key_validity=False,\n )\n V3 = RoomVersion(\n \"3\",\n RoomDisposition.STABLE,\n EventFormatVersions.V2,\n StateResolutionVersions.V2,\n enforce_key_validity=False,\n )\n V4 = RoomVersion(\n \"4\",\n RoomDisposition.STABLE,\n EventFormatVersions.V3,\n StateResolutionVersions.V2,\n enforce_key_validity=False,\n )\n V5 = RoomVersion(\n \"5\",\n RoomDisposition.STABLE,\n EventFormatVersions.V3,\n StateResolutionVersions.V2,\n enforce_key_validity=True,\n )\n\n\nKNOWN_ROOM_VERSIONS = {\n v.identifier: v for v in (\n RoomVersions.V1,\n RoomVersions.V2,\n RoomVersions.V3,\n RoomVersions.V4,\n RoomVersions.V5,\n )\n} # type: dict[str, RoomVersion]\n", "path": "synapse/api/room_versions.py"}]}
| 1,290 | 218 |
gh_patches_debug_2853
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-python-1545
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Typo in setup.py for 1.9.1 release breaks `conda export`
### How do you use Sentry?
Self-hosted/on-premise
### Version
1.9.1
### Steps to Reproduce
This line
https://github.com/getsentry/sentry-python/commit/b7c0dc412a1505fff382732f567952c8a9572b60#diff-60f61ab7a8d1910d86d9fda2261620314edcae5894d5aaa236b821c7256badd7R43
`'urllib3>=1.26.11"; python_version >="3.6"',`
has a stray `"` after the urllib3 version number.
This gets past pip/PyPI but confuses conda, causing a very opaque error message when you try to run `conda export`:
```
InvalidVersionSpec: Invalid version '(>=1.26.11': unable to convert to expression tree: ['(']
```
Steps to reproduce:
```
conda create -n test-sentry-sdk python=3.8
conda activate test-sentry-sdk
pip install sentry-sdk==1.9.1
conda export
```
### Expected Result
Expect to get a conda environment yaml spec.
### Actual Result
```
InvalidVersionSpec: Invalid version '(>=1.26.11': unable to convert to expression tree: ['(']
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 """
4 Sentry-Python - Sentry SDK for Python
5 =====================================
6
7 **Sentry-Python is an SDK for Sentry.** Check out `GitHub
8 <https://github.com/getsentry/sentry-python>`_ to find out more.
9 """
10
11 import os
12 from setuptools import setup, find_packages
13
14 here = os.path.abspath(os.path.dirname(__file__))
15
16
17 def get_file_text(file_name):
18 with open(os.path.join(here, file_name)) as in_file:
19 return in_file.read()
20
21
22 setup(
23 name="sentry-sdk",
24 version="1.9.1",
25 author="Sentry Team and Contributors",
26 author_email="[email protected]",
27 url="https://github.com/getsentry/sentry-python",
28 project_urls={
29 "Documentation": "https://docs.sentry.io/platforms/python/",
30 "Changelog": "https://github.com/getsentry/sentry-python/blob/master/CHANGELOG.md",
31 },
32 description="Python client for Sentry (https://sentry.io)",
33 long_description=get_file_text("README.md"),
34 long_description_content_type="text/markdown",
35 packages=find_packages(exclude=("tests", "tests.*")),
36 # PEP 561
37 package_data={"sentry_sdk": ["py.typed"]},
38 zip_safe=False,
39 license="BSD",
40 install_requires=[
41 'urllib3>=1.25.7; python_version<="3.4"',
42 'urllib3>=1.26.9; python_version>="3.5"',
43 'urllib3>=1.26.11"; python_version >="3.6"',
44 "certifi",
45 ],
46 extras_require={
47 "flask": ["flask>=0.11", "blinker>=1.1"],
48 "quart": ["quart>=0.16.1", "blinker>=1.1"],
49 "bottle": ["bottle>=0.12.13"],
50 "falcon": ["falcon>=1.4"],
51 "django": ["django>=1.8"],
52 "sanic": ["sanic>=0.8"],
53 "celery": ["celery>=3"],
54 "beam": ["apache-beam>=2.12"],
55 "rq": ["rq>=0.6"],
56 "aiohttp": ["aiohttp>=3.5"],
57 "tornado": ["tornado>=5"],
58 "sqlalchemy": ["sqlalchemy>=1.2"],
59 "pyspark": ["pyspark>=2.4.4"],
60 "pure_eval": ["pure_eval", "executing", "asttokens"],
61 "chalice": ["chalice>=1.16.0"],
62 "httpx": ["httpx>=0.16.0"],
63 "starlette": ["starlette>=0.19.1"],
64 "fastapi": ["fastapi>=0.79.0"],
65 },
66 classifiers=[
67 "Development Status :: 5 - Production/Stable",
68 "Environment :: Web Environment",
69 "Intended Audience :: Developers",
70 "License :: OSI Approved :: BSD License",
71 "Operating System :: OS Independent",
72 "Programming Language :: Python",
73 "Programming Language :: Python :: 2",
74 "Programming Language :: Python :: 2.7",
75 "Programming Language :: Python :: 3",
76 "Programming Language :: Python :: 3.4",
77 "Programming Language :: Python :: 3.5",
78 "Programming Language :: Python :: 3.6",
79 "Programming Language :: Python :: 3.7",
80 "Programming Language :: Python :: 3.8",
81 "Programming Language :: Python :: 3.9",
82 "Programming Language :: Python :: 3.10",
83 "Topic :: Software Development :: Libraries :: Python Modules",
84 ],
85 options={"bdist_wheel": {"universal": "1"}},
86 )
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -40,7 +40,7 @@
install_requires=[
'urllib3>=1.25.7; python_version<="3.4"',
'urllib3>=1.26.9; python_version>="3.5"',
- 'urllib3>=1.26.11"; python_version >="3.6"',
+ 'urllib3>=1.26.11; python_version >="3.6"',
"certifi",
],
extras_require={
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -40,7 +40,7 @@\n install_requires=[\n 'urllib3>=1.25.7; python_version<=\"3.4\"',\n 'urllib3>=1.26.9; python_version>=\"3.5\"',\n- 'urllib3>=1.26.11\"; python_version >=\"3.6\"',\n+ 'urllib3>=1.26.11; python_version >=\"3.6\"',\n \"certifi\",\n ],\n extras_require={\n", "issue": "Typo in setup.py for 1.9.1 release breaks `conda export`\n### How do you use Sentry?\n\nSelf-hosted/on-premise\n\n### Version\n\n1.9.1\n\n### Steps to Reproduce\n\nThis line \r\nhttps://github.com/getsentry/sentry-python/commit/b7c0dc412a1505fff382732f567952c8a9572b60#diff-60f61ab7a8d1910d86d9fda2261620314edcae5894d5aaa236b821c7256badd7R43\r\n\r\n`'urllib3>=1.26.11\"; python_version >=\"3.6\"',`\r\n\r\nHas a stray `\"` after the urllib3 version number.\r\n\r\nThis passes by pipi but confuses conda, causing a very opaque error message when you try to run `conda export`:\r\n```\r\nInvalidVersionSpec: Invalid version '(>=1.26.11': unable to convert to expression tree: ['(']\r\n```\r\n\r\nSteps to reproduce:\r\n```\r\nconda create -n test-sentry-sdk python=3.8\r\nconda activate test-sentry-sdk\r\npip install sentry-sdk==1.9.1\r\nconda export\r\n```\n\n### Expected Result\n\nExpect to get a conda environment yaml spec.\n\n### Actual Result\n\n```\r\nInvalidVersionSpec: Invalid version '(>=1.26.11': unable to convert to expression tree: ['(']\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\nSentry-Python - Sentry SDK for Python\n=====================================\n\n**Sentry-Python is an SDK for Sentry.** Check out `GitHub\n<https://github.com/getsentry/sentry-python>`_ to find out more.\n\"\"\"\n\nimport os\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef get_file_text(file_name):\n with open(os.path.join(here, file_name)) as in_file:\n return in_file.read()\n\n\nsetup(\n name=\"sentry-sdk\",\n version=\"1.9.1\",\n author=\"Sentry Team and Contributors\",\n author_email=\"[email protected]\",\n url=\"https://github.com/getsentry/sentry-python\",\n project_urls={\n \"Documentation\": \"https://docs.sentry.io/platforms/python/\",\n \"Changelog\": \"https://github.com/getsentry/sentry-python/blob/master/CHANGELOG.md\",\n },\n description=\"Python client for Sentry (https://sentry.io)\",\n long_description=get_file_text(\"README.md\"),\n long_description_content_type=\"text/markdown\",\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n # PEP 561\n package_data={\"sentry_sdk\": [\"py.typed\"]},\n zip_safe=False,\n license=\"BSD\",\n install_requires=[\n 'urllib3>=1.25.7; python_version<=\"3.4\"',\n 'urllib3>=1.26.9; python_version>=\"3.5\"',\n 'urllib3>=1.26.11\"; python_version >=\"3.6\"',\n \"certifi\",\n ],\n extras_require={\n \"flask\": [\"flask>=0.11\", \"blinker>=1.1\"],\n \"quart\": [\"quart>=0.16.1\", \"blinker>=1.1\"],\n \"bottle\": [\"bottle>=0.12.13\"],\n \"falcon\": [\"falcon>=1.4\"],\n \"django\": [\"django>=1.8\"],\n \"sanic\": [\"sanic>=0.8\"],\n \"celery\": [\"celery>=3\"],\n \"beam\": [\"apache-beam>=2.12\"],\n \"rq\": [\"rq>=0.6\"],\n \"aiohttp\": [\"aiohttp>=3.5\"],\n \"tornado\": [\"tornado>=5\"],\n \"sqlalchemy\": [\"sqlalchemy>=1.2\"],\n \"pyspark\": [\"pyspark>=2.4.4\"],\n \"pure_eval\": [\"pure_eval\", \"executing\", \"asttokens\"],\n \"chalice\": [\"chalice>=1.16.0\"],\n \"httpx\": [\"httpx>=0.16.0\"],\n \"starlette\": [\"starlette>=0.19.1\"],\n \"fastapi\": [\"fastapi>=0.79.0\"],\n },\n classifiers=[\n \"Development 
Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n options={\"bdist_wheel\": {\"universal\": \"1\"}},\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\nSentry-Python - Sentry SDK for Python\n=====================================\n\n**Sentry-Python is an SDK for Sentry.** Check out `GitHub\n<https://github.com/getsentry/sentry-python>`_ to find out more.\n\"\"\"\n\nimport os\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef get_file_text(file_name):\n with open(os.path.join(here, file_name)) as in_file:\n return in_file.read()\n\n\nsetup(\n name=\"sentry-sdk\",\n version=\"1.9.1\",\n author=\"Sentry Team and Contributors\",\n author_email=\"[email protected]\",\n url=\"https://github.com/getsentry/sentry-python\",\n project_urls={\n \"Documentation\": \"https://docs.sentry.io/platforms/python/\",\n \"Changelog\": \"https://github.com/getsentry/sentry-python/blob/master/CHANGELOG.md\",\n },\n description=\"Python client for Sentry (https://sentry.io)\",\n long_description=get_file_text(\"README.md\"),\n long_description_content_type=\"text/markdown\",\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n # PEP 561\n package_data={\"sentry_sdk\": [\"py.typed\"]},\n zip_safe=False,\n license=\"BSD\",\n install_requires=[\n 'urllib3>=1.25.7; python_version<=\"3.4\"',\n 'urllib3>=1.26.9; python_version>=\"3.5\"',\n 'urllib3>=1.26.11; python_version >=\"3.6\"',\n \"certifi\",\n ],\n extras_require={\n \"flask\": [\"flask>=0.11\", \"blinker>=1.1\"],\n \"quart\": [\"quart>=0.16.1\", \"blinker>=1.1\"],\n \"bottle\": [\"bottle>=0.12.13\"],\n \"falcon\": [\"falcon>=1.4\"],\n \"django\": [\"django>=1.8\"],\n \"sanic\": [\"sanic>=0.8\"],\n \"celery\": [\"celery>=3\"],\n \"beam\": [\"apache-beam>=2.12\"],\n \"rq\": [\"rq>=0.6\"],\n \"aiohttp\": [\"aiohttp>=3.5\"],\n \"tornado\": [\"tornado>=5\"],\n \"sqlalchemy\": [\"sqlalchemy>=1.2\"],\n \"pyspark\": [\"pyspark>=2.4.4\"],\n \"pure_eval\": [\"pure_eval\", \"executing\", \"asttokens\"],\n \"chalice\": [\"chalice>=1.16.0\"],\n \"httpx\": [\"httpx>=0.16.0\"],\n \"starlette\": [\"starlette>=0.19.1\"],\n \"fastapi\": [\"fastapi>=0.79.0\"],\n },\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n 
\"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n options={\"bdist_wheel\": {\"universal\": \"1\"}},\n)\n", "path": "setup.py"}]}
| 1,609 | 135 |
gh_patches_debug_34016
|
rasdani/github-patches
|
git_diff
|
akvo__akvo-rsr-2044
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
File upload for Indicator Period Data requires Django login
When uploading a photo/file to the /rest/v1/indicator_period_data/nnn/upload_file/ endpoint, a Django login is required, which is not the case for the rest of the API; an API key should be sufficient.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rest/views/indicator_period_data.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7
8 from akvo.rsr.models import IndicatorPeriodData, IndicatorPeriodDataComment
9
10 from ..serializers import (IndicatorPeriodDataSerializer, IndicatorPeriodDataFrameworkSerializer,
11 IndicatorPeriodDataCommentSerializer)
12 from ..viewsets import PublicProjectViewSet
13
14 from rest_framework.decorators import api_view, permission_classes
15 from rest_framework.permissions import IsAuthenticated
16 from rest_framework.response import Response
17
18
19 class IndicatorPeriodDataViewSet(PublicProjectViewSet):
20 """
21 """
22 queryset = IndicatorPeriodData.objects.all()
23 serializer_class = IndicatorPeriodDataSerializer
24 filter_fields = {
25 'period': ['exact'],
26 'period__indicator': ['exact'],
27 'period__indicator__result': ['exact'],
28 'period__indicator__result__project': ['exact'],
29 'user': ['exact'],
30 'status': ['exact'],
31 'created_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],
32 'last_modified_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],
33 'relative_data': ['exact'],
34 'data': ['exact', 'gt', 'gte', 'lt', 'lte', ],
35 'period_actual_value': ['exact', 'gt', 'gte', 'lt', 'lte', ],
36 'update_method': ['exact']
37 }
38
39 project_relation = 'period__indicator__result__project__'
40
41
42 class IndicatorPeriodDataFrameworkViewSet(PublicProjectViewSet):
43 """
44 """
45 queryset = IndicatorPeriodData.objects.all()
46 serializer_class = IndicatorPeriodDataFrameworkSerializer
47 filter_fields = {
48 'period': ['exact'],
49 'period__indicator': ['exact'],
50 'period__indicator__result': ['exact'],
51 'period__indicator__result__project': ['exact'],
52 'user': ['exact'],
53 'status': ['exact'],
54 'created_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],
55 'last_modified_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],
56 'relative_data': ['exact'],
57 'data': ['exact', 'gt', 'gte', 'lt', 'lte', ],
58 'period_actual_value': ['exact', 'gt', 'gte', 'lt', 'lte', ],
59 'update_method': ['exact']
60 }
61 project_relation = 'period__indicator__result__project__'
62
63
64 class IndicatorPeriodDataCommentViewSet(PublicProjectViewSet):
65 """
66 """
67 queryset = IndicatorPeriodDataComment.objects.all()
68 serializer_class = IndicatorPeriodDataCommentSerializer
69 filter_fields = {
70 'data': ['exact'],
71 'data__period': ['exact'],
72 'data__period__indicator': ['exact'],
73 'data__period__indicator__result': ['exact'],
74 'data__period__indicator__result__project': ['exact'],
75 'user': ['exact'],
76 'created_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],
77 'last_modified_at': ['exact', 'gt', 'gte', 'lt', 'lte', ]
78 }
79 project_relation = 'period__indicator__result__project__'
80
81
82 @api_view(['POST'])
83 @permission_classes((IsAuthenticated, ))
84 def indicator_upload_file(request, pk=None):
85 """
86 Special API call for directly uploading a file.
87
88 :param request; A Django request object.
89 :param pk; The primary key of an IndicatorPeriodData instance.
90 """
91 update = IndicatorPeriodData.objects.get(pk=pk)
92 upload_file = request.FILES['file']
93
94 # TODO: Permissions
95 # user = request.user
96
97 file_type = request.POST.copy()['type']
98 if file_type == 'photo':
99 update.photo = upload_file
100 update.save(update_fields=['photo'])
101 return Response({'file': update.photo.url})
102 elif file_type == 'file':
103 update.file = upload_file
104 update.save(update_fields=['file'])
105 return Response({'file': update.file.url})
106
107 # TODO: Error response
108 return Response({})
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/akvo/rest/views/indicator_period_data.py b/akvo/rest/views/indicator_period_data.py
--- a/akvo/rest/views/indicator_period_data.py
+++ b/akvo/rest/views/indicator_period_data.py
@@ -11,8 +11,10 @@
IndicatorPeriodDataCommentSerializer)
from ..viewsets import PublicProjectViewSet
+from django.http import HttpResponseForbidden
+
+from rest_framework import status
from rest_framework.decorators import api_view, permission_classes
-from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
@@ -80,7 +82,6 @@
@api_view(['POST'])
-@permission_classes((IsAuthenticated, ))
def indicator_upload_file(request, pk=None):
"""
Special API call for directly uploading a file.
@@ -91,18 +92,24 @@
update = IndicatorPeriodData.objects.get(pk=pk)
upload_file = request.FILES['file']
- # TODO: Permissions
- # user = request.user
-
- file_type = request.POST.copy()['type']
- if file_type == 'photo':
- update.photo = upload_file
- update.save(update_fields=['photo'])
- return Response({'file': update.photo.url})
- elif file_type == 'file':
- update.file = upload_file
- update.save(update_fields=['file'])
- return Response({'file': update.file.url})
-
- # TODO: Error response
- return Response({})
+ # Permissions
+ user = getattr(request, 'user', None)
+ if not user:
+ return Response({'error': 'User is not logged in'}, status=status.HTTP_403_FORBIDDEN)
+
+ if not user.has_perm('rsr.change_project', update.period.indicator.result.project):
+ return Response({'error': 'User has no permission to place an update'},
+ status=status.HTTP_403_FORBIDDEN)
+
+ try:
+ file_type = request.POST.copy()['type']
+ if file_type == 'photo':
+ update.photo = upload_file
+ update.save(update_fields=['photo'])
+ return Response({'file': update.photo.url})
+ elif file_type == 'file':
+ update.file = upload_file
+ update.save(update_fields=['file'])
+ return Response({'file': update.file.url})
+ except Exception as e:
+ return Response({'error': str(e)}, status=status.HTTP_400_BAD_REQUEST)
|
{"golden_diff": "diff --git a/akvo/rest/views/indicator_period_data.py b/akvo/rest/views/indicator_period_data.py\n--- a/akvo/rest/views/indicator_period_data.py\n+++ b/akvo/rest/views/indicator_period_data.py\n@@ -11,8 +11,10 @@\n IndicatorPeriodDataCommentSerializer)\n from ..viewsets import PublicProjectViewSet\n \n+from django.http import HttpResponseForbidden\n+\n+from rest_framework import status\n from rest_framework.decorators import api_view, permission_classes\n-from rest_framework.permissions import IsAuthenticated\n from rest_framework.response import Response\n \n \n@@ -80,7 +82,6 @@\n \n \n @api_view(['POST'])\n-@permission_classes((IsAuthenticated, ))\n def indicator_upload_file(request, pk=None):\n \"\"\"\n Special API call for directly uploading a file.\n@@ -91,18 +92,24 @@\n update = IndicatorPeriodData.objects.get(pk=pk)\n upload_file = request.FILES['file']\n \n- # TODO: Permissions\n- # user = request.user\n-\n- file_type = request.POST.copy()['type']\n- if file_type == 'photo':\n- update.photo = upload_file\n- update.save(update_fields=['photo'])\n- return Response({'file': update.photo.url})\n- elif file_type == 'file':\n- update.file = upload_file\n- update.save(update_fields=['file'])\n- return Response({'file': update.file.url})\n-\n- # TODO: Error response\n- return Response({})\n+ # Permissions\n+ user = getattr(request, 'user', None)\n+ if not user:\n+ return Response({'error': 'User is not logged in'}, status=status.HTTP_403_FORBIDDEN)\n+\n+ if not user.has_perm('rsr.change_project', update.period.indicator.result.project):\n+ return Response({'error': 'User has no permission to place an update'},\n+ status=status.HTTP_403_FORBIDDEN)\n+\n+ try:\n+ file_type = request.POST.copy()['type']\n+ if file_type == 'photo':\n+ update.photo = upload_file\n+ update.save(update_fields=['photo'])\n+ return Response({'file': update.photo.url})\n+ elif file_type == 'file':\n+ update.file = upload_file\n+ update.save(update_fields=['file'])\n+ return Response({'file': update.file.url})\n+ except Exception as e:\n+ return Response({'error': str(e)}, status=status.HTTP_400_BAD_REQUEST)\n", "issue": "File upload for Indicator Period Data requires Django login\nWhen uploading a photo/file to the /rest/v1/indicator_period_data/nnn/upload_file/ endpoint, a Django login is required, which is not the case for the rest of the API; an API key should be sufficient.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom akvo.rsr.models import IndicatorPeriodData, IndicatorPeriodDataComment\n\nfrom ..serializers import (IndicatorPeriodDataSerializer, IndicatorPeriodDataFrameworkSerializer,\n IndicatorPeriodDataCommentSerializer)\nfrom ..viewsets import PublicProjectViewSet\n\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\n\n\nclass IndicatorPeriodDataViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorPeriodData.objects.all()\n serializer_class = IndicatorPeriodDataSerializer\n filter_fields = {\n 'period': ['exact'],\n 'period__indicator': ['exact'],\n 'period__indicator__result': ['exact'],\n 'period__indicator__result__project': ['exact'],\n 'user': ['exact'],\n 'status': 
['exact'],\n 'created_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'last_modified_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'relative_data': ['exact'],\n 'data': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'period_actual_value': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'update_method': ['exact']\n }\n\n project_relation = 'period__indicator__result__project__'\n\n\nclass IndicatorPeriodDataFrameworkViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorPeriodData.objects.all()\n serializer_class = IndicatorPeriodDataFrameworkSerializer\n filter_fields = {\n 'period': ['exact'],\n 'period__indicator': ['exact'],\n 'period__indicator__result': ['exact'],\n 'period__indicator__result__project': ['exact'],\n 'user': ['exact'],\n 'status': ['exact'],\n 'created_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'last_modified_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'relative_data': ['exact'],\n 'data': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'period_actual_value': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'update_method': ['exact']\n }\n project_relation = 'period__indicator__result__project__'\n\n\nclass IndicatorPeriodDataCommentViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorPeriodDataComment.objects.all()\n serializer_class = IndicatorPeriodDataCommentSerializer\n filter_fields = {\n 'data': ['exact'],\n 'data__period': ['exact'],\n 'data__period__indicator': ['exact'],\n 'data__period__indicator__result': ['exact'],\n 'data__period__indicator__result__project': ['exact'],\n 'user': ['exact'],\n 'created_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'last_modified_at': ['exact', 'gt', 'gte', 'lt', 'lte', ]\n }\n project_relation = 'period__indicator__result__project__'\n\n\n@api_view(['POST'])\n@permission_classes((IsAuthenticated, ))\ndef indicator_upload_file(request, pk=None):\n \"\"\"\n Special API call for directly uploading a file.\n\n :param request; A Django request object.\n :param pk; The primary key of an IndicatorPeriodData instance.\n \"\"\"\n update = IndicatorPeriodData.objects.get(pk=pk)\n upload_file = request.FILES['file']\n\n # TODO: Permissions\n # user = request.user\n\n file_type = request.POST.copy()['type']\n if file_type == 'photo':\n update.photo = upload_file\n update.save(update_fields=['photo'])\n return Response({'file': update.photo.url})\n elif file_type == 'file':\n update.file = upload_file\n update.save(update_fields=['file'])\n return Response({'file': update.file.url})\n\n # TODO: Error response\n return Response({})\n", "path": "akvo/rest/views/indicator_period_data.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom akvo.rsr.models import IndicatorPeriodData, IndicatorPeriodDataComment\n\nfrom ..serializers import (IndicatorPeriodDataSerializer, IndicatorPeriodDataFrameworkSerializer,\n IndicatorPeriodDataCommentSerializer)\nfrom ..viewsets import PublicProjectViewSet\n\nfrom django.http import HttpResponseForbidden\n\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.response import Response\n\n\nclass IndicatorPeriodDataViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorPeriodData.objects.all()\n serializer_class = 
IndicatorPeriodDataSerializer\n filter_fields = {\n 'period': ['exact'],\n 'period__indicator': ['exact'],\n 'period__indicator__result': ['exact'],\n 'period__indicator__result__project': ['exact'],\n 'user': ['exact'],\n 'status': ['exact'],\n 'created_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'last_modified_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'relative_data': ['exact'],\n 'data': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'period_actual_value': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'update_method': ['exact']\n }\n\n project_relation = 'period__indicator__result__project__'\n\n\nclass IndicatorPeriodDataFrameworkViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorPeriodData.objects.all()\n serializer_class = IndicatorPeriodDataFrameworkSerializer\n filter_fields = {\n 'period': ['exact'],\n 'period__indicator': ['exact'],\n 'period__indicator__result': ['exact'],\n 'period__indicator__result__project': ['exact'],\n 'user': ['exact'],\n 'status': ['exact'],\n 'created_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'last_modified_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'relative_data': ['exact'],\n 'data': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'period_actual_value': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'update_method': ['exact']\n }\n project_relation = 'period__indicator__result__project__'\n\n\nclass IndicatorPeriodDataCommentViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorPeriodDataComment.objects.all()\n serializer_class = IndicatorPeriodDataCommentSerializer\n filter_fields = {\n 'data': ['exact'],\n 'data__period': ['exact'],\n 'data__period__indicator': ['exact'],\n 'data__period__indicator__result': ['exact'],\n 'data__period__indicator__result__project': ['exact'],\n 'user': ['exact'],\n 'created_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'last_modified_at': ['exact', 'gt', 'gte', 'lt', 'lte', ]\n }\n project_relation = 'period__indicator__result__project__'\n\n\n@api_view(['POST'])\ndef indicator_upload_file(request, pk=None):\n \"\"\"\n Special API call for directly uploading a file.\n\n :param request; A Django request object.\n :param pk; The primary key of an IndicatorPeriodData instance.\n \"\"\"\n update = IndicatorPeriodData.objects.get(pk=pk)\n upload_file = request.FILES['file']\n\n # Permissions\n user = getattr(request, 'user', None)\n if not user:\n return Response({'error': 'User is not logged in'}, status=status.HTTP_403_FORBIDDEN)\n\n if not user.has_perm('rsr.change_project', update.period.indicator.result.project):\n return Response({'error': 'User has no permission to place an update'},\n status=status.HTTP_403_FORBIDDEN)\n\n try:\n file_type = request.POST.copy()['type']\n if file_type == 'photo':\n update.photo = upload_file\n update.save(update_fields=['photo'])\n return Response({'file': update.photo.url})\n elif file_type == 'file':\n update.file = upload_file\n update.save(update_fields=['file'])\n return Response({'file': update.file.url})\n except Exception as e:\n return Response({'error': str(e)}, status=status.HTTP_400_BAD_REQUEST)\n", "path": "akvo/rest/views/indicator_period_data.py"}]}
| 1,461 | 539 |
gh_patches_debug_11312
|
rasdani/github-patches
|
git_diff
|
buildbot__buildbot-7066
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
buildbot.tac - use an 0o prefix for octal integers
After fresh installation of buildbot workers on Rocky9 with python 3.9 we found the following problem starting the workers:
```
worker not running
Unhandled Error
Traceback (most recent call last):
File "/home/bb-slave/lib/python3.9/site-packages/twisted/application/app.py", line 674, in run
runApp(config)
File "/home/bb-slave/lib/python3.9/site-packages/twisted/scripts/twistd.py", line 29, in runApp
runner.run()
File "/home/bb-slave/lib/python3.9/site-packages/twisted/application/app.py", line 370, in run
self.application = self.createOrGetApplication()
File "/home/bb-slave/lib/python3.9/site-packages/twisted/application/app.py", line 437, in createOrGetApplication
application = getApplication(self.config, passphrase)
--- <exception caught here> ---
File "/home/bb-slave/lib/python3.9/site-packages/twisted/application/app.py", line 446, in getApplication
application = service.loadApplication(filename, style, passphrase)
File "/home/bb-slave/lib/python3.9/site-packages/twisted/application/service.py", line 404, in loadApplication
application = sob.loadValueFromFile(filename, "application")
File "/home/bb-slave/lib/python3.9/site-packages/twisted/persisted/sob.py", line 176, in loadValueFromFile
codeObj = compile(data, filename, "exec")
builtins.SyntaxError: leading zeros in decimal integer literals are not permitted; use an 0o prefix for octal integers (buildbot.tac, line 32)
Failed to load application: leading zeros in decimal integer literals are not permitted; use an 0o prefix for octal integers (buildbot.tac, line 32)
```
it turns out that the umask line in the generated buildbot.tac file causes the trouble:
```
[moko:/home/bb-slave] cat moko_linux_gtk3_slave/buildbot.tac
...
umask = 022
...
```
after changing the line, the worker starts and runs without problems:
```
umask = 0o022
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `worker/buildbot_worker/scripts/runner.py`
Content:
```
1 # This file is part of Buildbot. Buildbot is free software: you can
2 # redistribute it and/or modify it under the terms of the GNU General Public
3 # License as published by the Free Software Foundation, version 2.
4 #
5 # This program is distributed in the hope that it will be useful, but WITHOUT
6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
8 # details.
9 #
10 # You should have received a copy of the GNU General Public License along with
11 # this program; if not, write to the Free Software Foundation, Inc., 51
12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
13 #
14 # Copyright Buildbot Team Members
15
16 # N.B.: don't import anything that might pull in a reactor yet. Some of our
17 # subcommands want to load modules that need the gtk reactor.
18
19 from __future__ import absolute_import
20 from __future__ import division
21 from __future__ import print_function
22
23 import os
24 import re
25 import sys
26 import textwrap
27
28 from twisted.python import log
29 from twisted.python import reflect
30 from twisted.python import usage
31
32 # the create/start/stop commands should all be run as the same user,
33 # preferably a separate 'buildbot' account.
34
35 # Note that the terms 'options' and 'config' are used interchangeably here - in
36 # fact, they are interchanged several times. Caveat legator.
37
38
39 class MakerBase(usage.Options):
40 optFlags = [
41 ['help', 'h', "Display this message"],
42 ["quiet", "q", "Do not emit the commands being run"],
43 ]
44
45 longdesc = textwrap.dedent("""
46 Operates upon the specified <basedir> (or the current directory, if not
47 specified).
48 """)
49
50 # on tab completion, suggest directories as first argument
51 if hasattr(usage, 'Completions'):
52 # only set completion suggestion if running with
53 # twisted version (>=11.1.0) that supports it
54 compData = usage.Completions(
55 extraActions=[usage.CompleteDirs(descr="worker base directory")])
56
57 opt_h = usage.Options.opt_help
58
59 def parseArgs(self, *args):
60 if args:
61 self['basedir'] = args[0]
62 else:
63 # Use the current directory if no basedir was specified.
64 self['basedir'] = os.getcwd()
65 if len(args) > 1:
66 raise usage.UsageError("I wasn't expecting so many arguments")
67
68 def postOptions(self):
69 self['basedir'] = os.path.abspath(self['basedir'])
70
71
72 class StartOptions(MakerBase):
73 subcommandFunction = "buildbot_worker.scripts.start.startCommand"
74 optFlags = [
75 ['quiet', 'q', "Don't display startup log messages"],
76 ['nodaemon', None, "Don't daemonize (stay in foreground)"],
77 ]
78
79 def getSynopsis(self):
80 return "Usage: buildbot-worker start [<basedir>]"
81
82
83 class StopOptions(MakerBase):
84 subcommandFunction = "buildbot_worker.scripts.stop.stop"
85
86 def getSynopsis(self):
87 return "Usage: buildbot-worker stop [<basedir>]"
88
89
90 class RestartOptions(MakerBase):
91 subcommandFunction = "buildbot_worker.scripts.restart.restart"
92 optFlags = [
93 ['quiet', 'q', "Don't display startup log messages"],
94 ['nodaemon', None, "Don't daemonize (stay in foreground)"],
95 ]
96
97 def getSynopsis(self):
98 return "Usage: buildbot-worker restart [<basedir>]"
99
100
101 class CreateWorkerOptions(MakerBase):
102 subcommandFunction = "buildbot_worker.scripts.create_worker.createWorker"
103 optFlags = [
104 ["force", "f", "Re-use an existing directory"],
105 ["relocatable", "r",
106 "Create a relocatable buildbot.tac"],
107 ["no-logrotate", "n",
108 "Do not permit buildmaster rotate logs by itself"],
109 ['use-tls', None,
110 "Uses TLS to connect to master"],
111 ['delete-leftover-dirs', None,
112 'Delete folders that are not required by the master on connection'],
113 ]
114 optParameters = [
115 ["keepalive", "k", 600,
116 "Interval at which keepalives should be sent (in seconds)"],
117 ["umask", None, "None",
118 "controls permissions of generated files. "
119 "Use --umask=0o22 to be world-readable"],
120 ["maxdelay", None, 300,
121 "Maximum time between connection attempts"],
122 ["maxretries", None, 'None',
123 "Maximum number of retries before worker shutdown"],
124 ["numcpus", None, "None",
125 "Number of available cpus to use on a build. "],
126 ["log-size", "s", "10000000",
127 "size at which to rotate twisted log files"],
128 ["log-count", "l", "10",
129 "limit the number of kept old twisted log files "
130 "(None for unlimited)"],
131 ["allow-shutdown", "a", None,
132 "Allows the worker to initiate a graceful shutdown. One of "
133 "'signal' or 'file'"],
134 ["protocol", None, "pb", "Protocol to be used when creating master-worker connection"],
135 ["proxy-connection-string", None, None,
136 "Address of HTTP proxy to tunnel through"]
137 ]
138
139 longdesc = textwrap.dedent("""
140 This command creates a buildbot worker directory and buildbot.tac
141 file. The bot will use the <name> and <passwd> arguments to authenticate
142 itself when connecting to the master. All commands are run in a
143 build-specific subdirectory of <basedir>. <master> is a string of the
144 form 'hostname[:port]', and specifies where the buildmaster can be reached.
145 port defaults to 9989.
146
147 The appropriate values for <name>, <passwd>, and <master> should be
148 provided to you by the buildmaster administrator. You must choose <basedir>
149 yourself.
150 """)
151
152 def validateMasterArgument(self, master_arg):
153 """
154 Parse the <master> argument.
155
156 @param master_arg: the <master> argument to parse
157
158 @return: tuple of master's host and port
159 @raise UsageError: on errors parsing the argument
160 """
161 if master_arg[:5] == "http:":
162 raise usage.UsageError("<master> is not a URL - do not use URL")
163
164 if master_arg.startswith("[") and "]" in master_arg:
165 # detect ipv6 address with format [2001:1:2:3:4::1]:4321
166 master, port_tmp = master_arg.split("]")
167 master = master[1:]
168 if ":" not in port_tmp:
169 port = 9989
170 else:
171 port = port_tmp.split(":")[1]
172
173 elif ":" not in master_arg:
174 master = master_arg
175 port = 9989
176 else:
177 try:
178 master, port = master_arg.split(":")
179 except ValueError:
180 raise usage.UsageError(("invalid <master> argument '{}', "
181 "if it is an ipv6 address, it must be enclosed by []"
182 ).format(master_arg))
183
184 if not master:
185 raise usage.UsageError("invalid <master> argument '{}'".format(
186 master_arg))
187 try:
188 port = int(port)
189 except ValueError:
190 raise usage.UsageError("invalid master port '{}', "
191 "needs to be a number".format(port))
192
193 return master, port
194
195 def getSynopsis(self):
196 return "Usage: buildbot-worker create-worker " \
197 "[options] <basedir> <master> <name> <passwd>"
198
199 def parseArgs(self, *args):
200 if len(args) != 4:
201 raise usage.UsageError("incorrect number of arguments")
202 basedir, master, name, passwd = args
203 self['basedir'] = basedir
204 self['host'], self['port'] = self.validateMasterArgument(master)
205 self['name'] = name
206 self['passwd'] = passwd
207
208 def postOptions(self):
209 MakerBase.postOptions(self)
210
211 # check and convert numeric parameters
212 for argument in ["keepalive", "maxdelay", "log-size"]:
213 try:
214 self[argument] = int(self[argument])
215 except ValueError:
216 raise usage.UsageError("{} parameter needs to be a number".format(
217 argument))
218
219 for argument in ["log-count", "maxretries", "umask", "numcpus"]:
220 if not re.match(r'^(0o)?\d+$', self[argument]) and \
221 self[argument] != 'None':
222 raise usage.UsageError("{} parameter needs to be a number"
223 " or None".format(argument))
224
225 if self['allow-shutdown'] not in [None, 'signal', 'file']:
226 raise usage.UsageError("allow-shutdown needs to be one of"
227 " 'signal' or 'file'")
228
229
230 class Options(usage.Options):
231 synopsis = "Usage: buildbot-worker <command> [command options]"
232
233 subCommands = [
234 # the following are all admin commands
235 ['create-worker', None, CreateWorkerOptions,
236 "Create and populate a directory for a new worker"],
237 ['start', None, StartOptions, "Start a worker"],
238 ['stop', None, StopOptions, "Stop a worker"],
239 ['restart', None, RestartOptions,
240 "Restart a worker"],
241 ]
242
243 def opt_version(self):
244 import buildbot_worker # pylint: disable=import-outside-toplevel
245 print("worker version: {}".format(buildbot_worker.version))
246 usage.Options.opt_version(self)
247
248 def opt_verbose(self):
249 log.startLogging(sys.stderr)
250
251 def postOptions(self):
252 if not hasattr(self, 'subOptions'):
253 raise usage.UsageError("must specify a command")
254
255
256 def run():
257 config = Options()
258 try:
259 config.parseOptions()
260 except usage.error as e:
261 print("{}: {}".format(sys.argv[0], e))
262 print()
263 c = getattr(config, 'subOptions', config)
264 print(str(c))
265 sys.exit(1)
266
267 subconfig = config.subOptions
268 subcommandFunction = reflect.namedObject(subconfig.subcommandFunction)
269 sys.exit(subcommandFunction(subconfig))
270
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/worker/buildbot_worker/scripts/runner.py b/worker/buildbot_worker/scripts/runner.py
--- a/worker/buildbot_worker/scripts/runner.py
+++ b/worker/buildbot_worker/scripts/runner.py
@@ -217,7 +217,7 @@
argument))
for argument in ["log-count", "maxretries", "umask", "numcpus"]:
- if not re.match(r'^(0o)?\d+$', self[argument]) and \
+ if not re.match(r'^((0o)\d+|0|[1-9]\d*)$', self[argument]) and \
self[argument] != 'None':
raise usage.UsageError("{} parameter needs to be a number"
" or None".format(argument))
|
{"golden_diff": "diff --git a/worker/buildbot_worker/scripts/runner.py b/worker/buildbot_worker/scripts/runner.py\n--- a/worker/buildbot_worker/scripts/runner.py\n+++ b/worker/buildbot_worker/scripts/runner.py\n@@ -217,7 +217,7 @@\n argument))\n \n for argument in [\"log-count\", \"maxretries\", \"umask\", \"numcpus\"]:\n- if not re.match(r'^(0o)?\\d+$', self[argument]) and \\\n+ if not re.match(r'^((0o)\\d+|0|[1-9]\\d*)$', self[argument]) and \\\n self[argument] != 'None':\n raise usage.UsageError(\"{} parameter needs to be a number\"\n \" or None\".format(argument))\n", "issue": "buildbot.tac - use an 0o prefix for octal integers\nAfter fresh installation of buildbot workers on Rocky9 with python 3.9 we found the following problem starting the workers:\r\n\r\n```\r\nworker not running\r\nUnhandled Error\r\nTraceback (most recent call last):\r\n File \"/home/bb-slave/lib/python3.9/site-packages/twisted/application/app.py\", line 674, in run\r\n runApp(config)\r\n File \"/home/bb-slave/lib/python3.9/site-packages/twisted/scripts/twistd.py\", line 29, in runApp\r\n runner.run()\r\n File \"/home/bb-slave/lib/python3.9/site-packages/twisted/application/app.py\", line 370, in run\r\n self.application = self.createOrGetApplication()\r\n File \"/home/bb-slave/lib/python3.9/site-packages/twisted/application/app.py\", line 437, in createOrGetApplication\r\n application = getApplication(self.config, passphrase)\r\n--- <exception caught here> ---\r\n File \"/home/bb-slave/lib/python3.9/site-packages/twisted/application/app.py\", line 446, in getApplication\r\n application = service.loadApplication(filename, style, passphrase)\r\n File \"/home/bb-slave/lib/python3.9/site-packages/twisted/application/service.py\", line 404, in loadApplication\r\n application = sob.loadValueFromFile(filename, \"application\")\r\n File \"/home/bb-slave/lib/python3.9/site-packages/twisted/persisted/sob.py\", line 176, in loadValueFromFile\r\n codeObj = compile(data, filename, \"exec\")\r\nbuiltins.SyntaxError: leading zeros in decimal integer literals are not permitted; use an 0o prefix for octal integers (buildbot.tac, line 32)\r\n\r\n\r\nFailed to load application: leading zeros in decimal integer literals are not permitted; use an 0o prefix for octal integers (buildbot.tac, line 32)\r\n```\r\nit turns out that the umask line in the generated buildbot.tac file causes the trouble:\r\n\r\n```\r\n[moko:/home/bb-slave] cat moko_linux_gtk3_slave/buildbot.tac\r\n...\r\numask = 022\r\n...\r\n```\r\nafter changing the line, the worker starts and runs without problems:\r\n\r\n```\r\numask = 0o022\r\n```\n", "before_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\n# N.B.: don't import anything that might pull in a reactor yet. 
Some of our\n# subcommands want to load modules that need the gtk reactor.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport re\nimport sys\nimport textwrap\n\nfrom twisted.python import log\nfrom twisted.python import reflect\nfrom twisted.python import usage\n\n# the create/start/stop commands should all be run as the same user,\n# preferably a separate 'buildbot' account.\n\n# Note that the terms 'options' and 'config' are used interchangeably here - in\n# fact, they are interchanged several times. Caveat legator.\n\n\nclass MakerBase(usage.Options):\n optFlags = [\n ['help', 'h', \"Display this message\"],\n [\"quiet\", \"q\", \"Do not emit the commands being run\"],\n ]\n\n longdesc = textwrap.dedent(\"\"\"\n Operates upon the specified <basedir> (or the current directory, if not\n specified).\n \"\"\")\n\n # on tab completion, suggest directories as first argument\n if hasattr(usage, 'Completions'):\n # only set completion suggestion if running with\n # twisted version (>=11.1.0) that supports it\n compData = usage.Completions(\n extraActions=[usage.CompleteDirs(descr=\"worker base directory\")])\n\n opt_h = usage.Options.opt_help\n\n def parseArgs(self, *args):\n if args:\n self['basedir'] = args[0]\n else:\n # Use the current directory if no basedir was specified.\n self['basedir'] = os.getcwd()\n if len(args) > 1:\n raise usage.UsageError(\"I wasn't expecting so many arguments\")\n\n def postOptions(self):\n self['basedir'] = os.path.abspath(self['basedir'])\n\n\nclass StartOptions(MakerBase):\n subcommandFunction = \"buildbot_worker.scripts.start.startCommand\"\n optFlags = [\n ['quiet', 'q', \"Don't display startup log messages\"],\n ['nodaemon', None, \"Don't daemonize (stay in foreground)\"],\n ]\n\n def getSynopsis(self):\n return \"Usage: buildbot-worker start [<basedir>]\"\n\n\nclass StopOptions(MakerBase):\n subcommandFunction = \"buildbot_worker.scripts.stop.stop\"\n\n def getSynopsis(self):\n return \"Usage: buildbot-worker stop [<basedir>]\"\n\n\nclass RestartOptions(MakerBase):\n subcommandFunction = \"buildbot_worker.scripts.restart.restart\"\n optFlags = [\n ['quiet', 'q', \"Don't display startup log messages\"],\n ['nodaemon', None, \"Don't daemonize (stay in foreground)\"],\n ]\n\n def getSynopsis(self):\n return \"Usage: buildbot-worker restart [<basedir>]\"\n\n\nclass CreateWorkerOptions(MakerBase):\n subcommandFunction = \"buildbot_worker.scripts.create_worker.createWorker\"\n optFlags = [\n [\"force\", \"f\", \"Re-use an existing directory\"],\n [\"relocatable\", \"r\",\n \"Create a relocatable buildbot.tac\"],\n [\"no-logrotate\", \"n\",\n \"Do not permit buildmaster rotate logs by itself\"],\n ['use-tls', None,\n \"Uses TLS to connect to master\"],\n ['delete-leftover-dirs', None,\n 'Delete folders that are not required by the master on connection'],\n ]\n optParameters = [\n [\"keepalive\", \"k\", 600,\n \"Interval at which keepalives should be sent (in seconds)\"],\n [\"umask\", None, \"None\",\n \"controls permissions of generated files. \"\n \"Use --umask=0o22 to be world-readable\"],\n [\"maxdelay\", None, 300,\n \"Maximum time between connection attempts\"],\n [\"maxretries\", None, 'None',\n \"Maximum number of retries before worker shutdown\"],\n [\"numcpus\", None, \"None\",\n \"Number of available cpus to use on a build. 
\"],\n [\"log-size\", \"s\", \"10000000\",\n \"size at which to rotate twisted log files\"],\n [\"log-count\", \"l\", \"10\",\n \"limit the number of kept old twisted log files \"\n \"(None for unlimited)\"],\n [\"allow-shutdown\", \"a\", None,\n \"Allows the worker to initiate a graceful shutdown. One of \"\n \"'signal' or 'file'\"],\n [\"protocol\", None, \"pb\", \"Protocol to be used when creating master-worker connection\"],\n [\"proxy-connection-string\", None, None,\n \"Address of HTTP proxy to tunnel through\"]\n ]\n\n longdesc = textwrap.dedent(\"\"\"\n This command creates a buildbot worker directory and buildbot.tac\n file. The bot will use the <name> and <passwd> arguments to authenticate\n itself when connecting to the master. All commands are run in a\n build-specific subdirectory of <basedir>. <master> is a string of the\n form 'hostname[:port]', and specifies where the buildmaster can be reached.\n port defaults to 9989.\n\n The appropriate values for <name>, <passwd>, and <master> should be\n provided to you by the buildmaster administrator. You must choose <basedir>\n yourself.\n \"\"\")\n\n def validateMasterArgument(self, master_arg):\n \"\"\"\n Parse the <master> argument.\n\n @param master_arg: the <master> argument to parse\n\n @return: tuple of master's host and port\n @raise UsageError: on errors parsing the argument\n \"\"\"\n if master_arg[:5] == \"http:\":\n raise usage.UsageError(\"<master> is not a URL - do not use URL\")\n\n if master_arg.startswith(\"[\") and \"]\" in master_arg:\n # detect ipv6 address with format [2001:1:2:3:4::1]:4321\n master, port_tmp = master_arg.split(\"]\")\n master = master[1:]\n if \":\" not in port_tmp:\n port = 9989\n else:\n port = port_tmp.split(\":\")[1]\n\n elif \":\" not in master_arg:\n master = master_arg\n port = 9989\n else:\n try:\n master, port = master_arg.split(\":\")\n except ValueError:\n raise usage.UsageError((\"invalid <master> argument '{}', \"\n \"if it is an ipv6 address, it must be enclosed by []\"\n ).format(master_arg))\n\n if not master:\n raise usage.UsageError(\"invalid <master> argument '{}'\".format(\n master_arg))\n try:\n port = int(port)\n except ValueError:\n raise usage.UsageError(\"invalid master port '{}', \"\n \"needs to be a number\".format(port))\n\n return master, port\n\n def getSynopsis(self):\n return \"Usage: buildbot-worker create-worker \" \\\n \"[options] <basedir> <master> <name> <passwd>\"\n\n def parseArgs(self, *args):\n if len(args) != 4:\n raise usage.UsageError(\"incorrect number of arguments\")\n basedir, master, name, passwd = args\n self['basedir'] = basedir\n self['host'], self['port'] = self.validateMasterArgument(master)\n self['name'] = name\n self['passwd'] = passwd\n\n def postOptions(self):\n MakerBase.postOptions(self)\n\n # check and convert numeric parameters\n for argument in [\"keepalive\", \"maxdelay\", \"log-size\"]:\n try:\n self[argument] = int(self[argument])\n except ValueError:\n raise usage.UsageError(\"{} parameter needs to be a number\".format(\n argument))\n\n for argument in [\"log-count\", \"maxretries\", \"umask\", \"numcpus\"]:\n if not re.match(r'^(0o)?\\d+$', self[argument]) and \\\n self[argument] != 'None':\n raise usage.UsageError(\"{} parameter needs to be a number\"\n \" or None\".format(argument))\n\n if self['allow-shutdown'] not in [None, 'signal', 'file']:\n raise usage.UsageError(\"allow-shutdown needs to be one of\"\n \" 'signal' or 'file'\")\n\n\nclass Options(usage.Options):\n synopsis = \"Usage: buildbot-worker <command> [command 
options]\"\n\n subCommands = [\n # the following are all admin commands\n ['create-worker', None, CreateWorkerOptions,\n \"Create and populate a directory for a new worker\"],\n ['start', None, StartOptions, \"Start a worker\"],\n ['stop', None, StopOptions, \"Stop a worker\"],\n ['restart', None, RestartOptions,\n \"Restart a worker\"],\n ]\n\n def opt_version(self):\n import buildbot_worker # pylint: disable=import-outside-toplevel\n print(\"worker version: {}\".format(buildbot_worker.version))\n usage.Options.opt_version(self)\n\n def opt_verbose(self):\n log.startLogging(sys.stderr)\n\n def postOptions(self):\n if not hasattr(self, 'subOptions'):\n raise usage.UsageError(\"must specify a command\")\n\n\ndef run():\n config = Options()\n try:\n config.parseOptions()\n except usage.error as e:\n print(\"{}: {}\".format(sys.argv[0], e))\n print()\n c = getattr(config, 'subOptions', config)\n print(str(c))\n sys.exit(1)\n\n subconfig = config.subOptions\n subcommandFunction = reflect.namedObject(subconfig.subcommandFunction)\n sys.exit(subcommandFunction(subconfig))\n", "path": "worker/buildbot_worker/scripts/runner.py"}], "after_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\n# N.B.: don't import anything that might pull in a reactor yet. Some of our\n# subcommands want to load modules that need the gtk reactor.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport re\nimport sys\nimport textwrap\n\nfrom twisted.python import log\nfrom twisted.python import reflect\nfrom twisted.python import usage\n\n# the create/start/stop commands should all be run as the same user,\n# preferably a separate 'buildbot' account.\n\n# Note that the terms 'options' and 'config' are used interchangeably here - in\n# fact, they are interchanged several times. 
Caveat legator.\n\n\nclass MakerBase(usage.Options):\n optFlags = [\n ['help', 'h', \"Display this message\"],\n [\"quiet\", \"q\", \"Do not emit the commands being run\"],\n ]\n\n longdesc = textwrap.dedent(\"\"\"\n Operates upon the specified <basedir> (or the current directory, if not\n specified).\n \"\"\")\n\n # on tab completion, suggest directories as first argument\n if hasattr(usage, 'Completions'):\n # only set completion suggestion if running with\n # twisted version (>=11.1.0) that supports it\n compData = usage.Completions(\n extraActions=[usage.CompleteDirs(descr=\"worker base directory\")])\n\n opt_h = usage.Options.opt_help\n\n def parseArgs(self, *args):\n if args:\n self['basedir'] = args[0]\n else:\n # Use the current directory if no basedir was specified.\n self['basedir'] = os.getcwd()\n if len(args) > 1:\n raise usage.UsageError(\"I wasn't expecting so many arguments\")\n\n def postOptions(self):\n self['basedir'] = os.path.abspath(self['basedir'])\n\n\nclass StartOptions(MakerBase):\n subcommandFunction = \"buildbot_worker.scripts.start.startCommand\"\n optFlags = [\n ['quiet', 'q', \"Don't display startup log messages\"],\n ['nodaemon', None, \"Don't daemonize (stay in foreground)\"],\n ]\n\n def getSynopsis(self):\n return \"Usage: buildbot-worker start [<basedir>]\"\n\n\nclass StopOptions(MakerBase):\n subcommandFunction = \"buildbot_worker.scripts.stop.stop\"\n\n def getSynopsis(self):\n return \"Usage: buildbot-worker stop [<basedir>]\"\n\n\nclass RestartOptions(MakerBase):\n subcommandFunction = \"buildbot_worker.scripts.restart.restart\"\n optFlags = [\n ['quiet', 'q', \"Don't display startup log messages\"],\n ['nodaemon', None, \"Don't daemonize (stay in foreground)\"],\n ]\n\n def getSynopsis(self):\n return \"Usage: buildbot-worker restart [<basedir>]\"\n\n\nclass CreateWorkerOptions(MakerBase):\n subcommandFunction = \"buildbot_worker.scripts.create_worker.createWorker\"\n optFlags = [\n [\"force\", \"f\", \"Re-use an existing directory\"],\n [\"relocatable\", \"r\",\n \"Create a relocatable buildbot.tac\"],\n [\"no-logrotate\", \"n\",\n \"Do not permit buildmaster rotate logs by itself\"],\n ['use-tls', None,\n \"Uses TLS to connect to master\"],\n ['delete-leftover-dirs', None,\n 'Delete folders that are not required by the master on connection'],\n ]\n optParameters = [\n [\"keepalive\", \"k\", 600,\n \"Interval at which keepalives should be sent (in seconds)\"],\n [\"umask\", None, \"None\",\n \"controls permissions of generated files. \"\n \"Use --umask=0o22 to be world-readable\"],\n [\"maxdelay\", None, 300,\n \"Maximum time between connection attempts\"],\n [\"maxretries\", None, 'None',\n \"Maximum number of retries before worker shutdown\"],\n [\"numcpus\", None, \"None\",\n \"Number of available cpus to use on a build. \"],\n [\"log-size\", \"s\", \"10000000\",\n \"size at which to rotate twisted log files\"],\n [\"log-count\", \"l\", \"10\",\n \"limit the number of kept old twisted log files \"\n \"(None for unlimited)\"],\n [\"allow-shutdown\", \"a\", None,\n \"Allows the worker to initiate a graceful shutdown. One of \"\n \"'signal' or 'file'\"],\n [\"protocol\", None, \"pb\", \"Protocol to be used when creating master-worker connection\"],\n [\"proxy-connection-string\", None, None,\n \"Address of HTTP proxy to tunnel through\"]\n ]\n\n longdesc = textwrap.dedent(\"\"\"\n This command creates a buildbot worker directory and buildbot.tac\n file. 
The bot will use the <name> and <passwd> arguments to authenticate\n itself when connecting to the master. All commands are run in a\n build-specific subdirectory of <basedir>. <master> is a string of the\n form 'hostname[:port]', and specifies where the buildmaster can be reached.\n port defaults to 9989.\n\n The appropriate values for <name>, <passwd>, and <master> should be\n provided to you by the buildmaster administrator. You must choose <basedir>\n yourself.\n \"\"\")\n\n def validateMasterArgument(self, master_arg):\n \"\"\"\n Parse the <master> argument.\n\n @param master_arg: the <master> argument to parse\n\n @return: tuple of master's host and port\n @raise UsageError: on errors parsing the argument\n \"\"\"\n if master_arg[:5] == \"http:\":\n raise usage.UsageError(\"<master> is not a URL - do not use URL\")\n\n if master_arg.startswith(\"[\") and \"]\" in master_arg:\n # detect ipv6 address with format [2001:1:2:3:4::1]:4321\n master, port_tmp = master_arg.split(\"]\")\n master = master[1:]\n if \":\" not in port_tmp:\n port = 9989\n else:\n port = port_tmp.split(\":\")[1]\n\n elif \":\" not in master_arg:\n master = master_arg\n port = 9989\n else:\n try:\n master, port = master_arg.split(\":\")\n except ValueError:\n raise usage.UsageError((\"invalid <master> argument '{}', \"\n \"if it is an ipv6 address, it must be enclosed by []\"\n ).format(master_arg))\n\n if not master:\n raise usage.UsageError(\"invalid <master> argument '{}'\".format(\n master_arg))\n try:\n port = int(port)\n except ValueError:\n raise usage.UsageError(\"invalid master port '{}', \"\n \"needs to be a number\".format(port))\n\n return master, port\n\n def getSynopsis(self):\n return \"Usage: buildbot-worker create-worker \" \\\n \"[options] <basedir> <master> <name> <passwd>\"\n\n def parseArgs(self, *args):\n if len(args) != 4:\n raise usage.UsageError(\"incorrect number of arguments\")\n basedir, master, name, passwd = args\n self['basedir'] = basedir\n self['host'], self['port'] = self.validateMasterArgument(master)\n self['name'] = name\n self['passwd'] = passwd\n\n def postOptions(self):\n MakerBase.postOptions(self)\n\n # check and convert numeric parameters\n for argument in [\"keepalive\", \"maxdelay\", \"log-size\"]:\n try:\n self[argument] = int(self[argument])\n except ValueError:\n raise usage.UsageError(\"{} parameter needs to be a number\".format(\n argument))\n\n for argument in [\"log-count\", \"maxretries\", \"umask\", \"numcpus\"]:\n if not re.match(r'^((0o)\\d+|0|[1-9]\\d*)$', self[argument]) and \\\n self[argument] != 'None':\n raise usage.UsageError(\"{} parameter needs to be a number\"\n \" or None\".format(argument))\n\n if self['allow-shutdown'] not in [None, 'signal', 'file']:\n raise usage.UsageError(\"allow-shutdown needs to be one of\"\n \" 'signal' or 'file'\")\n\n\nclass Options(usage.Options):\n synopsis = \"Usage: buildbot-worker <command> [command options]\"\n\n subCommands = [\n # the following are all admin commands\n ['create-worker', None, CreateWorkerOptions,\n \"Create and populate a directory for a new worker\"],\n ['start', None, StartOptions, \"Start a worker\"],\n ['stop', None, StopOptions, \"Stop a worker\"],\n ['restart', None, RestartOptions,\n \"Restart a worker\"],\n ]\n\n def opt_version(self):\n import buildbot_worker # pylint: disable=import-outside-toplevel\n print(\"worker version: {}\".format(buildbot_worker.version))\n usage.Options.opt_version(self)\n\n def opt_verbose(self):\n log.startLogging(sys.stderr)\n\n def postOptions(self):\n if not 
hasattr(self, 'subOptions'):\n raise usage.UsageError(\"must specify a command\")\n\n\ndef run():\n config = Options()\n try:\n config.parseOptions()\n except usage.error as e:\n print(\"{}: {}\".format(sys.argv[0], e))\n print()\n c = getattr(config, 'subOptions', config)\n print(str(c))\n sys.exit(1)\n\n subconfig = config.subOptions\n subcommandFunction = reflect.namedObject(subconfig.subcommandFunction)\n sys.exit(subcommandFunction(subconfig))\n", "path": "worker/buildbot_worker/scripts/runner.py"}]}
| 3,770 | 170 |
gh_patches_debug_9705
|
rasdani/github-patches
|
git_diff
|
rucio__rucio-5793
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
receiver doesn't correctly handle intermediate hops on fts 3.12
Motivation
----------
The last major FTS release started to correctly fill "job_state" for intermediate-hop messages as "ACTIVE". This is a behavioral change that makes Rucio skip those messages.
https://github.com/rucio/rucio/blob/5e911923990cc9c3caa0afbfec919c3ea4121edc/lib/rucio/daemons/conveyor/receiver.py#L74
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/rucio/daemons/conveyor/receiver.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright European Organization for Nuclear Research (CERN) since 2012
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 """
17 Conveyor is a daemon to manage file transfers.
18 """
19
20 import datetime
21 import json
22 import logging
23 import socket
24 import threading
25 import time
26 import traceback
27
28 import stomp
29
30 import rucio.db.sqla.util
31 from rucio.common import exception
32 from rucio.common.config import config_get, config_get_bool, config_get_int
33 from rucio.common.logging import setup_logging
34 from rucio.common.policy import get_policy
35 from rucio.core import request as request_core
36 from rucio.core.monitor import record_counter
37 from rucio.core.transfer import set_transfer_update_time
38 from rucio.daemons.common import HeartbeatHandler
39 from rucio.db.sqla.session import transactional_session
40 from rucio.transfertool.fts3 import FTS3CompletionMessageTransferStatusReport
41
42 logging.getLogger("stomp").setLevel(logging.CRITICAL)
43
44 graceful_stop = threading.Event()
45
46
47 class Receiver(object):
48
49 def __init__(self, broker, id_, total_threads, full_mode=False, all_vos=False):
50 self.__all_vos = all_vos
51 self.__broker = broker
52 self.__id = id_
53 self.__total_threads = total_threads
54 self.__full_mode = full_mode
55
56 def on_error(self, frame):
57 record_counter('daemons.conveyor.receiver.error')
58 logging.error('[%s] %s' % (self.__broker, frame.body))
59
60 def on_message(self, frame):
61 record_counter('daemons.conveyor.receiver.message_all')
62
63 msg = json.loads(frame.body)
64
65 if not self.__all_vos:
66 if 'vo' not in msg or msg['vo'] != get_policy():
67 return
68
69 if 'job_metadata' in msg.keys() \
70 and isinstance(msg['job_metadata'], dict) \
71 and 'issuer' in msg['job_metadata'].keys() \
72 and str(msg['job_metadata']['issuer']) == str('rucio'):
73
74 if 'job_state' in msg.keys() and str(msg['job_state']) != str('ACTIVE'):
75 record_counter('daemons.conveyor.receiver.message_rucio')
76
77 self._perform_request_update(msg)
78
79 @transactional_session
80 def _perform_request_update(self, msg, session=None, logger=logging.log):
81 external_host = msg.get('endpnt', None)
82 request_id = msg['file_metadata'].get('request_id', None)
83 try:
84 tt_status_report = FTS3CompletionMessageTransferStatusReport(external_host, request_id=request_id, fts_message=msg)
85 if tt_status_report.get_db_fields_to_update(session=session, logger=logger):
86 logging.info('RECEIVED %s', tt_status_report)
87
88 if self.__full_mode:
89 ret = request_core.update_request_state(tt_status_report, session=session, logger=logger)
90 record_counter('daemons.conveyor.receiver.update_request_state.{updated}', labels={'updated': ret})
91 else:
92 try:
93 logging.debug("Update request %s update time" % request_id)
94 set_transfer_update_time(external_host, tt_status_report.external_id, datetime.datetime.utcnow() - datetime.timedelta(hours=24), session=session)
95 record_counter('daemons.conveyor.receiver.set_transfer_update_time')
96 except Exception as error:
97 logging.debug("Failed to update transfer's update time: %s" % str(error))
98 except Exception:
99 logging.critical(traceback.format_exc())
100
101
102 def receiver(id_, total_threads=1, full_mode=False, all_vos=False):
103 """
104 Main loop to consume messages from the FTS3 producer.
105 """
106
107 logging.info('receiver starting in full mode: %s' % full_mode)
108
109 logger_prefix = executable = 'conveyor-receiver'
110
111 brokers_alias = []
112 brokers_resolved = []
113 try:
114 brokers_alias = [b.strip() for b in config_get('messaging-fts3', 'brokers').split(',')]
115 except Exception:
116 raise Exception('Could not load brokers from configuration')
117
118 logging.info('resolving broker dns alias: %s' % brokers_alias)
119
120 brokers_resolved = []
121 for broker in brokers_alias:
122 addrinfos = socket.getaddrinfo(broker, 0, socket.AF_INET, 0, socket.IPPROTO_TCP)
123 brokers_resolved.extend(ai[4][0] for ai in addrinfos)
124
125 logging.info('brokers resolved to %s', brokers_resolved)
126
127 logging.info('checking authentication method')
128 use_ssl = True
129 try:
130 use_ssl = config_get_bool('messaging-fts3', 'use_ssl')
131 except:
132 logging.info('could not find use_ssl in configuration -- please update your rucio.cfg')
133
134 port = config_get_int('messaging-fts3', 'port')
135 vhost = config_get('messaging-fts3', 'broker_virtual_host', raise_exception=False)
136 if not use_ssl:
137 username = config_get('messaging-fts3', 'username')
138 password = config_get('messaging-fts3', 'password')
139 port = config_get_int('messaging-fts3', 'nonssl_port')
140
141 conns = []
142 for broker in brokers_resolved:
143 if not use_ssl:
144 logging.info('setting up username/password authentication: %s' % broker)
145 con = stomp.Connection12(host_and_ports=[(broker, port)],
146 use_ssl=False,
147 vhost=vhost,
148 reconnect_attempts_max=999)
149 else:
150 logging.info('setting up ssl cert/key authentication: %s' % broker)
151 con = stomp.Connection12(host_and_ports=[(broker, port)],
152 use_ssl=True,
153 ssl_key_file=config_get('messaging-fts3', 'ssl_key_file'),
154 ssl_cert_file=config_get('messaging-fts3', 'ssl_cert_file'),
155 vhost=vhost,
156 reconnect_attempts_max=999)
157 conns.append(con)
158
159 logging.info('receiver started')
160
161 with HeartbeatHandler(executable=executable, renewal_interval=30, logger_prefix=logger_prefix) as heartbeat_handler:
162
163 while not graceful_stop.is_set():
164
165 _, _, logger = heartbeat_handler.live()
166
167 for conn in conns:
168
169 if not conn.is_connected():
170 logger(logging.INFO, 'connecting to %s' % conn.transport._Transport__host_and_ports[0][0])
171 record_counter('daemons.messaging.fts3.reconnect.{host}', labels={'host': conn.transport._Transport__host_and_ports[0][0].split('.')[0]})
172
173 conn.set_listener('rucio-messaging-fts3', Receiver(broker=conn.transport._Transport__host_and_ports[0],
174 id_=id_, total_threads=total_threads,
175 full_mode=full_mode, all_vos=all_vos))
176 if not use_ssl:
177 conn.connect(username, password, wait=True)
178 else:
179 conn.connect(wait=True)
180 conn.subscribe(destination=config_get('messaging-fts3', 'destination'),
181 id='rucio-messaging-fts3',
182 ack='auto')
183 time.sleep(1)
184
185 for conn in conns:
186 try:
187 conn.disconnect()
188 except Exception:
189 pass
190
191
192 def stop(signum=None, frame=None):
193 """
194 Graceful exit.
195 """
196
197 graceful_stop.set()
198
199
200 def run(once=False, total_threads=1, full_mode=False):
201 """
202 Starts up the receiver thread
203 """
204 setup_logging()
205
206 if rucio.db.sqla.util.is_old_db():
207 raise exception.DatabaseException('Database was not updated, daemon won\'t start')
208
209 logging.info('starting receiver thread')
210 threads = [threading.Thread(target=receiver, kwargs={'id_': i,
211 'full_mode': full_mode,
212 'total_threads': total_threads}) for i in range(0, total_threads)]
213
214 [thread.start() for thread in threads]
215
216 logging.info('waiting for interrupts')
217
218 # Interruptible joins require a timeout.
219 while threads:
220 threads = [thread.join(timeout=3.14) for thread in threads if thread and thread.is_alive()]
221
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lib/rucio/daemons/conveyor/receiver.py b/lib/rucio/daemons/conveyor/receiver.py
--- a/lib/rucio/daemons/conveyor/receiver.py
+++ b/lib/rucio/daemons/conveyor/receiver.py
@@ -71,7 +71,7 @@
and 'issuer' in msg['job_metadata'].keys() \
and str(msg['job_metadata']['issuer']) == str('rucio'):
- if 'job_state' in msg.keys() and str(msg['job_state']) != str('ACTIVE'):
+ if 'job_state' in msg.keys() and (str(msg['job_state']) != str('ACTIVE') or msg.get('job_multihop', False) is True):
record_counter('daemons.conveyor.receiver.message_rucio')
self._perform_request_update(msg)
|
{"golden_diff": "diff --git a/lib/rucio/daemons/conveyor/receiver.py b/lib/rucio/daemons/conveyor/receiver.py\n--- a/lib/rucio/daemons/conveyor/receiver.py\n+++ b/lib/rucio/daemons/conveyor/receiver.py\n@@ -71,7 +71,7 @@\n and 'issuer' in msg['job_metadata'].keys() \\\n and str(msg['job_metadata']['issuer']) == str('rucio'):\n \n- if 'job_state' in msg.keys() and str(msg['job_state']) != str('ACTIVE'):\n+ if 'job_state' in msg.keys() and (str(msg['job_state']) != str('ACTIVE') or msg.get('job_multihop', False) is True):\n record_counter('daemons.conveyor.receiver.message_rucio')\n \n self._perform_request_update(msg)\n", "issue": "receiver doesn't correctly handle intermediate hops on fts 3.12\nMotivation\r\n----------\r\nThe last major FTS release started to correctly fill \"job_state\" for those messages as \"ACTIVE\". This is a behavioral change which makes rucio skip those messages. \r\n\r\nhttps://github.com/rucio/rucio/blob/5e911923990cc9c3caa0afbfec919c3ea4121edc/lib/rucio/daemons/conveyor/receiver.py#L74\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright European Organization for Nuclear Research (CERN) since 2012\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nConveyor is a daemon to manage file transfers.\n\"\"\"\n\nimport datetime\nimport json\nimport logging\nimport socket\nimport threading\nimport time\nimport traceback\n\nimport stomp\n\nimport rucio.db.sqla.util\nfrom rucio.common import exception\nfrom rucio.common.config import config_get, config_get_bool, config_get_int\nfrom rucio.common.logging import setup_logging\nfrom rucio.common.policy import get_policy\nfrom rucio.core import request as request_core\nfrom rucio.core.monitor import record_counter\nfrom rucio.core.transfer import set_transfer_update_time\nfrom rucio.daemons.common import HeartbeatHandler\nfrom rucio.db.sqla.session import transactional_session\nfrom rucio.transfertool.fts3 import FTS3CompletionMessageTransferStatusReport\n\nlogging.getLogger(\"stomp\").setLevel(logging.CRITICAL)\n\ngraceful_stop = threading.Event()\n\n\nclass Receiver(object):\n\n def __init__(self, broker, id_, total_threads, full_mode=False, all_vos=False):\n self.__all_vos = all_vos\n self.__broker = broker\n self.__id = id_\n self.__total_threads = total_threads\n self.__full_mode = full_mode\n\n def on_error(self, frame):\n record_counter('daemons.conveyor.receiver.error')\n logging.error('[%s] %s' % (self.__broker, frame.body))\n\n def on_message(self, frame):\n record_counter('daemons.conveyor.receiver.message_all')\n\n msg = json.loads(frame.body)\n\n if not self.__all_vos:\n if 'vo' not in msg or msg['vo'] != get_policy():\n return\n\n if 'job_metadata' in msg.keys() \\\n and isinstance(msg['job_metadata'], dict) \\\n and 'issuer' in msg['job_metadata'].keys() \\\n and str(msg['job_metadata']['issuer']) == str('rucio'):\n\n if 'job_state' in msg.keys() and str(msg['job_state']) != str('ACTIVE'):\n record_counter('daemons.conveyor.receiver.message_rucio')\n\n 
self._perform_request_update(msg)\n\n @transactional_session\n def _perform_request_update(self, msg, session=None, logger=logging.log):\n external_host = msg.get('endpnt', None)\n request_id = msg['file_metadata'].get('request_id', None)\n try:\n tt_status_report = FTS3CompletionMessageTransferStatusReport(external_host, request_id=request_id, fts_message=msg)\n if tt_status_report.get_db_fields_to_update(session=session, logger=logger):\n logging.info('RECEIVED %s', tt_status_report)\n\n if self.__full_mode:\n ret = request_core.update_request_state(tt_status_report, session=session, logger=logger)\n record_counter('daemons.conveyor.receiver.update_request_state.{updated}', labels={'updated': ret})\n else:\n try:\n logging.debug(\"Update request %s update time\" % request_id)\n set_transfer_update_time(external_host, tt_status_report.external_id, datetime.datetime.utcnow() - datetime.timedelta(hours=24), session=session)\n record_counter('daemons.conveyor.receiver.set_transfer_update_time')\n except Exception as error:\n logging.debug(\"Failed to update transfer's update time: %s\" % str(error))\n except Exception:\n logging.critical(traceback.format_exc())\n\n\ndef receiver(id_, total_threads=1, full_mode=False, all_vos=False):\n \"\"\"\n Main loop to consume messages from the FTS3 producer.\n \"\"\"\n\n logging.info('receiver starting in full mode: %s' % full_mode)\n\n logger_prefix = executable = 'conveyor-receiver'\n\n brokers_alias = []\n brokers_resolved = []\n try:\n brokers_alias = [b.strip() for b in config_get('messaging-fts3', 'brokers').split(',')]\n except Exception:\n raise Exception('Could not load brokers from configuration')\n\n logging.info('resolving broker dns alias: %s' % brokers_alias)\n\n brokers_resolved = []\n for broker in brokers_alias:\n addrinfos = socket.getaddrinfo(broker, 0, socket.AF_INET, 0, socket.IPPROTO_TCP)\n brokers_resolved.extend(ai[4][0] for ai in addrinfos)\n\n logging.info('brokers resolved to %s', brokers_resolved)\n\n logging.info('checking authentication method')\n use_ssl = True\n try:\n use_ssl = config_get_bool('messaging-fts3', 'use_ssl')\n except:\n logging.info('could not find use_ssl in configuration -- please update your rucio.cfg')\n\n port = config_get_int('messaging-fts3', 'port')\n vhost = config_get('messaging-fts3', 'broker_virtual_host', raise_exception=False)\n if not use_ssl:\n username = config_get('messaging-fts3', 'username')\n password = config_get('messaging-fts3', 'password')\n port = config_get_int('messaging-fts3', 'nonssl_port')\n\n conns = []\n for broker in brokers_resolved:\n if not use_ssl:\n logging.info('setting up username/password authentication: %s' % broker)\n con = stomp.Connection12(host_and_ports=[(broker, port)],\n use_ssl=False,\n vhost=vhost,\n reconnect_attempts_max=999)\n else:\n logging.info('setting up ssl cert/key authentication: %s' % broker)\n con = stomp.Connection12(host_and_ports=[(broker, port)],\n use_ssl=True,\n ssl_key_file=config_get('messaging-fts3', 'ssl_key_file'),\n ssl_cert_file=config_get('messaging-fts3', 'ssl_cert_file'),\n vhost=vhost,\n reconnect_attempts_max=999)\n conns.append(con)\n\n logging.info('receiver started')\n\n with HeartbeatHandler(executable=executable, renewal_interval=30, logger_prefix=logger_prefix) as heartbeat_handler:\n\n while not graceful_stop.is_set():\n\n _, _, logger = heartbeat_handler.live()\n\n for conn in conns:\n\n if not conn.is_connected():\n logger(logging.INFO, 'connecting to %s' % conn.transport._Transport__host_and_ports[0][0])\n 
record_counter('daemons.messaging.fts3.reconnect.{host}', labels={'host': conn.transport._Transport__host_and_ports[0][0].split('.')[0]})\n\n conn.set_listener('rucio-messaging-fts3', Receiver(broker=conn.transport._Transport__host_and_ports[0],\n id_=id_, total_threads=total_threads,\n full_mode=full_mode, all_vos=all_vos))\n if not use_ssl:\n conn.connect(username, password, wait=True)\n else:\n conn.connect(wait=True)\n conn.subscribe(destination=config_get('messaging-fts3', 'destination'),\n id='rucio-messaging-fts3',\n ack='auto')\n time.sleep(1)\n\n for conn in conns:\n try:\n conn.disconnect()\n except Exception:\n pass\n\n\ndef stop(signum=None, frame=None):\n \"\"\"\n Graceful exit.\n \"\"\"\n\n graceful_stop.set()\n\n\ndef run(once=False, total_threads=1, full_mode=False):\n \"\"\"\n Starts up the receiver thread\n \"\"\"\n setup_logging()\n\n if rucio.db.sqla.util.is_old_db():\n raise exception.DatabaseException('Database was not updated, daemon won\\'t start')\n\n logging.info('starting receiver thread')\n threads = [threading.Thread(target=receiver, kwargs={'id_': i,\n 'full_mode': full_mode,\n 'total_threads': total_threads}) for i in range(0, total_threads)]\n\n [thread.start() for thread in threads]\n\n logging.info('waiting for interrupts')\n\n # Interruptible joins require a timeout.\n while threads:\n threads = [thread.join(timeout=3.14) for thread in threads if thread and thread.is_alive()]\n", "path": "lib/rucio/daemons/conveyor/receiver.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright European Organization for Nuclear Research (CERN) since 2012\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nConveyor is a daemon to manage file transfers.\n\"\"\"\n\nimport datetime\nimport json\nimport logging\nimport socket\nimport threading\nimport time\nimport traceback\n\nimport stomp\n\nimport rucio.db.sqla.util\nfrom rucio.common import exception\nfrom rucio.common.config import config_get, config_get_bool, config_get_int\nfrom rucio.common.logging import setup_logging\nfrom rucio.common.policy import get_policy\nfrom rucio.core import request as request_core\nfrom rucio.core.monitor import record_counter\nfrom rucio.core.transfer import set_transfer_update_time\nfrom rucio.daemons.common import HeartbeatHandler\nfrom rucio.db.sqla.session import transactional_session\nfrom rucio.transfertool.fts3 import FTS3CompletionMessageTransferStatusReport\n\nlogging.getLogger(\"stomp\").setLevel(logging.CRITICAL)\n\ngraceful_stop = threading.Event()\n\n\nclass Receiver(object):\n\n def __init__(self, broker, id_, total_threads, full_mode=False, all_vos=False):\n self.__all_vos = all_vos\n self.__broker = broker\n self.__id = id_\n self.__total_threads = total_threads\n self.__full_mode = full_mode\n\n def on_error(self, frame):\n record_counter('daemons.conveyor.receiver.error')\n logging.error('[%s] %s' % (self.__broker, frame.body))\n\n def on_message(self, frame):\n record_counter('daemons.conveyor.receiver.message_all')\n\n msg = 
json.loads(frame.body)\n\n if not self.__all_vos:\n if 'vo' not in msg or msg['vo'] != get_policy():\n return\n\n if 'job_metadata' in msg.keys() \\\n and isinstance(msg['job_metadata'], dict) \\\n and 'issuer' in msg['job_metadata'].keys() \\\n and str(msg['job_metadata']['issuer']) == str('rucio'):\n\n if 'job_state' in msg.keys() and (str(msg['job_state']) != str('ACTIVE') or msg.get('job_multihop', False) is True):\n record_counter('daemons.conveyor.receiver.message_rucio')\n\n self._perform_request_update(msg)\n\n @transactional_session\n def _perform_request_update(self, msg, session=None, logger=logging.log):\n external_host = msg.get('endpnt', None)\n request_id = msg['file_metadata'].get('request_id', None)\n try:\n tt_status_report = FTS3CompletionMessageTransferStatusReport(external_host, request_id=request_id, fts_message=msg)\n if tt_status_report.get_db_fields_to_update(session=session, logger=logger):\n logging.info('RECEIVED %s', tt_status_report)\n\n if self.__full_mode:\n ret = request_core.update_request_state(tt_status_report, session=session, logger=logger)\n record_counter('daemons.conveyor.receiver.update_request_state.{updated}', labels={'updated': ret})\n else:\n try:\n logging.debug(\"Update request %s update time\" % request_id)\n set_transfer_update_time(external_host, tt_status_report.external_id, datetime.datetime.utcnow() - datetime.timedelta(hours=24), session=session)\n record_counter('daemons.conveyor.receiver.set_transfer_update_time')\n except Exception as error:\n logging.debug(\"Failed to update transfer's update time: %s\" % str(error))\n except Exception:\n logging.critical(traceback.format_exc())\n\n\ndef receiver(id_, total_threads=1, full_mode=False, all_vos=False):\n \"\"\"\n Main loop to consume messages from the FTS3 producer.\n \"\"\"\n\n logging.info('receiver starting in full mode: %s' % full_mode)\n\n logger_prefix = executable = 'conveyor-receiver'\n\n brokers_alias = []\n brokers_resolved = []\n try:\n brokers_alias = [b.strip() for b in config_get('messaging-fts3', 'brokers').split(',')]\n except Exception:\n raise Exception('Could not load brokers from configuration')\n\n logging.info('resolving broker dns alias: %s' % brokers_alias)\n\n brokers_resolved = []\n for broker in brokers_alias:\n addrinfos = socket.getaddrinfo(broker, 0, socket.AF_INET, 0, socket.IPPROTO_TCP)\n brokers_resolved.extend(ai[4][0] for ai in addrinfos)\n\n logging.info('brokers resolved to %s', brokers_resolved)\n\n logging.info('checking authentication method')\n use_ssl = True\n try:\n use_ssl = config_get_bool('messaging-fts3', 'use_ssl')\n except:\n logging.info('could not find use_ssl in configuration -- please update your rucio.cfg')\n\n port = config_get_int('messaging-fts3', 'port')\n vhost = config_get('messaging-fts3', 'broker_virtual_host', raise_exception=False)\n if not use_ssl:\n username = config_get('messaging-fts3', 'username')\n password = config_get('messaging-fts3', 'password')\n port = config_get_int('messaging-fts3', 'nonssl_port')\n\n conns = []\n for broker in brokers_resolved:\n if not use_ssl:\n logging.info('setting up username/password authentication: %s' % broker)\n con = stomp.Connection12(host_and_ports=[(broker, port)],\n use_ssl=False,\n vhost=vhost,\n reconnect_attempts_max=999)\n else:\n logging.info('setting up ssl cert/key authentication: %s' % broker)\n con = stomp.Connection12(host_and_ports=[(broker, port)],\n use_ssl=True,\n ssl_key_file=config_get('messaging-fts3', 'ssl_key_file'),\n 
ssl_cert_file=config_get('messaging-fts3', 'ssl_cert_file'),\n vhost=vhost,\n reconnect_attempts_max=999)\n conns.append(con)\n\n logging.info('receiver started')\n\n with HeartbeatHandler(executable=executable, renewal_interval=30, logger_prefix=logger_prefix) as heartbeat_handler:\n\n while not graceful_stop.is_set():\n\n _, _, logger = heartbeat_handler.live()\n\n for conn in conns:\n\n if not conn.is_connected():\n logger(logging.INFO, 'connecting to %s' % conn.transport._Transport__host_and_ports[0][0])\n record_counter('daemons.messaging.fts3.reconnect.{host}', labels={'host': conn.transport._Transport__host_and_ports[0][0].split('.')[0]})\n\n conn.set_listener('rucio-messaging-fts3', Receiver(broker=conn.transport._Transport__host_and_ports[0],\n id_=id_, total_threads=total_threads,\n full_mode=full_mode, all_vos=all_vos))\n if not use_ssl:\n conn.connect(username, password, wait=True)\n else:\n conn.connect(wait=True)\n conn.subscribe(destination=config_get('messaging-fts3', 'destination'),\n id='rucio-messaging-fts3',\n ack='auto')\n time.sleep(1)\n\n for conn in conns:\n try:\n conn.disconnect()\n except Exception:\n pass\n\n\ndef stop(signum=None, frame=None):\n \"\"\"\n Graceful exit.\n \"\"\"\n\n graceful_stop.set()\n\n\ndef run(once=False, total_threads=1, full_mode=False):\n \"\"\"\n Starts up the receiver thread\n \"\"\"\n setup_logging()\n\n if rucio.db.sqla.util.is_old_db():\n raise exception.DatabaseException('Database was not updated, daemon won\\'t start')\n\n logging.info('starting receiver thread')\n threads = [threading.Thread(target=receiver, kwargs={'id_': i,\n 'full_mode': full_mode,\n 'total_threads': total_threads}) for i in range(0, total_threads)]\n\n [thread.start() for thread in threads]\n\n logging.info('waiting for interrupts')\n\n # Interruptible joins require a timeout.\n while threads:\n threads = [thread.join(timeout=3.14) for thread in threads if thread and thread.is_alive()]\n", "path": "lib/rucio/daemons/conveyor/receiver.py"}]}
| 2,827 | 187 |
gh_patches_debug_65373
|
rasdani/github-patches
|
git_diff
|
TheAlgorithms__Python-11276
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Global variable should not be used in a class method
### Repository commit
9caf4784aada17dc75348f77cc8c356df503c0f3
### Python version (python --version)
python3.11
### Dependencies version (pip freeze)
#nop
### Expected behavior
Hi, guys. I came across one global variable that is used in a class method. I think using an instance attribute would be better!
```python
def show_data(self):
show_list = []
for i in range(1, N + 1): ==> should using self.N instead of N !
show_list += [self.query(i, i)]
print(show_list)
```
[source](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/segment_tree.py#L101)
### Actual behavior
```python
def show_data(self):
show_list = []
for i in range(1, self.N + 1):
show_list += [self.query(i, i)]
print(show_list)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `data_structures/binary_tree/segment_tree.py`
Content:
```
1 import math
2
3
4 class SegmentTree:
5 def __init__(self, a):
6 self.A = a
7 self.N = len(self.A)
8 self.st = [0] * (
9 4 * self.N
10 ) # approximate the overall size of segment tree with array N
11 if self.N:
12 self.build(1, 0, self.N - 1)
13
14 def left(self, idx):
15 """
16 Returns the left child index for a given index in a binary tree.
17
18 >>> s = SegmentTree([1, 2, 3])
19 >>> s.left(1)
20 2
21 >>> s.left(2)
22 4
23 """
24 return idx * 2
25
26 def right(self, idx):
27 """
28 Returns the right child index for a given index in a binary tree.
29
30 >>> s = SegmentTree([1, 2, 3])
31 >>> s.right(1)
32 3
33 >>> s.right(2)
34 5
35 """
36 return idx * 2 + 1
37
38 def build(self, idx, l, r): # noqa: E741
39 if l == r:
40 self.st[idx] = self.A[l]
41 else:
42 mid = (l + r) // 2
43 self.build(self.left(idx), l, mid)
44 self.build(self.right(idx), mid + 1, r)
45 self.st[idx] = max(self.st[self.left(idx)], self.st[self.right(idx)])
46
47 def update(self, a, b, val):
48 """
49 Update the values in the segment tree in the range [a,b] with the given value.
50
51 >>> s = SegmentTree([1, 2, 3, 4, 5])
52 >>> s.update(2, 4, 10)
53 True
54 >>> s.query(1, 5)
55 10
56 """
57 return self.update_recursive(1, 0, self.N - 1, a - 1, b - 1, val)
58
59 def update_recursive(self, idx, l, r, a, b, val): # noqa: E741
60 """
61 update(1, 1, N, a, b, v) for update val v to [a,b]
62 """
63 if r < a or l > b:
64 return True
65 if l == r:
66 self.st[idx] = val
67 return True
68 mid = (l + r) // 2
69 self.update_recursive(self.left(idx), l, mid, a, b, val)
70 self.update_recursive(self.right(idx), mid + 1, r, a, b, val)
71 self.st[idx] = max(self.st[self.left(idx)], self.st[self.right(idx)])
72 return True
73
74 def query(self, a, b):
75 """
76 Query the maximum value in the range [a,b].
77
78 >>> s = SegmentTree([1, 2, 3, 4, 5])
79 >>> s.query(1, 3)
80 3
81 >>> s.query(1, 5)
82 5
83 """
84 return self.query_recursive(1, 0, self.N - 1, a - 1, b - 1)
85
86 def query_recursive(self, idx, l, r, a, b): # noqa: E741
87 """
88 query(1, 1, N, a, b) for query max of [a,b]
89 """
90 if r < a or l > b:
91 return -math.inf
92 if l >= a and r <= b:
93 return self.st[idx]
94 mid = (l + r) // 2
95 q1 = self.query_recursive(self.left(idx), l, mid, a, b)
96 q2 = self.query_recursive(self.right(idx), mid + 1, r, a, b)
97 return max(q1, q2)
98
99 def show_data(self):
100 show_list = []
101 for i in range(1, N + 1):
102 show_list += [self.query(i, i)]
103 print(show_list)
104
105
106 if __name__ == "__main__":
107 A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
108 N = 15
109 segt = SegmentTree(A)
110 print(segt.query(4, 6))
111 print(segt.query(7, 11))
112 print(segt.query(7, 12))
113 segt.update(1, 3, 111)
114 print(segt.query(1, 15))
115 segt.update(7, 8, 235)
116 segt.show_data()
117
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/data_structures/binary_tree/segment_tree.py b/data_structures/binary_tree/segment_tree.py
--- a/data_structures/binary_tree/segment_tree.py
+++ b/data_structures/binary_tree/segment_tree.py
@@ -98,7 +98,7 @@
def show_data(self):
show_list = []
- for i in range(1, N + 1):
+ for i in range(1, self.N + 1):
show_list += [self.query(i, i)]
print(show_list)
|
{"golden_diff": "diff --git a/data_structures/binary_tree/segment_tree.py b/data_structures/binary_tree/segment_tree.py\n--- a/data_structures/binary_tree/segment_tree.py\n+++ b/data_structures/binary_tree/segment_tree.py\n@@ -98,7 +98,7 @@\n \n def show_data(self):\n show_list = []\n- for i in range(1, N + 1):\n+ for i in range(1, self.N + 1):\n show_list += [self.query(i, i)]\n print(show_list)\n", "issue": "Global variable should not using in class method\n### Repository commit\n\n9caf4784aada17dc75348f77cc8c356df503c0f3\n\n### Python version (python --version)\n\npython3.11\n\n### Dependencies version (pip freeze)\n\n#nop \n\n### Expected behavior\n\nHi, guys. I come across one global variable that used in class method. I think using instance attribute would be better !\r\n```python\r\n def show_data(self):\r\n show_list = []\r\n for i in range(1, N + 1): ==> should using self.N instead of N !\r\n show_list += [self.query(i, i)]\r\n print(show_list)\r\n```\r\n[source](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/segment_tree.py#L101)\r\n\r\n\n\n### Actual behavior\n\n```python\r\n def show_data(self):\r\n show_list = []\r\n for i in range(1, self.N + 1): \r\n show_list += [self.query(i, i)]\r\n print(show_list)\r\n```\n", "before_files": [{"content": "import math\n\n\nclass SegmentTree:\n def __init__(self, a):\n self.A = a\n self.N = len(self.A)\n self.st = [0] * (\n 4 * self.N\n ) # approximate the overall size of segment tree with array N\n if self.N:\n self.build(1, 0, self.N - 1)\n\n def left(self, idx):\n \"\"\"\n Returns the left child index for a given index in a binary tree.\n\n >>> s = SegmentTree([1, 2, 3])\n >>> s.left(1)\n 2\n >>> s.left(2)\n 4\n \"\"\"\n return idx * 2\n\n def right(self, idx):\n \"\"\"\n Returns the right child index for a given index in a binary tree.\n\n >>> s = SegmentTree([1, 2, 3])\n >>> s.right(1)\n 3\n >>> s.right(2)\n 5\n \"\"\"\n return idx * 2 + 1\n\n def build(self, idx, l, r): # noqa: E741\n if l == r:\n self.st[idx] = self.A[l]\n else:\n mid = (l + r) // 2\n self.build(self.left(idx), l, mid)\n self.build(self.right(idx), mid + 1, r)\n self.st[idx] = max(self.st[self.left(idx)], self.st[self.right(idx)])\n\n def update(self, a, b, val):\n \"\"\"\n Update the values in the segment tree in the range [a,b] with the given value.\n\n >>> s = SegmentTree([1, 2, 3, 4, 5])\n >>> s.update(2, 4, 10)\n True\n >>> s.query(1, 5)\n 10\n \"\"\"\n return self.update_recursive(1, 0, self.N - 1, a - 1, b - 1, val)\n\n def update_recursive(self, idx, l, r, a, b, val): # noqa: E741\n \"\"\"\n update(1, 1, N, a, b, v) for update val v to [a,b]\n \"\"\"\n if r < a or l > b:\n return True\n if l == r:\n self.st[idx] = val\n return True\n mid = (l + r) // 2\n self.update_recursive(self.left(idx), l, mid, a, b, val)\n self.update_recursive(self.right(idx), mid + 1, r, a, b, val)\n self.st[idx] = max(self.st[self.left(idx)], self.st[self.right(idx)])\n return True\n\n def query(self, a, b):\n \"\"\"\n Query the maximum value in the range [a,b].\n\n >>> s = SegmentTree([1, 2, 3, 4, 5])\n >>> s.query(1, 3)\n 3\n >>> s.query(1, 5)\n 5\n \"\"\"\n return self.query_recursive(1, 0, self.N - 1, a - 1, b - 1)\n\n def query_recursive(self, idx, l, r, a, b): # noqa: E741\n \"\"\"\n query(1, 1, N, a, b) for query max of [a,b]\n \"\"\"\n if r < a or l > b:\n return -math.inf\n if l >= a and r <= b:\n return self.st[idx]\n mid = (l + r) // 2\n q1 = self.query_recursive(self.left(idx), l, mid, a, b)\n q2 = self.query_recursive(self.right(idx), 
mid + 1, r, a, b)\n return max(q1, q2)\n\n def show_data(self):\n show_list = []\n for i in range(1, N + 1):\n show_list += [self.query(i, i)]\n print(show_list)\n\n\nif __name__ == \"__main__\":\n A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]\n N = 15\n segt = SegmentTree(A)\n print(segt.query(4, 6))\n print(segt.query(7, 11))\n print(segt.query(7, 12))\n segt.update(1, 3, 111)\n print(segt.query(1, 15))\n segt.update(7, 8, 235)\n segt.show_data()\n", "path": "data_structures/binary_tree/segment_tree.py"}], "after_files": [{"content": "import math\n\n\nclass SegmentTree:\n def __init__(self, a):\n self.A = a\n self.N = len(self.A)\n self.st = [0] * (\n 4 * self.N\n ) # approximate the overall size of segment tree with array N\n if self.N:\n self.build(1, 0, self.N - 1)\n\n def left(self, idx):\n \"\"\"\n Returns the left child index for a given index in a binary tree.\n\n >>> s = SegmentTree([1, 2, 3])\n >>> s.left(1)\n 2\n >>> s.left(2)\n 4\n \"\"\"\n return idx * 2\n\n def right(self, idx):\n \"\"\"\n Returns the right child index for a given index in a binary tree.\n\n >>> s = SegmentTree([1, 2, 3])\n >>> s.right(1)\n 3\n >>> s.right(2)\n 5\n \"\"\"\n return idx * 2 + 1\n\n def build(self, idx, l, r): # noqa: E741\n if l == r:\n self.st[idx] = self.A[l]\n else:\n mid = (l + r) // 2\n self.build(self.left(idx), l, mid)\n self.build(self.right(idx), mid + 1, r)\n self.st[idx] = max(self.st[self.left(idx)], self.st[self.right(idx)])\n\n def update(self, a, b, val):\n \"\"\"\n Update the values in the segment tree in the range [a,b] with the given value.\n\n >>> s = SegmentTree([1, 2, 3, 4, 5])\n >>> s.update(2, 4, 10)\n True\n >>> s.query(1, 5)\n 10\n \"\"\"\n return self.update_recursive(1, 0, self.N - 1, a - 1, b - 1, val)\n\n def update_recursive(self, idx, l, r, a, b, val): # noqa: E741\n \"\"\"\n update(1, 1, N, a, b, v) for update val v to [a,b]\n \"\"\"\n if r < a or l > b:\n return True\n if l == r:\n self.st[idx] = val\n return True\n mid = (l + r) // 2\n self.update_recursive(self.left(idx), l, mid, a, b, val)\n self.update_recursive(self.right(idx), mid + 1, r, a, b, val)\n self.st[idx] = max(self.st[self.left(idx)], self.st[self.right(idx)])\n return True\n\n def query(self, a, b):\n \"\"\"\n Query the maximum value in the range [a,b].\n\n >>> s = SegmentTree([1, 2, 3, 4, 5])\n >>> s.query(1, 3)\n 3\n >>> s.query(1, 5)\n 5\n \"\"\"\n return self.query_recursive(1, 0, self.N - 1, a - 1, b - 1)\n\n def query_recursive(self, idx, l, r, a, b): # noqa: E741\n \"\"\"\n query(1, 1, N, a, b) for query max of [a,b]\n \"\"\"\n if r < a or l > b:\n return -math.inf\n if l >= a and r <= b:\n return self.st[idx]\n mid = (l + r) // 2\n q1 = self.query_recursive(self.left(idx), l, mid, a, b)\n q2 = self.query_recursive(self.right(idx), mid + 1, r, a, b)\n return max(q1, q2)\n\n def show_data(self):\n show_list = []\n for i in range(1, self.N + 1):\n show_list += [self.query(i, i)]\n print(show_list)\n\n\nif __name__ == \"__main__\":\n A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]\n N = 15\n segt = SegmentTree(A)\n print(segt.query(4, 6))\n print(segt.query(7, 11))\n print(segt.query(7, 12))\n segt.update(1, 3, 111)\n print(segt.query(1, 15))\n segt.update(7, 8, 235)\n segt.show_data()\n", "path": "data_structures/binary_tree/segment_tree.py"}]}
| 1,804 | 121 |
gh_patches_debug_10927
|
rasdani/github-patches
|
git_diff
|
SCons__scons-3870
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update SCons API sphinx config to pull version from SCons/__init__.py
Update SCons API sphinx config to pull version from `SCons/__init__.py`
See `doc/sphinx/conf.py`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `doc/sphinx/conf.py`
Content:
```
1 #!/usr/bin/env python3
2 # -*- coding: utf-8 -*-
3 #
4 # SCons documentation build configuration file, created by
5 # sphinx-quickstart on Mon Apr 30 09:36:53 2018.
6 #
7 # This file is execfile()d with the current directory set to its
8 # containing dir.
9 #
10 # Note that not all possible configuration values are present in this
11 # autogenerated file.
12 #
13 # All configuration values have a default; values that are commented out
14 # serve to show the default.
15
16 # If extensions (or modules to document with autodoc) are in another directory,
17 # add these directories to sys.path here. If the directory is relative to the
18 # documentation root, use os.path.abspath to make it absolute, like shown here.
19 #
20 import os
21 import sys
22 sys.path.insert(0, os.path.abspath('../../'))
23
24 # -- General configuration ------------------------------------------------
25
26 # If your documentation needs a minimal Sphinx version, state it here.
27 #
28 needs_sphinx = '1.3'
29
30 # Add any Sphinx extension module names here, as strings. They can be
31 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
32 # ones.
33 extensions = [
34 #'autoclasstoc', # TODO: figure out how to make this useful
35 'sphinx.ext.autodoc',
36 'sphinx.ext.autosummary',
37 #'sphinx.ext.githubpages'
38 'sphinx.ext.napoleon',
39 'sphinx.ext.todo',
40 'sphinx.ext.viewcode',
41 'sphinx_rtd_theme',
42 'rst2pdf.pdfbuilder',
43 ]
44
45 autosummary_generate = True
46
47 autodoc_default_options = {
48 "members": True,
49 #"special-members": True,
50 "private-members": True,
51 "inherited-members": True,
52 "undoc-members": True,
53 "exclude-members": '__weakref__',
54 }
55 autodoc_exclude_members = ['*Tests']
56 napoleon_include_special_with_doc = False
57 napoleon_include_private_with_doc = True
58
59 # Add any paths that contain templates here, relative to this directory.
60 templates_path = ['_templates']
61
62 # The suffix(es) of source filenames.
63 # You can specify multiple suffix as a list of string:
64 #
65 source_suffix = '.rst'
66
67 # The master toctree document.
68 master_doc = 'index'
69
70 # General information about the project.
71 project = 'SCons'
72 copyright = '2020, SCons Project'
73 author = 'SCons Project Team'
74
75 # The version info for the project you're documenting, acts as replacement for
76 # |version| and |release|, also used in various other places throughout the
77 # built documents.
78 #
79 # TODO: fill these in externally
80 # The short X.Y version.
81 version = '4.1'
82 # The full version, including alpha/beta/rc tags.
83 release = '4.1.0'
84
85 # The language for content autogenerated by Sphinx. Refer to documentation
86 # for a list of supported languages.
87 #
88 # This is also used if you do content translation via gettext catalogs.
89 # Usually you set "language" from the command line for these cases.
90 language = None
91
92 # List of patterns, relative to source directory, that match files and
93 # directories to ignore when looking for source files.
94 # This pattern also affects html_static_path and html_extra_path .
95 exclude_patterns = ["*Tests.py"]
96
97 # The name of the Pygments (syntax highlighting) style to use.
98 pygments_style = 'sphinx'
99
100 # If true, `todo` and `todoList` produce output, else they produce nothing.
101 todo_include_todos = False
102
103
104 # -- Options for HTML output ----------------------------------------------
105
106 # The theme to use for HTML and HTML Help pages. See the documentation for
107 # a list of builtin themes.
108 #
109 html_theme = "sphinx_rtd_theme"
110
111 # Theme options are theme-specific and customize the look and feel of a theme
112 # further. For a list of options available for each theme, see the
113 # documentation.
114 #
115 html_theme_options = {
116 "collapse_navigation": False,
117 "navigation_depth": 3,
118 }
119
120 # Add any paths that contain custom static files (such as style sheets) here,
121 # relative to this directory. They are copied after the builtin static files,
122 # so a file named "default.css" will overwrite the builtin "default.css".
123 html_static_path = ['_static']
124
125 # Custom sidebar templates, must be a dictionary that maps document names
126 # to template names.
127 #
128 # The default sidebars (for documents that don't match any pattern) are
129 # defined by theme itself. Builtin themes are using these templates by
130 # default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
131 # 'searchbox.html']``.
132 #
133 # html_sidebars = {}
134
135 # -- Options for HTMLHelp output ------------------------------------------
136
137 # Output file base name for HTML help builder.
138 htmlhelp_basename = 'SConsAPIDocs'
139
140
141 # -- Options for LaTeX output ---------------------------------------------
142
143 latex_elements = {
144 # The paper size ('letterpaper' or 'a4paper').
145 #
146 # 'papersize': 'letterpaper',
147
148 # The font size ('10pt', '11pt' or '12pt').
149 #
150 # 'pointsize': '10pt',
151
152 # Additional stuff for the LaTeX preamble.
153 #
154 # 'preamble': '',
155
156 # Latex figure (float) alignment
157 #
158 # 'figure_align': 'htbp',
159 }
160
161
162 # Grouping the document tree into LaTeX files. List of tuples
163 # (source start file, target name, title,
164 # author, documentclass [howto, manual, or own class]).
165 latex_documents = [
166 (
167 master_doc,
168 "SConsAPIDocs.tex",
169 "SCons API Documentation",
170 "SCons Project",
171 "manual",
172 )
173 ]
174
175
176 # -- Options for manual page output ---------------------------------------
177
178 # One entry per manual page. List of tuples
179 # (source start file, name, description, authors, manual section).
180 man_pages = [
181 (master_doc, 'sconsapidocs', 'SCons API Documentation', [author], 1)
182 ]
183
184
185 # -- Options for Texinfo output -------------------------------------------
186
187 # Grouping the document tree into Texinfo files. List of tuples
188 # (source start file, target name, title, author,
189 # dir menu entry, description, category)
190 texinfo_documents = [
191 (
192 master_doc,
193 "SConsAPIDocs",
194 "SCons API Documentation",
195 author,
196 "SConsAPIDocs",
197 "One line description of project.",
198 "Miscellaneous",
199 )
200 ]
201
202
203 # -- Options for Epub output -------------------------------------------------
204
205
206
207 # -- for PDF
208 # Grouping the document tree into PDF files. List of tuples
209 # (source start file, target name, title, author, options).
210 pdf_documents = [('index', u'scons-api', u'SCons API Docs', u'SCons Project'),]
211
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/doc/sphinx/conf.py b/doc/sphinx/conf.py
--- a/doc/sphinx/conf.py
+++ b/doc/sphinx/conf.py
@@ -76,11 +76,12 @@
# |version| and |release|, also used in various other places throughout the
# built documents.
#
-# TODO: fill these in externally
+from SCons import __version__
+# The full version, including alpha/beta/rc tags:
+release = __version__
# The short X.Y version.
-version = '4.1'
-# The full version, including alpha/beta/rc tags.
-release = '4.1.0'
+major, minor, _ = __version__.split('.')
+version = '.'.join([major, minor])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
|
{"golden_diff": "diff --git a/doc/sphinx/conf.py b/doc/sphinx/conf.py\n--- a/doc/sphinx/conf.py\n+++ b/doc/sphinx/conf.py\n@@ -76,11 +76,12 @@\n # |version| and |release|, also used in various other places throughout the\n # built documents.\n #\n-# TODO: fill these in externally\n+from SCons import __version__\n+# The full version, including alpha/beta/rc tags:\n+release = __version__\n # The short X.Y version.\n-version = '4.1'\n-# The full version, including alpha/beta/rc tags.\n-release = '4.1.0'\n+major, minor, _ = __version__.split('.')\n+version = '.'.join([major, minor])\n \n # The language for content autogenerated by Sphinx. Refer to documentation\n # for a list of supported languages.\n", "issue": "Update SCons API sphinx config to pull version from SCons/__init__.py\nUpdate SCons API sphinx config to pull version from `SCons/__init__.py`\r\n\r\nSee `doc/sphinx/conf.py`\n", "before_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# SCons documentation build configuration file, created by\n# sphinx-quickstart on Mon Apr 30 09:36:53 2018.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\nsys.path.insert(0, os.path.abspath('../../'))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\nneeds_sphinx = '1.3'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n #'autoclasstoc', # TODO: figure out how to make this useful\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n #'sphinx.ext.githubpages'\n 'sphinx.ext.napoleon',\n 'sphinx.ext.todo',\n 'sphinx.ext.viewcode',\n 'sphinx_rtd_theme',\n 'rst2pdf.pdfbuilder',\n]\n\nautosummary_generate = True\n\nautodoc_default_options = {\n \"members\": True,\n #\"special-members\": True,\n \"private-members\": True,\n \"inherited-members\": True,\n \"undoc-members\": True,\n \"exclude-members\": '__weakref__',\n}\nautodoc_exclude_members = ['*Tests']\nnapoleon_include_special_with_doc = False\nnapoleon_include_private_with_doc = True\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'SCons'\ncopyright = '2020, SCons Project'\nauthor = 'SCons Project Team'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# TODO: fill these in externally\n# The short X.Y version.\nversion = '4.1'\n# The full version, including alpha/beta/rc tags.\nrelease = '4.1.0'\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = [\"*Tests.py\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\n \"collapse_navigation\": False,\n \"navigation_depth\": 3,\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\n# html_sidebars = {}\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'SConsAPIDocs'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (\n master_doc,\n \"SConsAPIDocs.tex\",\n \"SCons API Documentation\",\n \"SCons Project\",\n \"manual\",\n )\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'sconsapidocs', 'SCons API Documentation', [author], 1)\n]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"SConsAPIDocs\",\n \"SCons API Documentation\",\n author,\n \"SConsAPIDocs\",\n \"One line description of project.\",\n \"Miscellaneous\",\n )\n]\n\n\n# -- Options for Epub output -------------------------------------------------\n\n\n\n# -- for PDF\n# Grouping the document tree into PDF files. 
List of tuples\n# (source start file, target name, title, author, options).\npdf_documents = [('index', u'scons-api', u'SCons API Docs', u'SCons Project'),]\n", "path": "doc/sphinx/conf.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# SCons documentation build configuration file, created by\n# sphinx-quickstart on Mon Apr 30 09:36:53 2018.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\nsys.path.insert(0, os.path.abspath('../../'))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\nneeds_sphinx = '1.3'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n #'autoclasstoc', # TODO: figure out how to make this useful\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n #'sphinx.ext.githubpages'\n 'sphinx.ext.napoleon',\n 'sphinx.ext.todo',\n 'sphinx.ext.viewcode',\n 'sphinx_rtd_theme',\n 'rst2pdf.pdfbuilder',\n]\n\nautosummary_generate = True\n\nautodoc_default_options = {\n \"members\": True,\n #\"special-members\": True,\n \"private-members\": True,\n \"inherited-members\": True,\n \"undoc-members\": True,\n \"exclude-members\": '__weakref__',\n}\nautodoc_exclude_members = ['*Tests']\nnapoleon_include_special_with_doc = False\nnapoleon_include_private_with_doc = True\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'SCons'\ncopyright = '2020, SCons Project'\nauthor = 'SCons Project Team'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\nfrom SCons import __version__\n# The full version, including alpha/beta/rc tags:\nrelease = __version__\n# The short X.Y version.\nmajor, minor, _ = __version__.split('.')\nversion = '.'.join([major, minor])\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = [\"*Tests.py\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\n \"collapse_navigation\": False,\n \"navigation_depth\": 3,\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\n# html_sidebars = {}\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'SConsAPIDocs'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (\n master_doc,\n \"SConsAPIDocs.tex\",\n \"SCons API Documentation\",\n \"SCons Project\",\n \"manual\",\n )\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'sconsapidocs', 'SCons API Documentation', [author], 1)\n]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"SConsAPIDocs\",\n \"SCons API Documentation\",\n author,\n \"SConsAPIDocs\",\n \"One line description of project.\",\n \"Miscellaneous\",\n )\n]\n\n\n# -- Options for Epub output -------------------------------------------------\n\n\n\n# -- for PDF\n# Grouping the document tree into PDF files. 
List of tuples\n# (source start file, target name, title, author, options).\npdf_documents = [('index', u'scons-api', u'SCons API Docs', u'SCons Project'),]\n", "path": "doc/sphinx/conf.py"}]}
| 2,333 | 185 |
gh_patches_debug_19308
|
rasdani/github-patches
|
git_diff
|
WordPress__openverse-api-730
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Mature filter is broken
## Description
<!-- Concisely describe the bug. Compare your experience with what you expected to happen. -->
<!-- For example: "I clicked the 'submit' button and instead of seeing a thank you message, I saw a blank page." -->
The `mature` flag on our API is currently failing.
Here's the stacktrace we're seeing
```python
AttributeError: 'NoneType' object has no attribute 'to_dict'
File "django/core/handlers/exception.py", line 55, in inner
response = get_response(request)
File "django/core/handlers/base.py", line 197, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "django/views/decorators/csrf.py", line 54, in wrapped_view
return view_func(*args, **kwargs)
File "rest_framework/viewsets.py", line 125, in view
return self.dispatch(request, *args, **kwargs)
File "rest_framework/views.py", line 509, in dispatch
response = self.handle_exception(exc)
File "rest_framework/views.py", line 469, in handle_exception
self.raise_uncaught_exception(exc)
File "rest_framework/views.py", line 480, in raise_uncaught_exception
raise exc
File "rest_framework/views.py", line 506, in dispatch
response = handler(request, *args, **kwargs)
File "django/utils/decorators.py", line 46, in _wrapper
return bound_method(*args, **kwargs)
File "catalog/api/views/media_views.py", line 68, in list
results, num_pages, num_results = perform_search(
File "catalog/api/controllers/elasticsearch/search.py", line 166, in perform_search
start, end = get_query_slice(
File "catalog/api/controllers/elasticsearch/utils.py", line 88, in get_query_slice
start_slice, end_slice = paginate_with_dead_link_mask(s, page_size, page)
File "catalog/api/controllers/elasticsearch/utils.py", line 58, in paginate_with_dead_link_mask
query_hash = get_query_hash(s)
File "catalog/api/utils/dead_link_mask.py", line 21, in get_query_hash
serialized_search_obj = s.to_dict()
File "elasticsearch_dsl/search.py", line 658, in to_dict
d["query"] = self.query.to_dict()
File "elasticsearch_dsl/utils.py", line 382, in to_dict
value = list(map(lambda x: x.to_dict(), value))
File "elasticsearch_dsl/utils.py", line 382, in <lambda>
value = list(map(lambda x: x.to_dict(), value))
```
## Reproduction
<!-- Provide detailed steps to reproduce the bug. -->
1. Visit http://localhost:8000/v1/images/?mature=true
2. Observe 500 error
## Additional context
<!-- Add any other context about the problem here; or delete the section entirely. -->
Sentry issue: https://sentry.io/share/issue/84aad5fd22354d20844f9fd5873958be/
## Resolution
<!-- Replace the [ ] with [x] to check the box. -->
- [ ] 🙋 I would be interested in resolving this bug.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `api/catalog/api/controllers/elasticsearch/search.py`
Content:
```
1 from __future__ import annotations
2
3 import json
4 import logging as log
5 import pprint
6 from typing import List, Literal, Tuple, Union
7
8 from django.conf import settings
9
10 from elasticsearch.exceptions import RequestError
11 from elasticsearch_dsl import Q, Search
12 from elasticsearch_dsl.response import Hit
13
14 from catalog.api.controllers.elasticsearch.utils import (
15 exclude_filtered_providers,
16 get_query_slice,
17 get_result_and_page_count,
18 post_process_results,
19 )
20 from catalog.api.serializers.media_serializers import MediaSearchRequestSerializer
21
22
23 def _quote_escape(query_string: str) -> str:
24 """
25 If there are any unmatched quotes in the query supplied by the user, ignore
26 them by escaping.
27
28 :param query_string: the string in which to escape unbalanced quotes
29 :return: the given string, if the quotes are balanced, the escaped string otherwise
30 """
31
32 num_quotes = query_string.count('"')
33 if num_quotes % 2 == 1:
34 return query_string.replace('"', '\\"')
35 else:
36 return query_string
37
38
39 def _apply_filter(
40 s: Search,
41 query_serializer: MediaSearchRequestSerializer,
42 basis: Union[str, tuple[str, str]],
43 behaviour: Literal["filter", "exclude"] = "filter",
44 ) -> Search:
45 """
46 Parse and apply a filter from the search parameters serializer. The
47 parameter key is assumed to have the same name as the corresponding
48 Elasticsearch property. Each parameter value is assumed to be a comma
49 separated list encoded as a string.
50
51 :param s: the search query to issue to Elasticsearch
52 :param query_serializer: the ``MediaSearchRequestSerializer`` object with the query
53 :param basis: the name of the field in the serializer and Elasticsearch
54 :param behaviour: whether to accept (``filter``) or reject (``exclude``) the hit
55 :return: the modified search query
56 """
57
58 search_params = query_serializer.data
59 if isinstance(basis, tuple):
60 serializer_field, es_field = basis
61 else:
62 serializer_field = es_field = basis
63 if serializer_field in search_params:
64 filters = []
65 for arg in search_params[serializer_field].split(","):
66 filters.append(Q("term", **{es_field: arg}))
67 method = getattr(s, behaviour) # can be ``s.filter`` or ``s.exclude``
68 return method("bool", should=filters)
69 else:
70 return s
71
72
73 def perform_search(
74 query_serializer: MediaSearchRequestSerializer,
75 index: Literal["image", "audio"],
76 ip: int,
77 ) -> Tuple[List[Hit], int, int]:
78 """
79 Perform a ranked, paginated search based on the query and filters given in the
80 search request.
81
82 :param query_serializer: the ``MediaSearchRequestSerializer`` object with the query
83 :param index: The Elasticsearch index to search (e.g. 'image')
84 :param ip: the users' hashed IP to consistently route to the same ES shard
85 :return: the list of search results with the page and result count
86 """
87
88 s = Search(using="default", index=index)
89 search_params = query_serializer.data
90
91 rules: dict[Literal["filter", "exclude"], list[Union[str, tuple[str, str]]]] = {
92 "filter": [
93 "extension",
94 "category",
95 ("categories", "category"),
96 "aspect_ratio",
97 "size",
98 "length",
99 "source",
100 ("license", "license.keyword"),
101 ("license_type", "license.keyword"),
102 ],
103 "exclude": [
104 ("excluded_source", "source"),
105 ],
106 }
107 for behaviour, bases in rules.items():
108 for basis in bases:
109 s = _apply_filter(s, query_serializer, basis, behaviour)
110
111 # Exclude mature content
112 if not search_params["mature"]:
113 s = s.exclude("term", mature=True)
114 # Exclude sources with ``filter_content`` enabled
115 s = exclude_filtered_providers(s)
116
117 # Search either by generic multimatch or by "advanced search" with
118 # individual field-level queries specified.
119
120 search_fields = ["tags.name", "title", "description"]
121 if "q" in search_params:
122 query = _quote_escape(search_params["q"])
123 s = s.query(
124 "simple_query_string",
125 query=query,
126 fields=search_fields,
127 default_operator="AND",
128 )
129 # Boost exact matches
130 quotes_stripped = query.replace('"', "")
131 exact_match_boost = Q(
132 "simple_query_string",
133 fields=["title"],
134 query=f'"{quotes_stripped}"',
135 boost=10000,
136 )
137 s.query = Q("bool", must=s.query, should=exact_match_boost)
138 else:
139 query_bases = ["creator", "title", ("tags", "tags.name")]
140 for query_basis in query_bases:
141 if isinstance(query_basis, tuple):
142 serializer_field, es_field = query_basis
143 else:
144 serializer_field = es_field = query_basis
145 if serializer_field in search_params:
146 value = _quote_escape(search_params[serializer_field])
147 s = s.query("simple_query_string", fields=[es_field], query=value)
148
149 if settings.USE_RANK_FEATURES:
150 feature_boost = {"standardized_popularity": 10000}
151 rank_queries = []
152 for field, boost in feature_boost.items():
153 rank_queries.append(Q("rank_feature", field=field, boost=boost))
154 s.query = Q("bool", must=s.query, should=rank_queries)
155
156 # Use highlighting to determine which fields contribute to the selection of
157 # top results.
158 s = s.highlight(*search_fields)
159 s = s.highlight_options(order="score")
160
161 # Route users to the same Elasticsearch worker node to reduce
162 # pagination inconsistencies and increase cache hits.
163 s = s.params(preference=str(ip), request_timeout=7)
164
165 # Paginate
166 start, end = get_query_slice(
167 s,
168 search_params["page_size"],
169 search_params["page"],
170 search_params["filter_dead"],
171 )
172 s = s[start:end]
173
174 try:
175 if settings.VERBOSE_ES_RESPONSE:
176 log.info(pprint.pprint(s.to_dict()))
177 search_response = s.execute()
178 log.info(
179 f"query={json.dumps(s.to_dict())}," f" es_took_ms={search_response.took}"
180 )
181 if settings.VERBOSE_ES_RESPONSE:
182 log.info(pprint.pprint(search_response.to_dict()))
183 except RequestError as e:
184 raise ValueError(e)
185
186 results = post_process_results(
187 s,
188 start,
189 end,
190 search_params["page_size"],
191 search_response,
192 search_params["filter_dead"],
193 )
194
195 result_count, page_count = get_result_and_page_count(
196 search_response, results, search_params["page_size"]
197 )
198 return results, page_count, result_count
199
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/api/catalog/api/controllers/elasticsearch/search.py b/api/catalog/api/controllers/elasticsearch/search.py
--- a/api/catalog/api/controllers/elasticsearch/search.py
+++ b/api/catalog/api/controllers/elasticsearch/search.py
@@ -9,6 +9,7 @@
from elasticsearch.exceptions import RequestError
from elasticsearch_dsl import Q, Search
+from elasticsearch_dsl.query import EMPTY_QUERY
from elasticsearch_dsl.response import Hit
from catalog.api.controllers.elasticsearch.utils import (
@@ -151,7 +152,7 @@
rank_queries = []
for field, boost in feature_boost.items():
rank_queries.append(Q("rank_feature", field=field, boost=boost))
- s.query = Q("bool", must=s.query, should=rank_queries)
+ s.query = Q("bool", must=s.query or EMPTY_QUERY, should=rank_queries)
# Use highlighting to determine which fields contribute to the selection of
# top results.
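The crash is a serialization problem: with `mature=true` the mature exclusion is skipped, so if no other filter or text query ever touches `s.query`, the rank-feature wrapper builds a `bool` query around a still-empty query proxy, and `Search.to_dict()` later fails with the `'NoneType' object has no attribute 'to_dict'` error from the traceback. A minimal standalone sketch of the guarded pattern, assuming only the `elasticsearch_dsl` package (no cluster is needed, since `to_dict()` is pure serialization):

```python
from elasticsearch_dsl import Q, Search
from elasticsearch_dsl.query import EMPTY_QUERY  # a match_all placeholder

s = Search(index="image")  # no filters or text query applied yet

rank_queries = [Q("rank_feature", field="standardized_popularity", boost=10000)]

# Without the `or EMPTY_QUERY` guard, `must` would hold the empty query proxy,
# its `to_dict()` would be delegated to None, and the AttributeError from the
# stack trace would come back during serialization.
s.query = Q("bool", must=s.query or EMPTY_QUERY, should=rank_queries)

print(s.to_dict())  # serializes cleanly even though no base query was set
```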
|
{"golden_diff": "diff --git a/api/catalog/api/controllers/elasticsearch/search.py b/api/catalog/api/controllers/elasticsearch/search.py\n--- a/api/catalog/api/controllers/elasticsearch/search.py\n+++ b/api/catalog/api/controllers/elasticsearch/search.py\n@@ -9,6 +9,7 @@\n \n from elasticsearch.exceptions import RequestError\n from elasticsearch_dsl import Q, Search\n+from elasticsearch_dsl.query import EMPTY_QUERY\n from elasticsearch_dsl.response import Hit\n \n from catalog.api.controllers.elasticsearch.utils import (\n@@ -151,7 +152,7 @@\n rank_queries = []\n for field, boost in feature_boost.items():\n rank_queries.append(Q(\"rank_feature\", field=field, boost=boost))\n- s.query = Q(\"bool\", must=s.query, should=rank_queries)\n+ s.query = Q(\"bool\", must=s.query or EMPTY_QUERY, should=rank_queries)\n \n # Use highlighting to determine which fields contribute to the selection of\n # top results.\n", "issue": "Mature filter is broken\n## Description\n<!-- Concisely describe the bug. Compare your experience with what you expected to happen. -->\n<!-- For example: \"I clicked the 'submit' button and instead of seeing a thank you message, I saw a blank page.\" -->\nThe `mature` flag on our API is currently failing.\n\nHere's the stacktrace we're seeing\n\n```python\nAttributeError: 'NoneType' object has no attribute 'to_dict'\n File \"django/core/handlers/exception.py\", line 55, in inner\n response = get_response(request)\n File \"django/core/handlers/base.py\", line 197, in _get_response\n response = wrapped_callback(request, *callback_args, **callback_kwargs)\n File \"django/views/decorators/csrf.py\", line 54, in wrapped_view\n return view_func(*args, **kwargs)\n File \"rest_framework/viewsets.py\", line 125, in view\n return self.dispatch(request, *args, **kwargs)\n File \"rest_framework/views.py\", line 509, in dispatch\n response = self.handle_exception(exc)\n File \"rest_framework/views.py\", line 469, in handle_exception\n self.raise_uncaught_exception(exc)\n File \"rest_framework/views.py\", line 480, in raise_uncaught_exception\n raise exc\n File \"rest_framework/views.py\", line 506, in dispatch\n response = handler(request, *args, **kwargs)\n File \"django/utils/decorators.py\", line 46, in _wrapper\n return bound_method(*args, **kwargs)\n File \"catalog/api/views/media_views.py\", line 68, in list\n results, num_pages, num_results = perform_search(\n File \"catalog/api/controllers/elasticsearch/search.py\", line 166, in perform_search\n start, end = get_query_slice(\n File \"catalog/api/controllers/elasticsearch/utils.py\", line 88, in get_query_slice\n start_slice, end_slice = paginate_with_dead_link_mask(s, page_size, page)\n File \"catalog/api/controllers/elasticsearch/utils.py\", line 58, in paginate_with_dead_link_mask\n query_hash = get_query_hash(s)\n File \"catalog/api/utils/dead_link_mask.py\", line 21, in get_query_hash\n serialized_search_obj = s.to_dict()\n File \"elasticsearch_dsl/search.py\", line 658, in to_dict\n d[\"query\"] = self.query.to_dict()\n File \"elasticsearch_dsl/utils.py\", line 382, in to_dict\n value = list(map(lambda x: x.to_dict(), value))\n File \"elasticsearch_dsl/utils.py\", line 382, in <lambda>\n value = list(map(lambda x: x.to_dict(), value))\n```\n\n## Reproduction\n<!-- Provide detailed steps to reproduce the bug. -->\n1. Visit http://localhost:8000/v1/images/?mature=true\n2. Observe 500 error\n\n## Additional context\n<!-- Add any other context about the problem here; or delete the section entirely. 
-->\nSentry issue: https://sentry.io/share/issue/84aad5fd22354d20844f9fd5873958be/\n\n## Resolution\n<!-- Replace the [ ] with [x] to check the box. -->\n- [ ] \ud83d\ude4b I would be interested in resolving this bug.\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport json\nimport logging as log\nimport pprint\nfrom typing import List, Literal, Tuple, Union\n\nfrom django.conf import settings\n\nfrom elasticsearch.exceptions import RequestError\nfrom elasticsearch_dsl import Q, Search\nfrom elasticsearch_dsl.response import Hit\n\nfrom catalog.api.controllers.elasticsearch.utils import (\n exclude_filtered_providers,\n get_query_slice,\n get_result_and_page_count,\n post_process_results,\n)\nfrom catalog.api.serializers.media_serializers import MediaSearchRequestSerializer\n\n\ndef _quote_escape(query_string: str) -> str:\n \"\"\"\n If there are any unmatched quotes in the query supplied by the user, ignore\n them by escaping.\n\n :param query_string: the string in which to escape unbalanced quotes\n :return: the given string, if the quotes are balanced, the escaped string otherwise\n \"\"\"\n\n num_quotes = query_string.count('\"')\n if num_quotes % 2 == 1:\n return query_string.replace('\"', '\\\\\"')\n else:\n return query_string\n\n\ndef _apply_filter(\n s: Search,\n query_serializer: MediaSearchRequestSerializer,\n basis: Union[str, tuple[str, str]],\n behaviour: Literal[\"filter\", \"exclude\"] = \"filter\",\n) -> Search:\n \"\"\"\n Parse and apply a filter from the search parameters serializer. The\n parameter key is assumed to have the same name as the corresponding\n Elasticsearch property. Each parameter value is assumed to be a comma\n separated list encoded as a string.\n\n :param s: the search query to issue to Elasticsearch\n :param query_serializer: the ``MediaSearchRequestSerializer`` object with the query\n :param basis: the name of the field in the serializer and Elasticsearch\n :param behaviour: whether to accept (``filter``) or reject (``exclude``) the hit\n :return: the modified search query\n \"\"\"\n\n search_params = query_serializer.data\n if isinstance(basis, tuple):\n serializer_field, es_field = basis\n else:\n serializer_field = es_field = basis\n if serializer_field in search_params:\n filters = []\n for arg in search_params[serializer_field].split(\",\"):\n filters.append(Q(\"term\", **{es_field: arg}))\n method = getattr(s, behaviour) # can be ``s.filter`` or ``s.exclude``\n return method(\"bool\", should=filters)\n else:\n return s\n\n\ndef perform_search(\n query_serializer: MediaSearchRequestSerializer,\n index: Literal[\"image\", \"audio\"],\n ip: int,\n) -> Tuple[List[Hit], int, int]:\n \"\"\"\n Perform a ranked, paginated search based on the query and filters given in the\n search request.\n\n :param query_serializer: the ``MediaSearchRequestSerializer`` object with the query\n :param index: The Elasticsearch index to search (e.g. 
'image')\n :param ip: the users' hashed IP to consistently route to the same ES shard\n :return: the list of search results with the page and result count\n \"\"\"\n\n s = Search(using=\"default\", index=index)\n search_params = query_serializer.data\n\n rules: dict[Literal[\"filter\", \"exclude\"], list[Union[str, tuple[str, str]]]] = {\n \"filter\": [\n \"extension\",\n \"category\",\n (\"categories\", \"category\"),\n \"aspect_ratio\",\n \"size\",\n \"length\",\n \"source\",\n (\"license\", \"license.keyword\"),\n (\"license_type\", \"license.keyword\"),\n ],\n \"exclude\": [\n (\"excluded_source\", \"source\"),\n ],\n }\n for behaviour, bases in rules.items():\n for basis in bases:\n s = _apply_filter(s, query_serializer, basis, behaviour)\n\n # Exclude mature content\n if not search_params[\"mature\"]:\n s = s.exclude(\"term\", mature=True)\n # Exclude sources with ``filter_content`` enabled\n s = exclude_filtered_providers(s)\n\n # Search either by generic multimatch or by \"advanced search\" with\n # individual field-level queries specified.\n\n search_fields = [\"tags.name\", \"title\", \"description\"]\n if \"q\" in search_params:\n query = _quote_escape(search_params[\"q\"])\n s = s.query(\n \"simple_query_string\",\n query=query,\n fields=search_fields,\n default_operator=\"AND\",\n )\n # Boost exact matches\n quotes_stripped = query.replace('\"', \"\")\n exact_match_boost = Q(\n \"simple_query_string\",\n fields=[\"title\"],\n query=f'\"{quotes_stripped}\"',\n boost=10000,\n )\n s.query = Q(\"bool\", must=s.query, should=exact_match_boost)\n else:\n query_bases = [\"creator\", \"title\", (\"tags\", \"tags.name\")]\n for query_basis in query_bases:\n if isinstance(query_basis, tuple):\n serializer_field, es_field = query_basis\n else:\n serializer_field = es_field = query_basis\n if serializer_field in search_params:\n value = _quote_escape(search_params[serializer_field])\n s = s.query(\"simple_query_string\", fields=[es_field], query=value)\n\n if settings.USE_RANK_FEATURES:\n feature_boost = {\"standardized_popularity\": 10000}\n rank_queries = []\n for field, boost in feature_boost.items():\n rank_queries.append(Q(\"rank_feature\", field=field, boost=boost))\n s.query = Q(\"bool\", must=s.query, should=rank_queries)\n\n # Use highlighting to determine which fields contribute to the selection of\n # top results.\n s = s.highlight(*search_fields)\n s = s.highlight_options(order=\"score\")\n\n # Route users to the same Elasticsearch worker node to reduce\n # pagination inconsistencies and increase cache hits.\n s = s.params(preference=str(ip), request_timeout=7)\n\n # Paginate\n start, end = get_query_slice(\n s,\n search_params[\"page_size\"],\n search_params[\"page\"],\n search_params[\"filter_dead\"],\n )\n s = s[start:end]\n\n try:\n if settings.VERBOSE_ES_RESPONSE:\n log.info(pprint.pprint(s.to_dict()))\n search_response = s.execute()\n log.info(\n f\"query={json.dumps(s.to_dict())},\" f\" es_took_ms={search_response.took}\"\n )\n if settings.VERBOSE_ES_RESPONSE:\n log.info(pprint.pprint(search_response.to_dict()))\n except RequestError as e:\n raise ValueError(e)\n\n results = post_process_results(\n s,\n start,\n end,\n search_params[\"page_size\"],\n search_response,\n search_params[\"filter_dead\"],\n )\n\n result_count, page_count = get_result_and_page_count(\n search_response, results, search_params[\"page_size\"]\n )\n return results, page_count, result_count\n", "path": "api/catalog/api/controllers/elasticsearch/search.py"}], "after_files": [{"content": "from 
__future__ import annotations\n\nimport json\nimport logging as log\nimport pprint\nfrom typing import List, Literal, Tuple, Union\n\nfrom django.conf import settings\n\nfrom elasticsearch.exceptions import RequestError\nfrom elasticsearch_dsl import Q, Search\nfrom elasticsearch_dsl.query import EMPTY_QUERY\nfrom elasticsearch_dsl.response import Hit\n\nfrom catalog.api.controllers.elasticsearch.utils import (\n exclude_filtered_providers,\n get_query_slice,\n get_result_and_page_count,\n post_process_results,\n)\nfrom catalog.api.serializers.media_serializers import MediaSearchRequestSerializer\n\n\ndef _quote_escape(query_string: str) -> str:\n \"\"\"\n If there are any unmatched quotes in the query supplied by the user, ignore\n them by escaping.\n\n :param query_string: the string in which to escape unbalanced quotes\n :return: the given string, if the quotes are balanced, the escaped string otherwise\n \"\"\"\n\n num_quotes = query_string.count('\"')\n if num_quotes % 2 == 1:\n return query_string.replace('\"', '\\\\\"')\n else:\n return query_string\n\n\ndef _apply_filter(\n s: Search,\n query_serializer: MediaSearchRequestSerializer,\n basis: Union[str, tuple[str, str]],\n behaviour: Literal[\"filter\", \"exclude\"] = \"filter\",\n) -> Search:\n \"\"\"\n Parse and apply a filter from the search parameters serializer. The\n parameter key is assumed to have the same name as the corresponding\n Elasticsearch property. Each parameter value is assumed to be a comma\n separated list encoded as a string.\n\n :param s: the search query to issue to Elasticsearch\n :param query_serializer: the ``MediaSearchRequestSerializer`` object with the query\n :param basis: the name of the field in the serializer and Elasticsearch\n :param behaviour: whether to accept (``filter``) or reject (``exclude``) the hit\n :return: the modified search query\n \"\"\"\n\n search_params = query_serializer.data\n if isinstance(basis, tuple):\n serializer_field, es_field = basis\n else:\n serializer_field = es_field = basis\n if serializer_field in search_params:\n filters = []\n for arg in search_params[serializer_field].split(\",\"):\n filters.append(Q(\"term\", **{es_field: arg}))\n method = getattr(s, behaviour) # can be ``s.filter`` or ``s.exclude``\n return method(\"bool\", should=filters)\n else:\n return s\n\n\ndef perform_search(\n query_serializer: MediaSearchRequestSerializer,\n index: Literal[\"image\", \"audio\"],\n ip: int,\n) -> Tuple[List[Hit], int, int]:\n \"\"\"\n Perform a ranked, paginated search based on the query and filters given in the\n search request.\n\n :param query_serializer: the ``MediaSearchRequestSerializer`` object with the query\n :param index: The Elasticsearch index to search (e.g. 
'image')\n :param ip: the users' hashed IP to consistently route to the same ES shard\n :return: the list of search results with the page and result count\n \"\"\"\n\n s = Search(using=\"default\", index=index)\n search_params = query_serializer.data\n\n rules: dict[Literal[\"filter\", \"exclude\"], list[Union[str, tuple[str, str]]]] = {\n \"filter\": [\n \"extension\",\n \"category\",\n (\"categories\", \"category\"),\n \"aspect_ratio\",\n \"size\",\n \"length\",\n \"source\",\n (\"license\", \"license.keyword\"),\n (\"license_type\", \"license.keyword\"),\n ],\n \"exclude\": [\n (\"excluded_source\", \"source\"),\n ],\n }\n for behaviour, bases in rules.items():\n for basis in bases:\n s = _apply_filter(s, query_serializer, basis, behaviour)\n\n # Exclude mature content\n if not search_params[\"mature\"]:\n s = s.exclude(\"term\", mature=True)\n # Exclude sources with ``filter_content`` enabled\n s = exclude_filtered_providers(s)\n\n # Search either by generic multimatch or by \"advanced search\" with\n # individual field-level queries specified.\n\n search_fields = [\"tags.name\", \"title\", \"description\"]\n if \"q\" in search_params:\n query = _quote_escape(search_params[\"q\"])\n s = s.query(\n \"simple_query_string\",\n query=query,\n fields=search_fields,\n default_operator=\"AND\",\n )\n # Boost exact matches\n quotes_stripped = query.replace('\"', \"\")\n exact_match_boost = Q(\n \"simple_query_string\",\n fields=[\"title\"],\n query=f'\"{quotes_stripped}\"',\n boost=10000,\n )\n s.query = Q(\"bool\", must=s.query, should=exact_match_boost)\n else:\n query_bases = [\"creator\", \"title\", (\"tags\", \"tags.name\")]\n for query_basis in query_bases:\n if isinstance(query_basis, tuple):\n serializer_field, es_field = query_basis\n else:\n serializer_field = es_field = query_basis\n if serializer_field in search_params:\n value = _quote_escape(search_params[serializer_field])\n s = s.query(\"simple_query_string\", fields=[es_field], query=value)\n\n if settings.USE_RANK_FEATURES:\n feature_boost = {\"standardized_popularity\": 10000}\n rank_queries = []\n for field, boost in feature_boost.items():\n rank_queries.append(Q(\"rank_feature\", field=field, boost=boost))\n s.query = Q(\"bool\", must=s.query or EMPTY_QUERY, should=rank_queries)\n\n # Use highlighting to determine which fields contribute to the selection of\n # top results.\n s = s.highlight(*search_fields)\n s = s.highlight_options(order=\"score\")\n\n # Route users to the same Elasticsearch worker node to reduce\n # pagination inconsistencies and increase cache hits.\n s = s.params(preference=str(ip), request_timeout=7)\n\n # Paginate\n start, end = get_query_slice(\n s,\n search_params[\"page_size\"],\n search_params[\"page\"],\n search_params[\"filter_dead\"],\n )\n s = s[start:end]\n\n try:\n if settings.VERBOSE_ES_RESPONSE:\n log.info(pprint.pprint(s.to_dict()))\n search_response = s.execute()\n log.info(\n f\"query={json.dumps(s.to_dict())},\" f\" es_took_ms={search_response.took}\"\n )\n if settings.VERBOSE_ES_RESPONSE:\n log.info(pprint.pprint(search_response.to_dict()))\n except RequestError as e:\n raise ValueError(e)\n\n results = post_process_results(\n s,\n start,\n end,\n search_params[\"page_size\"],\n search_response,\n search_params[\"filter_dead\"],\n )\n\n result_count, page_count = get_result_and_page_count(\n search_response, results, search_params[\"page_size\"]\n )\n return results, page_count, result_count\n", "path": "api/catalog/api/controllers/elasticsearch/search.py"}]}
| 2,996 | 206 |
gh_patches_debug_896
|
rasdani/github-patches
|
git_diff
|
bridgecrewio__checkov-1228
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
boto3 is fixed at the patch level version
**Is your feature request related to a problem? Please describe.**
Free the boto3 dependency from its exact patch-version pin.
**Describe the solution you'd like**
replace the line here:
https://github.com/bridgecrewio/checkov/blob/master/Pipfile#L29
with
```
boto3 = "==1.17.*"
```
**Describe alternatives you've considered**
There are no real alternatives; I don't see why the patch version is locked at all.
Pinning it can cause conflicts with an already installed boto3 library.
**Additional context**
boto3 dependency: install the latest patch version
By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
Fixes #1211
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 import logging
3 import os
4 from importlib import util
5 from os import path
6
7 import setuptools
8 from setuptools import setup
9
10 # read the contents of your README file
11 this_directory = path.abspath(path.dirname(__file__))
12 with open(path.join(this_directory, "README.md"), encoding="utf-8") as f:
13 long_description = f.read()
14
15 logger = logging.getLogger(__name__)
16 spec = util.spec_from_file_location(
17 "checkov.version", os.path.join("checkov", "version.py")
18 )
19 # noinspection PyUnresolvedReferences
20 mod = util.module_from_spec(spec)
21 spec.loader.exec_module(mod) # type: ignore
22 version = mod.version # type: ignore
23
24 setup(
25 extras_require={
26 "dev": [
27 "pytest==5.3.1",
28 "coverage",
29 "coverage-badge",
30 "GitPython==3.1.7",
31 "bandit"
32 ]
33 },
34 install_requires=[
35 "bc-python-hcl2>=0.3.18",
36 "cloudsplaining>=0.4.1",
37 "deep_merge",
38 "tabulate",
39 "colorama",
40 "termcolor",
41 "junit-xml",
42 "dpath>=1.5.0,<2",
43 "pyyaml>=5.4.1",
44 "boto3==1.17.27",
45 "GitPython",
46 "six==1.15.0",
47 "jmespath",
48 "tqdm",
49 "update_checker",
50 "semantic_version",
51 "packaging",
52 "networkx",
53 "dockerfile-parse",
54 "docker"
55 ],
56 license="Apache License 2.0",
57 name="checkov",
58 version=version,
59 python_requires=">=3.7",
60 description="Infrastructure as code static analysis",
61 author="bridgecrew",
62 author_email="[email protected]",
63 url="https://github.com/nimrodkor/checkov",
64 packages=setuptools.find_packages(exclude=["tests*","integration_tests*"]),
65 include_package_data=True,
66 package_dir={'checkov.terraform.checks.graph_checks': 'checkov/terraform/checks/graph_checks'},
67 package_data = {'checkov.terraform.checks.graph_checks': ['aws/*.yaml', 'gcp/*.yaml', 'azure/*.yaml']},
68 scripts=["bin/checkov", "bin/checkov.cmd"],
69 long_description=long_description,
70 long_description_content_type="text/markdown",
71 classifiers=[
72 'Environment :: Console',
73 'Intended Audience :: Developers',
74 'Intended Audience :: System Administrators',
75 'Programming Language :: Python :: 3.7',
76 'Programming Language :: Python :: 3.8',
77 'Programming Language :: Python :: 3.9',
78 'Topic :: Security',
79 'Topic :: Software Development :: Build Tools'
80 ]
81 )
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -41,7 +41,7 @@
"junit-xml",
"dpath>=1.5.0,<2",
"pyyaml>=5.4.1",
- "boto3==1.17.27",
+ "boto3==1.17.*",
"GitPython",
"six==1.15.0",
"jmespath",
|
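The relaxed specifier keeps checkov on the 1.17 series while accepting any patch release, which is exactly what removes the conflict with an already installed boto3. The PEP 440 behaviour of the two pins can be sanity-checked with the `packaging` library that already appears in `install_requires` (illustrative snippet, not code from the repository):

```python
from packaging.specifiers import SpecifierSet

pinned = SpecifierSet("==1.17.27")   # the old exact pin
relaxed = SpecifierSet("==1.17.*")   # the wildcard pin introduced by the patch

print("1.17.49" in pinned)   # False: any other patch release conflicts
print("1.17.49" in relaxed)  # True: every 1.17.x patch release is accepted
print("1.18.0" in relaxed)   # False: minor version bumps are still rejected
```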
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -41,7 +41,7 @@\n \"junit-xml\",\n \"dpath>=1.5.0,<2\",\n \"pyyaml>=5.4.1\",\n- \"boto3==1.17.27\",\n+ \"boto3==1.17.*\",\n \"GitPython\",\n \"six==1.15.0\",\n \"jmespath\",\n", "issue": "boto3 is fixed at the patch level version\n**Is your feature request related to a problem? Please describe.**\r\nfree boto3 dependency patch version.\r\n\r\n**Describe the solution you'd like**\r\nreplace the line here:\r\nhttps://github.com/bridgecrewio/checkov/blob/master/Pipfile#L29\r\n\r\nwith \r\n```\r\nboto3 = \"==1.17.*\"\r\n```\r\n**Describe alternatives you've considered**\r\nthere are no alternatives as the patch version i don't see why is locked.\r\nit can cause conflicts with already installed boto3 library \r\n\r\n**Additional context**\r\n\r\n\nboto3 dependency install latest patch version\nBy submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.\r\n\r\nFixes #1211 \r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\n\nimport setuptools\nfrom setuptools import setup\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nlogger = logging.getLogger(__name__)\nspec = util.spec_from_file_location(\n \"checkov.version\", os.path.join(\"checkov\", \"version.py\")\n)\n# noinspection PyUnresolvedReferences\nmod = util.module_from_spec(spec)\nspec.loader.exec_module(mod) # type: ignore\nversion = mod.version # type: ignore\n\nsetup(\n extras_require={\n \"dev\": [\n \"pytest==5.3.1\",\n \"coverage\",\n \"coverage-badge\",\n \"GitPython==3.1.7\",\n \"bandit\"\n ]\n },\n install_requires=[\n \"bc-python-hcl2>=0.3.18\",\n \"cloudsplaining>=0.4.1\",\n \"deep_merge\",\n \"tabulate\",\n \"colorama\",\n \"termcolor\",\n \"junit-xml\",\n \"dpath>=1.5.0,<2\",\n \"pyyaml>=5.4.1\",\n \"boto3==1.17.27\",\n \"GitPython\",\n \"six==1.15.0\",\n \"jmespath\",\n \"tqdm\",\n \"update_checker\",\n \"semantic_version\",\n \"packaging\",\n \"networkx\",\n \"dockerfile-parse\",\n \"docker\"\n ],\n license=\"Apache License 2.0\",\n name=\"checkov\",\n version=version,\n python_requires=\">=3.7\",\n description=\"Infrastructure as code static analysis\",\n author=\"bridgecrew\",\n author_email=\"[email protected]\",\n url=\"https://github.com/nimrodkor/checkov\",\n packages=setuptools.find_packages(exclude=[\"tests*\",\"integration_tests*\"]),\n include_package_data=True,\n package_dir={'checkov.terraform.checks.graph_checks': 'checkov/terraform/checks/graph_checks'},\n package_data = {'checkov.terraform.checks.graph_checks': ['aws/*.yaml', 'gcp/*.yaml', 'azure/*.yaml']},\n scripts=[\"bin/checkov\", \"bin/checkov.cmd\"],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Topic :: Security',\n 'Topic :: Software Development :: Build Tools'\n ]\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\n\nimport setuptools\nfrom setuptools import setup\n\n# read 
the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nlogger = logging.getLogger(__name__)\nspec = util.spec_from_file_location(\n \"checkov.version\", os.path.join(\"checkov\", \"version.py\")\n)\n# noinspection PyUnresolvedReferences\nmod = util.module_from_spec(spec)\nspec.loader.exec_module(mod) # type: ignore\nversion = mod.version # type: ignore\n\nsetup(\n extras_require={\n \"dev\": [\n \"pytest==5.3.1\",\n \"coverage\",\n \"coverage-badge\",\n \"GitPython==3.1.7\",\n \"bandit\"\n ]\n },\n install_requires=[\n \"bc-python-hcl2>=0.3.18\",\n \"cloudsplaining>=0.4.1\",\n \"deep_merge\",\n \"tabulate\",\n \"colorama\",\n \"termcolor\",\n \"junit-xml\",\n \"dpath>=1.5.0,<2\",\n \"pyyaml>=5.4.1\",\n \"boto3==1.17.*\",\n \"GitPython\",\n \"six==1.15.0\",\n \"jmespath\",\n \"tqdm\",\n \"update_checker\",\n \"semantic_version\",\n \"packaging\",\n \"networkx\",\n \"dockerfile-parse\",\n \"docker\"\n ],\n license=\"Apache License 2.0\",\n name=\"checkov\",\n version=version,\n python_requires=\">=3.7\",\n description=\"Infrastructure as code static analysis\",\n author=\"bridgecrew\",\n author_email=\"[email protected]\",\n url=\"https://github.com/nimrodkor/checkov\",\n packages=setuptools.find_packages(exclude=[\"tests*\",\"integration_tests*\"]),\n include_package_data=True,\n package_dir={'checkov.terraform.checks.graph_checks': 'checkov/terraform/checks/graph_checks'},\n package_data = {'checkov.terraform.checks.graph_checks': ['aws/*.yaml', 'gcp/*.yaml', 'azure/*.yaml']},\n scripts=[\"bin/checkov\", \"bin/checkov.cmd\"],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Topic :: Security',\n 'Topic :: Software Development :: Build Tools'\n ]\n)\n", "path": "setup.py"}]}
| 1,192 | 113 |
gh_patches_debug_6047
|
rasdani/github-patches
|
git_diff
|
liqd__a4-meinberlin-2151
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
testing #1824 wrong wording
Dashboard/Newsletter: the word "Empfänger*innen" is in the wrong place. Please delete it above and replace "Projekt" with "Empfänger*innen sind alle, die diesem Projekt folgen:" (i.e. "Receivers are all users who follow this project:").
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/apps/newsletters/forms.py`
Content:
```
1 from django import forms
2 from django.apps import apps
3 from django.conf import settings
4 from django.utils.translation import ugettext_lazy as _
5
6 from adhocracy4.projects.models import Project
7
8 from . import models
9
10 Organisation = apps.get_model(settings.A4_ORGANISATIONS_MODEL)
11
12
13 class NewsletterForm(forms.ModelForm):
14 class Meta:
15 model = models.Newsletter
16 fields = ['sender_name', 'sender', 'receivers', 'project',
17 'organisation', 'subject', 'body']
18
19 def __init__(self, user=None, organisation=None, *args, **kwargs):
20 super().__init__(*args, **kwargs)
21 choices = [(value, string)
22 for value, string in models.RECEIVER_CHOICES
23 if value != models.PLATFORM or (user and user.is_superuser)]
24 self.fields['receivers'] = forms.ChoiceField(
25 label=_('Receivers'),
26 choices=choices,
27 widget=forms.RadioSelect(),
28 )
29
30 project_qs = Project.objects
31 if organisation:
32 project_qs = Project.objects.filter(organisation=organisation.id)
33
34 self.fields['project'] = forms.ModelChoiceField(
35 label=_('Project'),
36 queryset=project_qs,
37 required=False, empty_label=None)
38
39 self.fields['organisation'] = forms.ModelChoiceField(
40 label=_('Organisation'),
41 queryset=Organisation.objects,
42 required=False, empty_label=None)
43
44 def clean(self):
45 cleaned_data = super().clean()
46 if cleaned_data.get('receivers') == str(models.PROJECT) and \
47 not cleaned_data.get('project'):
48 self.add_error('project', _('Select a Project'))
49
50
51 class RestrictedNewsletterForm(forms.ModelForm):
52
53 class Meta:
54 model = models.Newsletter
55 fields = ['sender_name', 'sender', 'project', 'receivers',
56 'organisation', 'subject', 'body']
57
58 def __init__(self, user=None, organisation=None, *args, **kwargs):
59 super().__init__(*args, **kwargs)
60
61 self.fields['receivers'].widget = forms.HiddenInput()
62
63 project_qs = Project.objects
64 if organisation:
65 project_qs = Project.objects.filter(organisation=organisation.id)
66 if user and not user.is_superuser:
67 user_groups = user.groups.all()
68 org_groups = organisation.groups.all()
69 shared_groups = user_groups & org_groups
70 group = shared_groups.distinct().first()
71 project_qs = project_qs.filter(group=group)
72
73 self.fields['project'] = forms.ModelChoiceField(
74 label=_('Project'),
75 queryset=project_qs,
76 required=False, empty_label=None)
77
78 self.fields['organisation'] = forms.ModelChoiceField(
79 label=_('Organisation'),
80 queryset=Organisation.objects,
81 required=False, empty_label=None)
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/meinberlin/apps/newsletters/forms.py b/meinberlin/apps/newsletters/forms.py
--- a/meinberlin/apps/newsletters/forms.py
+++ b/meinberlin/apps/newsletters/forms.py
@@ -74,6 +74,8 @@
label=_('Project'),
queryset=project_qs,
required=False, empty_label=None)
+ self.fields['project'].label = _('Receivers are all users '
+ 'which follow the following project:')
self.fields['organisation'] = forms.ModelChoiceField(
label=_('Organisation'),
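The patch keeps the existing `project` field and only replaces its label, so the receivers wording appears next to the project selector instead of as a separate line above it. A generic, standalone Django sketch of the same pattern (not meinberlin code; the `settings.configure()` call is only there so the snippet runs outside a project):

```python
import django
from django.conf import settings

if not settings.configured:
    settings.configure(USE_I18N=False)
    django.setup()

from django import forms


class NewsletterSketchForm(forms.Form):
    project = forms.ChoiceField(choices=[("1", "Example project")])

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Same pattern as the patch: swap the auto-generated label once the
        # per-instance field objects exist.
        self.fields["project"].label = (
            "Receivers are all users which follow the following project:"
        )


print(NewsletterSketchForm().fields["project"].label)
```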
|
{"golden_diff": "diff --git a/meinberlin/apps/newsletters/forms.py b/meinberlin/apps/newsletters/forms.py\n--- a/meinberlin/apps/newsletters/forms.py\n+++ b/meinberlin/apps/newsletters/forms.py\n@@ -74,6 +74,8 @@\n label=_('Project'),\n queryset=project_qs,\n required=False, empty_label=None)\n+ self.fields['project'].label = _('Receivers are all users '\n+ 'which follow the following project:')\n \n self.fields['organisation'] = forms.ModelChoiceField(\n label=_('Organisation'),\n", "issue": "testing #1824 wrong wording\nDashboard/Newsletter: the Word \"Empf\u00e4nger*innen\" is at the wrong place. Please delete it above and replace \"Projekt\" by \"Empf\u00e4nger*innen sind alle, die diesem Projekt folgen:\"\r\n \r\n\n", "before_files": [{"content": "from django import forms\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.projects.models import Project\n\nfrom . import models\n\nOrganisation = apps.get_model(settings.A4_ORGANISATIONS_MODEL)\n\n\nclass NewsletterForm(forms.ModelForm):\n class Meta:\n model = models.Newsletter\n fields = ['sender_name', 'sender', 'receivers', 'project',\n 'organisation', 'subject', 'body']\n\n def __init__(self, user=None, organisation=None, *args, **kwargs):\n super().__init__(*args, **kwargs)\n choices = [(value, string)\n for value, string in models.RECEIVER_CHOICES\n if value != models.PLATFORM or (user and user.is_superuser)]\n self.fields['receivers'] = forms.ChoiceField(\n label=_('Receivers'),\n choices=choices,\n widget=forms.RadioSelect(),\n )\n\n project_qs = Project.objects\n if organisation:\n project_qs = Project.objects.filter(organisation=organisation.id)\n\n self.fields['project'] = forms.ModelChoiceField(\n label=_('Project'),\n queryset=project_qs,\n required=False, empty_label=None)\n\n self.fields['organisation'] = forms.ModelChoiceField(\n label=_('Organisation'),\n queryset=Organisation.objects,\n required=False, empty_label=None)\n\n def clean(self):\n cleaned_data = super().clean()\n if cleaned_data.get('receivers') == str(models.PROJECT) and \\\n not cleaned_data.get('project'):\n self.add_error('project', _('Select a Project'))\n\n\nclass RestrictedNewsletterForm(forms.ModelForm):\n\n class Meta:\n model = models.Newsletter\n fields = ['sender_name', 'sender', 'project', 'receivers',\n 'organisation', 'subject', 'body']\n\n def __init__(self, user=None, organisation=None, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.fields['receivers'].widget = forms.HiddenInput()\n\n project_qs = Project.objects\n if organisation:\n project_qs = Project.objects.filter(organisation=organisation.id)\n if user and not user.is_superuser:\n user_groups = user.groups.all()\n org_groups = organisation.groups.all()\n shared_groups = user_groups & org_groups\n group = shared_groups.distinct().first()\n project_qs = project_qs.filter(group=group)\n\n self.fields['project'] = forms.ModelChoiceField(\n label=_('Project'),\n queryset=project_qs,\n required=False, empty_label=None)\n\n self.fields['organisation'] = forms.ModelChoiceField(\n label=_('Organisation'),\n queryset=Organisation.objects,\n required=False, empty_label=None)\n", "path": "meinberlin/apps/newsletters/forms.py"}], "after_files": [{"content": "from django import forms\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.projects.models import Project\n\nfrom . 
import models\n\nOrganisation = apps.get_model(settings.A4_ORGANISATIONS_MODEL)\n\n\nclass NewsletterForm(forms.ModelForm):\n class Meta:\n model = models.Newsletter\n fields = ['sender_name', 'sender', 'receivers', 'project',\n 'organisation', 'subject', 'body']\n\n def __init__(self, user=None, organisation=None, *args, **kwargs):\n super().__init__(*args, **kwargs)\n choices = [(value, string)\n for value, string in models.RECEIVER_CHOICES\n if value != models.PLATFORM or (user and user.is_superuser)]\n self.fields['receivers'] = forms.ChoiceField(\n label=_('Receivers'),\n choices=choices,\n widget=forms.RadioSelect(),\n )\n\n project_qs = Project.objects\n if organisation:\n project_qs = Project.objects.filter(organisation=organisation.id)\n\n self.fields['project'] = forms.ModelChoiceField(\n label=_('Project'),\n queryset=project_qs,\n required=False, empty_label=None)\n\n self.fields['organisation'] = forms.ModelChoiceField(\n label=_('Organisation'),\n queryset=Organisation.objects,\n required=False, empty_label=None)\n\n def clean(self):\n cleaned_data = super().clean()\n if cleaned_data.get('receivers') == str(models.PROJECT) and \\\n not cleaned_data.get('project'):\n self.add_error('project', _('Select a Project'))\n\n\nclass RestrictedNewsletterForm(forms.ModelForm):\n\n class Meta:\n model = models.Newsletter\n fields = ['sender_name', 'sender', 'project', 'receivers',\n 'organisation', 'subject', 'body']\n\n def __init__(self, user=None, organisation=None, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.fields['receivers'].widget = forms.HiddenInput()\n\n project_qs = Project.objects\n if organisation:\n project_qs = Project.objects.filter(organisation=organisation.id)\n if user and not user.is_superuser:\n user_groups = user.groups.all()\n org_groups = organisation.groups.all()\n shared_groups = user_groups & org_groups\n group = shared_groups.distinct().first()\n project_qs = project_qs.filter(group=group)\n\n self.fields['project'] = forms.ModelChoiceField(\n label=_('Project'),\n queryset=project_qs,\n required=False, empty_label=None)\n self.fields['project'].label = _('Receivers are all users '\n 'which follow the following project:')\n\n self.fields['organisation'] = forms.ModelChoiceField(\n label=_('Organisation'),\n queryset=Organisation.objects,\n required=False, empty_label=None)\n", "path": "meinberlin/apps/newsletters/forms.py"}]}
| 1,061 | 124 |
gh_patches_debug_26393
|
rasdani/github-patches
|
git_diff
|
interlegis__sapl-1243
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
All authors available for selection
All authors are being shown for selection in protocols and legislative matters (matérias), including authors who are inactive in the system.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sapl/api/views.py`
Content:
```
1 from django.contrib.contenttypes.models import ContentType
2 from django.db.models import Q
3 from django.http import Http404
4 from django.utils.translation import ugettext_lazy as _
5 from rest_framework.filters import DjangoFilterBackend
6 from rest_framework.generics import ListAPIView
7 from rest_framework.mixins import ListModelMixin, RetrieveModelMixin
8 from rest_framework.permissions import (AllowAny, IsAuthenticated,
9 IsAuthenticatedOrReadOnly)
10 from rest_framework.viewsets import GenericViewSet, ModelViewSet
11
12 from sapl.api.forms import AutorChoiceFilterSet, AutorSearchForFieldFilterSet
13 from sapl.api.serializers import (AutorChoiceSerializer, AutorSerializer,
14 ChoiceSerializer,
15 MateriaLegislativaSerializer,
16 ModelChoiceSerializer,
17 SessaoPlenariaSerializer)
18 from sapl.base.models import Autor, TipoAutor
19 from sapl.materia.models import MateriaLegislativa
20 from sapl.sessao.models import SessaoPlenaria
21 from sapl.utils import SaplGenericRelation, sapl_logger
22
23
24 class ModelChoiceView(ListAPIView):
25
26 # FIXME aplicar permissão correta de usuário
27 permission_classes = (IsAuthenticated,)
28 serializer_class = ModelChoiceSerializer
29
30 def get(self, request, *args, **kwargs):
31 self.model = ContentType.objects.get_for_id(
32 self.kwargs['content_type']).model_class()
33
34 pagination = request.GET.get('pagination', '')
35
36 if pagination == 'False':
37 self.pagination_class = None
38
39 return ListAPIView.get(self, request, *args, **kwargs)
40
41 def get_queryset(self):
42 return self.model.objects.all()
43
44
45 class AutorListView(ListAPIView):
46 """
47 Listagem de Autores com filtro para autores já cadastrados
48 e/ou possíveis autores.
49
50 - tr - tipo do resultado
51 Prepera Lista de Autores para 3 cenários distintos
52
53 - default = 1
54
55 = 1 -> para (value, text) usados geralmente
56 em combobox, radiobox, checkbox, etc com pesquisa básica
57 de Autores feita pelo django-filter
58 -> processo usado nas pesquisas, o mais usado.
59
60 = 2 -> para (value, text) usados geralmente
61 em combobox, radiobox, checkbox, etc com pesquisa básica
62 de Autores mas feito para Possíveis Autores armazenados
63 segundo o ContentType associado ao Tipo de Autor via
64 relacionamento genérico.
65 Busca feita sem django-filter processada no get_queryset
66 -> processo no cadastro de autores para seleção e busca
67 dos possíveis autores
68
69 = 3 -> Devolve instancias da classe Autor filtradas pelo
70 django-filter
71
72 - tipo - chave primária do Tipo de Autor a ser filtrado
73
74 - q - busca textual no nome do Autor ou em fields_search
75 declarados no field SaplGenericRelation das GenericFks
76 A busca textual acontece via django-filter com a
77 variável `tr` igual 1 ou 3. Em caso contrário,
78 o django-filter é desativado e a busca é feita
79 no model do ContentType associado ao tipo.
80
81 - q_0 / q_1 - q_0 faz o código ignorar "q"...
82
83 q_0 -> campos lookup a serem filtrados em qualquer Model
84 que implemente SaplGenericRelation
85 q_1 -> o valor que será pesquisado no lookup de q_0
86
87 q_0 e q_1 podem ser separados por ","... isso dará a
88 possibilidade de filtrar mais de um campo.
89
90
91 http://localhost:8000
92 /api/autor?tr=1&q_0=parlamentar_set__ativo&q_1=False
93 /api/autor?tr=1&q_0=parlamentar_set__ativo&q_1=True
94 /api/autor?tr=3&q_0=parlamentar_set__ativo&q_1=False
95 /api/autor?tr=3&q_0=parlamentar_set__ativo&q_1=True
96
97 http://localhost:8000
98 /api/autor?tr=1
99 &q_0=parlamentar_set__nome_completo__icontains,
100 parlamentar_set__ativo
101 &q_1=Carvalho,False
102 /api/autor?tr=1
103 &q_0=parlamentar_set__nome_completo__icontains,
104 parlamentar_set__ativo
105 &q_1=Carvalho,True
106 /api/autor?tr=3
107 &q_0=parlamentar_set__nome_completo__icontains,
108 parlamentar_set__ativo
109 &q_1=Carvalho,False
110 /api/autor?tr=3
111 &q_0=parlamentar_set__nome_completo__icontains,
112 parlamentar_set__ativo
113 &q_1=Carvalho,True
114
115
116 não importa o campo que vc passe de qualquer dos Models
117 ligados... é possível ver que models são esses,
118 na ocasião do commit deste texto, executando:
119 In [6]: from sapl.utils import models_with_gr_for_model
120
121 In [7]: models_with_gr_for_model(Autor)
122 Out[7]:
123 [sapl.parlamentares.models.Parlamentar,
124 sapl.parlamentares.models.Frente,
125 sapl.comissoes.models.Comissao,
126 sapl.materia.models.Orgao,
127 sapl.sessao.models.Bancada,
128 sapl.sessao.models.Bloco]
129
130 qualquer atributo destes models podem ser passados
131 para busca
132 """
133
134 TR_AUTOR_CHOICE_SERIALIZER = 1
135 TR_CHOICE_SERIALIZER = 2
136 TR_AUTOR_SERIALIZER = 3
137
138 # FIXME aplicar permissão correta de usuário
139 permission_classes = (IsAuthenticatedOrReadOnly,)
140 queryset = Autor.objects.all()
141 model = Autor
142
143 filter_class = AutorChoiceFilterSet
144 filter_backends = (DjangoFilterBackend, )
145 serializer_class = AutorChoiceSerializer
146
147 @property
148 def tr(self):
149 try:
150 tr = int(self.request.GET.get
151 ('tr', AutorListView.TR_AUTOR_CHOICE_SERIALIZER))
152
153 assert tr in (
154 AutorListView.TR_AUTOR_CHOICE_SERIALIZER,
155 AutorListView.TR_CHOICE_SERIALIZER,
156 AutorListView.TR_AUTOR_SERIALIZER), sapl_logger.info(
157 _("Tipo do Resultado a ser fornecido não existe!"))
158 except:
159 return AutorListView.TR_AUTOR_CHOICE_SERIALIZER
160 else:
161 return tr
162
163 def get(self, request, *args, **kwargs):
164 """
165 desativa o django-filter se a busca for por possiveis autores
166 parametro tr = TR_CHOICE_SERIALIZER
167 """
168 if self.tr == AutorListView.TR_CHOICE_SERIALIZER:
169 self.filter_class = None
170 self.filter_backends = []
171 self.serializer_class = ChoiceSerializer
172
173 elif self.tr == AutorListView.TR_AUTOR_SERIALIZER:
174 self.serializer_class = AutorSerializer
175 self.permission_classes = (IsAuthenticated,)
176
177 if self.filter_class and 'q_0' in request.GET:
178 self.filter_class = AutorSearchForFieldFilterSet
179
180 return ListAPIView.get(self, request, *args, **kwargs)
181
182 def get_queryset(self):
183 queryset = ListAPIView.get_queryset(self)
184
185 if self.filter_backends:
186 return queryset
187
188 params = {'content_type__isnull': False}
189
190 tipo = ''
191 try:
192 tipo = int(self.request.GET.get('tipo', ''))
193 if tipo:
194 params['id'] = tipo
195 except:
196 pass
197
198 tipos = TipoAutor.objects.filter(**params)
199
200 if not tipos.exists() and tipo:
201 raise Http404()
202
203 r = []
204 for tipo in tipos:
205 q = self.request.GET.get('q', '').strip()
206
207 model_class = tipo.content_type.model_class()
208
209 fields = list(filter(
210 lambda field: isinstance(field, SaplGenericRelation) and
211 field.related_model == Autor,
212 model_class._meta.get_fields(include_hidden=True)))
213
214 """
215 fields - é um array de SaplGenericRelation que deve possuir o
216 atributo fields_search. Verifique na documentação da classe
217 a estrutura de fields_search.
218 """
219
220 assert len(fields) >= 1, (_(
221 'Não foi encontrado em %(model)s um atributo do tipo '
222 'SaplGenericRelation que use o model %(model_autor)s') % {
223 'model': model_class._meta.verbose_name,
224 'model_autor': Autor._meta.verbose_name})
225
226 qs = model_class.objects.all()
227
228 q_filter = Q()
229 if q:
230 for item in fields:
231 if item.related_model != Autor:
232 continue
233 q_fs = Q()
234 for field in item.fields_search:
235 q_fs = q_fs | Q(**{'%s%s' % (
236 field[0],
237 field[1]): q})
238 q_filter = q_filter & q_fs
239
240 qs = qs.filter(q_filter).distinct(
241 fields[0].fields_search[0][0]).order_by(
242 fields[0].fields_search[0][0])
243 else:
244 qs = qs.order_by(fields[0].fields_search[0][0])
245
246 qs = qs.values_list(
247 'id', fields[0].fields_search[0][0])
248 r += list(qs)
249
250 if tipos.count() > 1:
251 r.sort(key=lambda x: x[1].upper())
252 return r
253
254
255 class MateriaLegislativaViewSet(ListModelMixin,
256 RetrieveModelMixin,
257 GenericViewSet):
258
259 permission_classes = (IsAuthenticated,)
260 serializer_class = MateriaLegislativaSerializer
261 queryset = MateriaLegislativa.objects.all()
262 filter_backends = (DjangoFilterBackend,)
263 filter_fields = ('numero', 'ano', 'tipo', )
264
265
266 class SessaoPlenariaViewSet(ListModelMixin,
267 RetrieveModelMixin,
268 GenericViewSet):
269
270 permission_classes = (AllowAny,)
271 serializer_class = SessaoPlenariaSerializer
272 queryset = SessaoPlenaria.objects.all()
273 filter_backends = (DjangoFilterBackend,)
274 filter_fields = ('data_inicio', 'data_fim', 'interativa')
275
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sapl/api/views.py b/sapl/api/views.py
--- a/sapl/api/views.py
+++ b/sapl/api/views.py
@@ -96,19 +96,19 @@
http://localhost:8000
/api/autor?tr=1
- &q_0=parlamentar_set__nome_completo__icontains,
+ &q_0=parlamentar_set__nome_parlamentar__icontains,
parlamentar_set__ativo
&q_1=Carvalho,False
/api/autor?tr=1
- &q_0=parlamentar_set__nome_completo__icontains,
+ &q_0=parlamentar_set__nome_parlamentar__icontains,
parlamentar_set__ativo
&q_1=Carvalho,True
/api/autor?tr=3
- &q_0=parlamentar_set__nome_completo__icontains,
+ &q_0=parlamentar_set__nome_parlamentar__icontains,
parlamentar_set__ativo
&q_1=Carvalho,False
/api/autor?tr=3
- &q_0=parlamentar_set__nome_completo__icontains,
+ &q_0=parlamentar_set__nome_parlamentar__icontains,
parlamentar_set__ativo
&q_1=Carvalho,True
|
{"golden_diff": "diff --git a/sapl/api/views.py b/sapl/api/views.py\n--- a/sapl/api/views.py\n+++ b/sapl/api/views.py\n@@ -96,19 +96,19 @@\n \n http://localhost:8000\n /api/autor?tr=1\n- &q_0=parlamentar_set__nome_completo__icontains,\n+ &q_0=parlamentar_set__nome_parlamentar__icontains,\n parlamentar_set__ativo\n &q_1=Carvalho,False\n /api/autor?tr=1\n- &q_0=parlamentar_set__nome_completo__icontains,\n+ &q_0=parlamentar_set__nome_parlamentar__icontains,\n parlamentar_set__ativo\n &q_1=Carvalho,True\n /api/autor?tr=3\n- &q_0=parlamentar_set__nome_completo__icontains,\n+ &q_0=parlamentar_set__nome_parlamentar__icontains,\n parlamentar_set__ativo\n &q_1=Carvalho,False\n /api/autor?tr=3\n- &q_0=parlamentar_set__nome_completo__icontains,\n+ &q_0=parlamentar_set__nome_parlamentar__icontains,\n parlamentar_set__ativo\n &q_1=Carvalho,True\n", "issue": "Todos os autores dispon\u00edveis para sele\u00e7\u00e3o\nEst\u00e3o sendo mostrados para sele\u00e7\u00e3o em protocolo e mat\u00e9rias, todos os autores, inclusive os inativos no sistema\n", "before_files": [{"content": "from django.contrib.contenttypes.models import ContentType\nfrom django.db.models import Q\nfrom django.http import Http404\nfrom django.utils.translation import ugettext_lazy as _\nfrom rest_framework.filters import DjangoFilterBackend\nfrom rest_framework.generics import ListAPIView\nfrom rest_framework.mixins import ListModelMixin, RetrieveModelMixin\nfrom rest_framework.permissions import (AllowAny, IsAuthenticated,\n IsAuthenticatedOrReadOnly)\nfrom rest_framework.viewsets import GenericViewSet, ModelViewSet\n\nfrom sapl.api.forms import AutorChoiceFilterSet, AutorSearchForFieldFilterSet\nfrom sapl.api.serializers import (AutorChoiceSerializer, AutorSerializer,\n ChoiceSerializer,\n MateriaLegislativaSerializer,\n ModelChoiceSerializer,\n SessaoPlenariaSerializer)\nfrom sapl.base.models import Autor, TipoAutor\nfrom sapl.materia.models import MateriaLegislativa\nfrom sapl.sessao.models import SessaoPlenaria\nfrom sapl.utils import SaplGenericRelation, sapl_logger\n\n\nclass ModelChoiceView(ListAPIView):\n\n # FIXME aplicar permiss\u00e3o correta de usu\u00e1rio\n permission_classes = (IsAuthenticated,)\n serializer_class = ModelChoiceSerializer\n\n def get(self, request, *args, **kwargs):\n self.model = ContentType.objects.get_for_id(\n self.kwargs['content_type']).model_class()\n\n pagination = request.GET.get('pagination', '')\n\n if pagination == 'False':\n self.pagination_class = None\n\n return ListAPIView.get(self, request, *args, **kwargs)\n\n def get_queryset(self):\n return self.model.objects.all()\n\n\nclass AutorListView(ListAPIView):\n \"\"\"\n Listagem de Autores com filtro para autores j\u00e1 cadastrados\n e/ou poss\u00edveis autores.\n\n - tr - tipo do resultado\n Prepera Lista de Autores para 3 cen\u00e1rios distintos\n\n - default = 1\n\n = 1 -> para (value, text) usados geralmente\n em combobox, radiobox, checkbox, etc com pesquisa b\u00e1sica\n de Autores feita pelo django-filter\n -> processo usado nas pesquisas, o mais usado.\n\n = 2 -> para (value, text) usados geralmente\n em combobox, radiobox, checkbox, etc com pesquisa b\u00e1sica\n de Autores mas feito para Poss\u00edveis Autores armazenados\n segundo o ContentType associado ao Tipo de Autor via\n relacionamento gen\u00e9rico.\n Busca feita sem django-filter processada no get_queryset\n -> processo no cadastro de autores para sele\u00e7\u00e3o e busca\n dos poss\u00edveis autores\n\n = 3 -> Devolve instancias da classe Autor filtradas pelo\n 
django-filter\n\n - tipo - chave prim\u00e1ria do Tipo de Autor a ser filtrado\n\n - q - busca textual no nome do Autor ou em fields_search\n declarados no field SaplGenericRelation das GenericFks\n A busca textual acontece via django-filter com a\n vari\u00e1vel `tr` igual 1 ou 3. Em caso contr\u00e1rio,\n o django-filter \u00e9 desativado e a busca \u00e9 feita\n no model do ContentType associado ao tipo.\n\n - q_0 / q_1 - q_0 faz o c\u00f3digo ignorar \"q\"...\n\n q_0 -> campos lookup a serem filtrados em qualquer Model\n que implemente SaplGenericRelation\n q_1 -> o valor que ser\u00e1 pesquisado no lookup de q_0\n\n q_0 e q_1 podem ser separados por \",\"... isso dar\u00e1 a\n possibilidade de filtrar mais de um campo. \n\n\n http://localhost:8000\n /api/autor?tr=1&q_0=parlamentar_set__ativo&q_1=False\n /api/autor?tr=1&q_0=parlamentar_set__ativo&q_1=True\n /api/autor?tr=3&q_0=parlamentar_set__ativo&q_1=False\n /api/autor?tr=3&q_0=parlamentar_set__ativo&q_1=True\n\n http://localhost:8000\n /api/autor?tr=1\n &q_0=parlamentar_set__nome_completo__icontains,\n parlamentar_set__ativo\n &q_1=Carvalho,False\n /api/autor?tr=1\n &q_0=parlamentar_set__nome_completo__icontains,\n parlamentar_set__ativo\n &q_1=Carvalho,True\n /api/autor?tr=3\n &q_0=parlamentar_set__nome_completo__icontains,\n parlamentar_set__ativo\n &q_1=Carvalho,False\n /api/autor?tr=3\n &q_0=parlamentar_set__nome_completo__icontains,\n parlamentar_set__ativo\n &q_1=Carvalho,True\n\n\n n\u00e3o importa o campo que vc passe de qualquer dos Models\n ligados... \u00e9 poss\u00edvel ver que models s\u00e3o esses, \n na ocasi\u00e3o do commit deste texto, executando:\n In [6]: from sapl.utils import models_with_gr_for_model\n\n In [7]: models_with_gr_for_model(Autor)\n Out[7]:\n [sapl.parlamentares.models.Parlamentar,\n sapl.parlamentares.models.Frente,\n sapl.comissoes.models.Comissao,\n sapl.materia.models.Orgao,\n sapl.sessao.models.Bancada,\n sapl.sessao.models.Bloco]\n\n qualquer atributo destes models podem ser passados \n para busca\n \"\"\"\n\n TR_AUTOR_CHOICE_SERIALIZER = 1\n TR_CHOICE_SERIALIZER = 2\n TR_AUTOR_SERIALIZER = 3\n\n # FIXME aplicar permiss\u00e3o correta de usu\u00e1rio\n permission_classes = (IsAuthenticatedOrReadOnly,)\n queryset = Autor.objects.all()\n model = Autor\n\n filter_class = AutorChoiceFilterSet\n filter_backends = (DjangoFilterBackend, )\n serializer_class = AutorChoiceSerializer\n\n @property\n def tr(self):\n try:\n tr = int(self.request.GET.get\n ('tr', AutorListView.TR_AUTOR_CHOICE_SERIALIZER))\n\n assert tr in (\n AutorListView.TR_AUTOR_CHOICE_SERIALIZER,\n AutorListView.TR_CHOICE_SERIALIZER,\n AutorListView.TR_AUTOR_SERIALIZER), sapl_logger.info(\n _(\"Tipo do Resultado a ser fornecido n\u00e3o existe!\"))\n except:\n return AutorListView.TR_AUTOR_CHOICE_SERIALIZER\n else:\n return tr\n\n def get(self, request, *args, **kwargs):\n \"\"\"\n desativa o django-filter se a busca for por possiveis autores\n parametro tr = TR_CHOICE_SERIALIZER\n \"\"\"\n if self.tr == AutorListView.TR_CHOICE_SERIALIZER:\n self.filter_class = None\n self.filter_backends = []\n self.serializer_class = ChoiceSerializer\n\n elif self.tr == AutorListView.TR_AUTOR_SERIALIZER:\n self.serializer_class = AutorSerializer\n self.permission_classes = (IsAuthenticated,)\n\n if self.filter_class and 'q_0' in request.GET:\n self.filter_class = AutorSearchForFieldFilterSet\n\n return ListAPIView.get(self, request, *args, **kwargs)\n\n def get_queryset(self):\n queryset = ListAPIView.get_queryset(self)\n\n if 
self.filter_backends:\n return queryset\n\n params = {'content_type__isnull': False}\n\n tipo = ''\n try:\n tipo = int(self.request.GET.get('tipo', ''))\n if tipo:\n params['id'] = tipo\n except:\n pass\n\n tipos = TipoAutor.objects.filter(**params)\n\n if not tipos.exists() and tipo:\n raise Http404()\n\n r = []\n for tipo in tipos:\n q = self.request.GET.get('q', '').strip()\n\n model_class = tipo.content_type.model_class()\n\n fields = list(filter(\n lambda field: isinstance(field, SaplGenericRelation) and\n field.related_model == Autor,\n model_class._meta.get_fields(include_hidden=True)))\n\n \"\"\"\n fields - \u00e9 um array de SaplGenericRelation que deve possuir o\n atributo fields_search. Verifique na documenta\u00e7\u00e3o da classe\n a estrutura de fields_search.\n \"\"\"\n\n assert len(fields) >= 1, (_(\n 'N\u00e3o foi encontrado em %(model)s um atributo do tipo '\n 'SaplGenericRelation que use o model %(model_autor)s') % {\n 'model': model_class._meta.verbose_name,\n 'model_autor': Autor._meta.verbose_name})\n\n qs = model_class.objects.all()\n\n q_filter = Q()\n if q:\n for item in fields:\n if item.related_model != Autor:\n continue\n q_fs = Q()\n for field in item.fields_search:\n q_fs = q_fs | Q(**{'%s%s' % (\n field[0],\n field[1]): q})\n q_filter = q_filter & q_fs\n\n qs = qs.filter(q_filter).distinct(\n fields[0].fields_search[0][0]).order_by(\n fields[0].fields_search[0][0])\n else:\n qs = qs.order_by(fields[0].fields_search[0][0])\n\n qs = qs.values_list(\n 'id', fields[0].fields_search[0][0])\n r += list(qs)\n\n if tipos.count() > 1:\n r.sort(key=lambda x: x[1].upper())\n return r\n\n\nclass MateriaLegislativaViewSet(ListModelMixin,\n RetrieveModelMixin,\n GenericViewSet):\n\n permission_classes = (IsAuthenticated,)\n serializer_class = MateriaLegislativaSerializer\n queryset = MateriaLegislativa.objects.all()\n filter_backends = (DjangoFilterBackend,)\n filter_fields = ('numero', 'ano', 'tipo', )\n\n\nclass SessaoPlenariaViewSet(ListModelMixin,\n RetrieveModelMixin,\n GenericViewSet):\n\n permission_classes = (AllowAny,)\n serializer_class = SessaoPlenariaSerializer\n queryset = SessaoPlenaria.objects.all()\n filter_backends = (DjangoFilterBackend,)\n filter_fields = ('data_inicio', 'data_fim', 'interativa')\n", "path": "sapl/api/views.py"}], "after_files": [{"content": "from django.contrib.contenttypes.models import ContentType\nfrom django.db.models import Q\nfrom django.http import Http404\nfrom django.utils.translation import ugettext_lazy as _\nfrom rest_framework.filters import DjangoFilterBackend\nfrom rest_framework.generics import ListAPIView\nfrom rest_framework.mixins import ListModelMixin, RetrieveModelMixin\nfrom rest_framework.permissions import (AllowAny, IsAuthenticated,\n IsAuthenticatedOrReadOnly)\nfrom rest_framework.viewsets import GenericViewSet, ModelViewSet\n\nfrom sapl.api.forms import AutorChoiceFilterSet, AutorSearchForFieldFilterSet\nfrom sapl.api.serializers import (AutorChoiceSerializer, AutorSerializer,\n ChoiceSerializer,\n MateriaLegislativaSerializer,\n ModelChoiceSerializer,\n SessaoPlenariaSerializer)\nfrom sapl.base.models import Autor, TipoAutor\nfrom sapl.materia.models import MateriaLegislativa\nfrom sapl.sessao.models import SessaoPlenaria\nfrom sapl.utils import SaplGenericRelation, sapl_logger\n\n\nclass ModelChoiceView(ListAPIView):\n\n # FIXME aplicar permiss\u00e3o correta de usu\u00e1rio\n permission_classes = (IsAuthenticated,)\n serializer_class = ModelChoiceSerializer\n\n def get(self, request, *args, **kwargs):\n 
self.model = ContentType.objects.get_for_id(\n self.kwargs['content_type']).model_class()\n\n pagination = request.GET.get('pagination', '')\n\n if pagination == 'False':\n self.pagination_class = None\n\n return ListAPIView.get(self, request, *args, **kwargs)\n\n def get_queryset(self):\n return self.model.objects.all()\n\n\nclass AutorListView(ListAPIView):\n \"\"\"\n Listagem de Autores com filtro para autores j\u00e1 cadastrados\n e/ou poss\u00edveis autores.\n\n - tr - tipo do resultado\n Prepera Lista de Autores para 3 cen\u00e1rios distintos\n\n - default = 1\n\n = 1 -> para (value, text) usados geralmente\n em combobox, radiobox, checkbox, etc com pesquisa b\u00e1sica\n de Autores feita pelo django-filter\n -> processo usado nas pesquisas, o mais usado.\n\n = 2 -> para (value, text) usados geralmente\n em combobox, radiobox, checkbox, etc com pesquisa b\u00e1sica\n de Autores mas feito para Poss\u00edveis Autores armazenados\n segundo o ContentType associado ao Tipo de Autor via\n relacionamento gen\u00e9rico.\n Busca feita sem django-filter processada no get_queryset\n -> processo no cadastro de autores para sele\u00e7\u00e3o e busca\n dos poss\u00edveis autores\n\n = 3 -> Devolve instancias da classe Autor filtradas pelo\n django-filter\n\n - tipo - chave prim\u00e1ria do Tipo de Autor a ser filtrado\n\n - q - busca textual no nome do Autor ou em fields_search\n declarados no field SaplGenericRelation das GenericFks\n A busca textual acontece via django-filter com a\n vari\u00e1vel `tr` igual 1 ou 3. Em caso contr\u00e1rio,\n o django-filter \u00e9 desativado e a busca \u00e9 feita\n no model do ContentType associado ao tipo.\n\n - q_0 / q_1 - q_0 faz o c\u00f3digo ignorar \"q\"...\n\n q_0 -> campos lookup a serem filtrados em qualquer Model\n que implemente SaplGenericRelation\n q_1 -> o valor que ser\u00e1 pesquisado no lookup de q_0\n\n q_0 e q_1 podem ser separados por \",\"... isso dar\u00e1 a\n possibilidade de filtrar mais de um campo. \n\n\n http://localhost:8000\n /api/autor?tr=1&q_0=parlamentar_set__ativo&q_1=False\n /api/autor?tr=1&q_0=parlamentar_set__ativo&q_1=True\n /api/autor?tr=3&q_0=parlamentar_set__ativo&q_1=False\n /api/autor?tr=3&q_0=parlamentar_set__ativo&q_1=True\n\n http://localhost:8000\n /api/autor?tr=1\n &q_0=parlamentar_set__nome_parlamentar__icontains,\n parlamentar_set__ativo\n &q_1=Carvalho,False\n /api/autor?tr=1\n &q_0=parlamentar_set__nome_parlamentar__icontains,\n parlamentar_set__ativo\n &q_1=Carvalho,True\n /api/autor?tr=3\n &q_0=parlamentar_set__nome_parlamentar__icontains,\n parlamentar_set__ativo\n &q_1=Carvalho,False\n /api/autor?tr=3\n &q_0=parlamentar_set__nome_parlamentar__icontains,\n parlamentar_set__ativo\n &q_1=Carvalho,True\n\n\n n\u00e3o importa o campo que vc passe de qualquer dos Models\n ligados... 
\u00e9 poss\u00edvel ver que models s\u00e3o esses, \n na ocasi\u00e3o do commit deste texto, executando:\n In [6]: from sapl.utils import models_with_gr_for_model\n\n In [7]: models_with_gr_for_model(Autor)\n Out[7]:\n [sapl.parlamentares.models.Parlamentar,\n sapl.parlamentares.models.Frente,\n sapl.comissoes.models.Comissao,\n sapl.materia.models.Orgao,\n sapl.sessao.models.Bancada,\n sapl.sessao.models.Bloco]\n\n qualquer atributo destes models podem ser passados \n para busca\n \"\"\"\n\n TR_AUTOR_CHOICE_SERIALIZER = 1\n TR_CHOICE_SERIALIZER = 2\n TR_AUTOR_SERIALIZER = 3\n\n # FIXME aplicar permiss\u00e3o correta de usu\u00e1rio\n permission_classes = (IsAuthenticatedOrReadOnly,)\n queryset = Autor.objects.all()\n model = Autor\n\n filter_class = AutorChoiceFilterSet\n filter_backends = (DjangoFilterBackend, )\n serializer_class = AutorChoiceSerializer\n\n @property\n def tr(self):\n try:\n tr = int(self.request.GET.get\n ('tr', AutorListView.TR_AUTOR_CHOICE_SERIALIZER))\n\n assert tr in (\n AutorListView.TR_AUTOR_CHOICE_SERIALIZER,\n AutorListView.TR_CHOICE_SERIALIZER,\n AutorListView.TR_AUTOR_SERIALIZER), sapl_logger.info(\n _(\"Tipo do Resultado a ser fornecido n\u00e3o existe!\"))\n except:\n return AutorListView.TR_AUTOR_CHOICE_SERIALIZER\n else:\n return tr\n\n def get(self, request, *args, **kwargs):\n \"\"\"\n desativa o django-filter se a busca for por possiveis autores\n parametro tr = TR_CHOICE_SERIALIZER\n \"\"\"\n if self.tr == AutorListView.TR_CHOICE_SERIALIZER:\n self.filter_class = None\n self.filter_backends = []\n self.serializer_class = ChoiceSerializer\n\n elif self.tr == AutorListView.TR_AUTOR_SERIALIZER:\n self.serializer_class = AutorSerializer\n self.permission_classes = (IsAuthenticated,)\n\n if self.filter_class and 'q_0' in request.GET:\n self.filter_class = AutorSearchForFieldFilterSet\n\n return ListAPIView.get(self, request, *args, **kwargs)\n\n def get_queryset(self):\n queryset = ListAPIView.get_queryset(self)\n\n if self.filter_backends:\n return queryset\n\n params = {'content_type__isnull': False}\n\n tipo = ''\n try:\n tipo = int(self.request.GET.get('tipo', ''))\n if tipo:\n params['id'] = tipo\n except:\n pass\n\n tipos = TipoAutor.objects.filter(**params)\n\n if not tipos.exists() and tipo:\n raise Http404()\n\n r = []\n for tipo in tipos:\n q = self.request.GET.get('q', '').strip()\n\n model_class = tipo.content_type.model_class()\n\n fields = list(filter(\n lambda field: isinstance(field, SaplGenericRelation) and\n field.related_model == Autor,\n model_class._meta.get_fields(include_hidden=True)))\n\n \"\"\"\n fields - \u00e9 um array de SaplGenericRelation que deve possuir o\n atributo fields_search. 
Verifique na documenta\u00e7\u00e3o da classe\n a estrutura de fields_search.\n \"\"\"\n\n assert len(fields) >= 1, (_(\n 'N\u00e3o foi encontrado em %(model)s um atributo do tipo '\n 'SaplGenericRelation que use o model %(model_autor)s') % {\n 'model': model_class._meta.verbose_name,\n 'model_autor': Autor._meta.verbose_name})\n\n qs = model_class.objects.all()\n\n q_filter = Q()\n if q:\n for item in fields:\n if item.related_model != Autor:\n continue\n q_fs = Q()\n for field in item.fields_search:\n q_fs = q_fs | Q(**{'%s%s' % (\n field[0],\n field[1]): q})\n q_filter = q_filter & q_fs\n\n qs = qs.filter(q_filter).distinct(\n fields[0].fields_search[0][0]).order_by(\n fields[0].fields_search[0][0])\n else:\n qs = qs.order_by(fields[0].fields_search[0][0])\n\n qs = qs.values_list(\n 'id', fields[0].fields_search[0][0])\n r += list(qs)\n\n if tipos.count() > 1:\n r.sort(key=lambda x: x[1].upper())\n return r\n\n\nclass MateriaLegislativaViewSet(ListModelMixin,\n RetrieveModelMixin,\n GenericViewSet):\n\n permission_classes = (IsAuthenticated,)\n serializer_class = MateriaLegislativaSerializer\n queryset = MateriaLegislativa.objects.all()\n filter_backends = (DjangoFilterBackend,)\n filter_fields = ('numero', 'ano', 'tipo', )\n\n\nclass SessaoPlenariaViewSet(ListModelMixin,\n RetrieveModelMixin,\n GenericViewSet):\n\n permission_classes = (AllowAny,)\n serializer_class = SessaoPlenariaSerializer\n queryset = SessaoPlenaria.objects.all()\n filter_backends = (DjangoFilterBackend,)\n filter_fields = ('data_inicio', 'data_fim', 'interativa')\n", "path": "sapl/api/views.py"}]}
| 3,324 | 337 |
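For illustration, the corrected docstring lookups in the patch above can be exercised against a running SAPL instance. This is a minimal sketch, assuming the local development host shown in the docstring and the `requests` library — neither is part of the patch:

```python
import requests

# Query possible authors by parliamentary name, keeping only active
# parliamentarians, using the lookup documented in the patched docstring.
params = {
    "tr": 1,
    "q_0": "parlamentar_set__nome_parlamentar__icontains,parlamentar_set__ativo",
    "q_1": "Carvalho,True",
}
resp = requests.get("http://localhost:8000/api/autor", params=params)
resp.raise_for_status()
print(resp.json())
```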
gh_patches_debug_20413
|
rasdani/github-patches
|
git_diff
|
flairNLP__flair-198
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Generate text with language model
Is there any script that can generate text from a trained language model?
Also, calculating perplexity on testing data would be helpful.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flair/models/language_model.py`
Content:
```
1 import torch.nn as nn
2 import torch
3 import math
4 from torch.autograd import Variable
5 from typing import Dict, List
6 from flair.data import Dictionary
7
8
9 class LanguageModel(nn.Module):
10 """Container module with an encoder, a recurrent module, and a decoder."""
11
12 def __init__(self,
13 dictionary: Dictionary,
14 is_forward_lm: bool,
15 hidden_size: int,
16 nlayers: int,
17 embedding_size: int = 100,
18 nout=None,
19 dropout=0.5):
20
21 super(LanguageModel, self).__init__()
22
23 self.dictionary = dictionary
24 self.is_forward_lm: bool = is_forward_lm
25
26 self.dropout = dropout
27 self.hidden_size = hidden_size
28 self.embedding_size = embedding_size
29 self.nlayers = nlayers
30
31 self.drop = nn.Dropout(dropout)
32 self.encoder = nn.Embedding(len(dictionary), embedding_size)
33
34 if nlayers == 1:
35 self.rnn = nn.LSTM(embedding_size, hidden_size, nlayers)
36 else:
37 self.rnn = nn.LSTM(embedding_size, hidden_size, nlayers, dropout=dropout)
38
39 self.hidden = None
40
41 self.nout = nout
42 if nout is not None:
43 self.proj = nn.Linear(hidden_size, nout)
44 self.initialize(self.proj.weight)
45 self.decoder = nn.Linear(nout, len(dictionary))
46 else:
47 self.proj = None
48 self.decoder = nn.Linear(hidden_size, len(dictionary))
49
50 self.init_weights()
51
52 # auto-spawn on GPU if available
53 if torch.cuda.is_available():
54 self.cuda()
55
56 def init_weights(self):
57 initrange = 0.1
58 self.encoder.weight.data.uniform_(-initrange, initrange)
59 self.decoder.bias.data.fill_(0)
60 self.decoder.weight.data.uniform_(-initrange, initrange)
61
62 def set_hidden(self, hidden):
63 self.hidden = hidden
64
65 def forward(self, input, hidden, ordered_sequence_lengths=None):
66 encoded = self.encoder(input)
67 emb = self.drop(encoded)
68
69 self.rnn.flatten_parameters()
70
71 output, hidden = self.rnn(emb, hidden)
72
73 if self.proj is not None:
74 output = self.proj(output)
75
76 output = self.drop(output)
77
78 decoded = self.decoder(output.view(output.size(0) * output.size(1), output.size(2)))
79
80 return decoded.view(output.size(0), output.size(1), decoded.size(1)), output, hidden
81
82 def init_hidden(self, bsz):
83 weight = next(self.parameters()).data
84 return (Variable(weight.new(self.nlayers, bsz, self.hidden_size).zero_()),
85 Variable(weight.new(self.nlayers, bsz, self.hidden_size).zero_()))
86
87 def get_representation(self, strings: List[str], detach_from_lm=True):
88
89 sequences_as_char_indices: List[List[int]] = []
90 for string in strings:
91 char_indices = [self.dictionary.get_idx_for_item(char) for char in string]
92 sequences_as_char_indices.append(char_indices)
93
94 batch = Variable(torch.LongTensor(sequences_as_char_indices).transpose(0, 1))
95
96 if torch.cuda.is_available():
97 batch = batch.cuda()
98
99 hidden = self.init_hidden(len(strings))
100 prediction, rnn_output, hidden = self.forward(batch, hidden)
101
102 if detach_from_lm: rnn_output = self.repackage_hidden(rnn_output)
103
104 return rnn_output
105
106 def repackage_hidden(self, h):
107 """Wraps hidden states in new Variables, to detach them from their history."""
108 if type(h) == torch.Tensor:
109 return Variable(h.data)
110 else:
111 return tuple(self.repackage_hidden(v) for v in h)
112
113 def initialize(self, matrix):
114 in_, out_ = matrix.size()
115 stdv = math.sqrt(3. / (in_ + out_))
116 matrix.data.uniform_(-stdv, stdv)
117
118 @classmethod
119 def load_language_model(cls, model_file):
120
121 if not torch.cuda.is_available():
122 state = torch.load(model_file, map_location='cpu')
123 else:
124 state = torch.load(model_file)
125
126 model = LanguageModel(state['dictionary'],
127 state['is_forward_lm'],
128 state['hidden_size'],
129 state['nlayers'],
130 state['embedding_size'],
131 state['nout'],
132 state['dropout'])
133 model.load_state_dict(state['state_dict'])
134 model.eval()
135 if torch.cuda.is_available():
136 model.cuda()
137 return model
138
139 def save(self, file):
140 model_state = {
141 'state_dict': self.state_dict(),
142 'dictionary': self.dictionary,
143 'is_forward_lm': self.is_forward_lm,
144 'hidden_size': self.hidden_size,
145 'nlayers': self.nlayers,
146 'embedding_size': self.embedding_size,
147 'nout': self.nout,
148 'dropout': self.dropout
149 }
150 torch.save(model_state, file, pickle_protocol=4)
151
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/flair/models/language_model.py b/flair/models/language_model.py
--- a/flair/models/language_model.py
+++ b/flair/models/language_model.py
@@ -2,7 +2,7 @@
import torch
import math
from torch.autograd import Variable
-from typing import Dict, List
+from typing import List
from flair.data import Dictionary
@@ -148,3 +148,24 @@
'dropout': self.dropout
}
torch.save(model_state, file, pickle_protocol=4)
+
+ def generate_text(self, number_of_characters=1000) -> str:
+ characters = []
+
+ idx2item = self.dictionary.idx2item
+
+ # initial hidden state
+ hidden = self.init_hidden(1)
+ input = torch.rand(1, 1).mul(len(idx2item)).long()
+ if torch.cuda.is_available():
+ input = input.cuda()
+
+ for i in range(number_of_characters):
+ prediction, rnn_output, hidden = self.forward(input, hidden)
+ word_weights = prediction.squeeze().data.div(1.0).exp().cpu()
+ word_idx = torch.multinomial(word_weights, 1)[0]
+ input.data.fill_(word_idx)
+ word = idx2item[word_idx].decode('UTF-8')
+ characters.append(word)
+
+ return ''.join(characters)
|
{"golden_diff": "diff --git a/flair/models/language_model.py b/flair/models/language_model.py\n--- a/flair/models/language_model.py\n+++ b/flair/models/language_model.py\n@@ -2,7 +2,7 @@\n import torch\n import math\n from torch.autograd import Variable\n-from typing import Dict, List\n+from typing import List\n from flair.data import Dictionary\n \n \n@@ -148,3 +148,24 @@\n 'dropout': self.dropout\n }\n torch.save(model_state, file, pickle_protocol=4)\n+\n+ def generate_text(self, number_of_characters=1000) -> str:\n+ characters = []\n+\n+ idx2item = self.dictionary.idx2item\n+\n+ # initial hidden state\n+ hidden = self.init_hidden(1)\n+ input = torch.rand(1, 1).mul(len(idx2item)).long()\n+ if torch.cuda.is_available():\n+ input = input.cuda()\n+\n+ for i in range(number_of_characters):\n+ prediction, rnn_output, hidden = self.forward(input, hidden)\n+ word_weights = prediction.squeeze().data.div(1.0).exp().cpu()\n+ word_idx = torch.multinomial(word_weights, 1)[0]\n+ input.data.fill_(word_idx)\n+ word = idx2item[word_idx].decode('UTF-8')\n+ characters.append(word)\n+\n+ return ''.join(characters)\n", "issue": "Generate text with language model\nIs there any script that can generate text from trained language model?\r\nAlso calculating perplexity on testing data would be helpful.\n", "before_files": [{"content": "import torch.nn as nn\nimport torch\nimport math\nfrom torch.autograd import Variable\nfrom typing import Dict, List\nfrom flair.data import Dictionary\n\n\nclass LanguageModel(nn.Module):\n \"\"\"Container module with an encoder, a recurrent module, and a decoder.\"\"\"\n\n def __init__(self,\n dictionary: Dictionary,\n is_forward_lm: bool,\n hidden_size: int,\n nlayers: int,\n embedding_size: int = 100,\n nout=None,\n dropout=0.5):\n\n super(LanguageModel, self).__init__()\n\n self.dictionary = dictionary\n self.is_forward_lm: bool = is_forward_lm\n\n self.dropout = dropout\n self.hidden_size = hidden_size\n self.embedding_size = embedding_size\n self.nlayers = nlayers\n\n self.drop = nn.Dropout(dropout)\n self.encoder = nn.Embedding(len(dictionary), embedding_size)\n\n if nlayers == 1:\n self.rnn = nn.LSTM(embedding_size, hidden_size, nlayers)\n else:\n self.rnn = nn.LSTM(embedding_size, hidden_size, nlayers, dropout=dropout)\n\n self.hidden = None\n\n self.nout = nout\n if nout is not None:\n self.proj = nn.Linear(hidden_size, nout)\n self.initialize(self.proj.weight)\n self.decoder = nn.Linear(nout, len(dictionary))\n else:\n self.proj = None\n self.decoder = nn.Linear(hidden_size, len(dictionary))\n\n self.init_weights()\n\n # auto-spawn on GPU if available\n if torch.cuda.is_available():\n self.cuda()\n\n def init_weights(self):\n initrange = 0.1\n self.encoder.weight.data.uniform_(-initrange, initrange)\n self.decoder.bias.data.fill_(0)\n self.decoder.weight.data.uniform_(-initrange, initrange)\n\n def set_hidden(self, hidden):\n self.hidden = hidden\n\n def forward(self, input, hidden, ordered_sequence_lengths=None):\n encoded = self.encoder(input)\n emb = self.drop(encoded)\n\n self.rnn.flatten_parameters()\n\n output, hidden = self.rnn(emb, hidden)\n\n if self.proj is not None:\n output = self.proj(output)\n\n output = self.drop(output)\n\n decoded = self.decoder(output.view(output.size(0) * output.size(1), output.size(2)))\n\n return decoded.view(output.size(0), output.size(1), decoded.size(1)), output, hidden\n\n def init_hidden(self, bsz):\n weight = next(self.parameters()).data\n return (Variable(weight.new(self.nlayers, bsz, self.hidden_size).zero_()),\n 
Variable(weight.new(self.nlayers, bsz, self.hidden_size).zero_()))\n\n def get_representation(self, strings: List[str], detach_from_lm=True):\n\n sequences_as_char_indices: List[List[int]] = []\n for string in strings:\n char_indices = [self.dictionary.get_idx_for_item(char) for char in string]\n sequences_as_char_indices.append(char_indices)\n\n batch = Variable(torch.LongTensor(sequences_as_char_indices).transpose(0, 1))\n\n if torch.cuda.is_available():\n batch = batch.cuda()\n\n hidden = self.init_hidden(len(strings))\n prediction, rnn_output, hidden = self.forward(batch, hidden)\n\n if detach_from_lm: rnn_output = self.repackage_hidden(rnn_output)\n\n return rnn_output\n\n def repackage_hidden(self, h):\n \"\"\"Wraps hidden states in new Variables, to detach them from their history.\"\"\"\n if type(h) == torch.Tensor:\n return Variable(h.data)\n else:\n return tuple(self.repackage_hidden(v) for v in h)\n\n def initialize(self, matrix):\n in_, out_ = matrix.size()\n stdv = math.sqrt(3. / (in_ + out_))\n matrix.data.uniform_(-stdv, stdv)\n\n @classmethod\n def load_language_model(cls, model_file):\n\n if not torch.cuda.is_available():\n state = torch.load(model_file, map_location='cpu')\n else:\n state = torch.load(model_file)\n\n model = LanguageModel(state['dictionary'],\n state['is_forward_lm'],\n state['hidden_size'],\n state['nlayers'],\n state['embedding_size'],\n state['nout'],\n state['dropout'])\n model.load_state_dict(state['state_dict'])\n model.eval()\n if torch.cuda.is_available():\n model.cuda()\n return model\n\n def save(self, file):\n model_state = {\n 'state_dict': self.state_dict(),\n 'dictionary': self.dictionary,\n 'is_forward_lm': self.is_forward_lm,\n 'hidden_size': self.hidden_size,\n 'nlayers': self.nlayers,\n 'embedding_size': self.embedding_size,\n 'nout': self.nout,\n 'dropout': self.dropout\n }\n torch.save(model_state, file, pickle_protocol=4)\n", "path": "flair/models/language_model.py"}], "after_files": [{"content": "import torch.nn as nn\nimport torch\nimport math\nfrom torch.autograd import Variable\nfrom typing import List\nfrom flair.data import Dictionary\n\n\nclass LanguageModel(nn.Module):\n \"\"\"Container module with an encoder, a recurrent module, and a decoder.\"\"\"\n\n def __init__(self,\n dictionary: Dictionary,\n is_forward_lm: bool,\n hidden_size: int,\n nlayers: int,\n embedding_size: int = 100,\n nout=None,\n dropout=0.5):\n\n super(LanguageModel, self).__init__()\n\n self.dictionary = dictionary\n self.is_forward_lm: bool = is_forward_lm\n\n self.dropout = dropout\n self.hidden_size = hidden_size\n self.embedding_size = embedding_size\n self.nlayers = nlayers\n\n self.drop = nn.Dropout(dropout)\n self.encoder = nn.Embedding(len(dictionary), embedding_size)\n\n if nlayers == 1:\n self.rnn = nn.LSTM(embedding_size, hidden_size, nlayers)\n else:\n self.rnn = nn.LSTM(embedding_size, hidden_size, nlayers, dropout=dropout)\n\n self.hidden = None\n\n self.nout = nout\n if nout is not None:\n self.proj = nn.Linear(hidden_size, nout)\n self.initialize(self.proj.weight)\n self.decoder = nn.Linear(nout, len(dictionary))\n else:\n self.proj = None\n self.decoder = nn.Linear(hidden_size, len(dictionary))\n\n self.init_weights()\n\n # auto-spawn on GPU if available\n if torch.cuda.is_available():\n self.cuda()\n\n def init_weights(self):\n initrange = 0.1\n self.encoder.weight.data.uniform_(-initrange, initrange)\n self.decoder.bias.data.fill_(0)\n self.decoder.weight.data.uniform_(-initrange, initrange)\n\n def set_hidden(self, hidden):\n 
self.hidden = hidden\n\n def forward(self, input, hidden, ordered_sequence_lengths=None):\n encoded = self.encoder(input)\n emb = self.drop(encoded)\n\n self.rnn.flatten_parameters()\n\n output, hidden = self.rnn(emb, hidden)\n\n if self.proj is not None:\n output = self.proj(output)\n\n output = self.drop(output)\n\n decoded = self.decoder(output.view(output.size(0) * output.size(1), output.size(2)))\n\n return decoded.view(output.size(0), output.size(1), decoded.size(1)), output, hidden\n\n def init_hidden(self, bsz):\n weight = next(self.parameters()).data\n return (Variable(weight.new(self.nlayers, bsz, self.hidden_size).zero_()),\n Variable(weight.new(self.nlayers, bsz, self.hidden_size).zero_()))\n\n def get_representation(self, strings: List[str], detach_from_lm=True):\n\n sequences_as_char_indices: List[List[int]] = []\n for string in strings:\n char_indices = [self.dictionary.get_idx_for_item(char) for char in string]\n sequences_as_char_indices.append(char_indices)\n\n batch = Variable(torch.LongTensor(sequences_as_char_indices).transpose(0, 1))\n\n if torch.cuda.is_available():\n batch = batch.cuda()\n\n hidden = self.init_hidden(len(strings))\n prediction, rnn_output, hidden = self.forward(batch, hidden)\n\n if detach_from_lm: rnn_output = self.repackage_hidden(rnn_output)\n\n return rnn_output\n\n def repackage_hidden(self, h):\n \"\"\"Wraps hidden states in new Variables, to detach them from their history.\"\"\"\n if type(h) == torch.Tensor:\n return Variable(h.data)\n else:\n return tuple(self.repackage_hidden(v) for v in h)\n\n def initialize(self, matrix):\n in_, out_ = matrix.size()\n stdv = math.sqrt(3. / (in_ + out_))\n matrix.data.uniform_(-stdv, stdv)\n\n @classmethod\n def load_language_model(cls, model_file):\n\n if not torch.cuda.is_available():\n state = torch.load(model_file, map_location='cpu')\n else:\n state = torch.load(model_file)\n\n model = LanguageModel(state['dictionary'],\n state['is_forward_lm'],\n state['hidden_size'],\n state['nlayers'],\n state['embedding_size'],\n state['nout'],\n state['dropout'])\n model.load_state_dict(state['state_dict'])\n model.eval()\n if torch.cuda.is_available():\n model.cuda()\n return model\n\n def save(self, file):\n model_state = {\n 'state_dict': self.state_dict(),\n 'dictionary': self.dictionary,\n 'is_forward_lm': self.is_forward_lm,\n 'hidden_size': self.hidden_size,\n 'nlayers': self.nlayers,\n 'embedding_size': self.embedding_size,\n 'nout': self.nout,\n 'dropout': self.dropout\n }\n torch.save(model_state, file, pickle_protocol=4)\n\n def generate_text(self, number_of_characters=1000) -> str:\n characters = []\n\n idx2item = self.dictionary.idx2item\n\n # initial hidden state\n hidden = self.init_hidden(1)\n input = torch.rand(1, 1).mul(len(idx2item)).long()\n if torch.cuda.is_available():\n input = input.cuda()\n\n for i in range(number_of_characters):\n prediction, rnn_output, hidden = self.forward(input, hidden)\n word_weights = prediction.squeeze().data.div(1.0).exp().cpu()\n word_idx = torch.multinomial(word_weights, 1)[0]\n input.data.fill_(word_idx)\n word = idx2item[word_idx].decode('UTF-8')\n characters.append(word)\n\n return ''.join(characters)\n", "path": "flair/models/language_model.py"}]}
| 1,701 | 309 |
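The flair patch above adds a `generate_text` method that samples one character at a time, feeding each sampled character index back in as the next input while reusing the hidden state. A minimal usage sketch, assuming a trained character-level model checkpoint on disk (the checkpoint path here is hypothetical, not part of the patch):

```python
from flair.models.language_model import LanguageModel

# Load a trained character-level language model (hypothetical checkpoint path).
model = LanguageModel.load_language_model("resources/LMs/news-forward/best-lm.pt")

# Sample 500 characters from the model; generation starts from a random
# character index, as implemented in the added generate_text method.
text = model.generate_text(number_of_characters=500)
print(text)
```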
gh_patches_debug_35899
|
rasdani/github-patches
|
git_diff
|
bids-standard__pybids-598
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Keeping the docs current
We're currently manually pushing the docs to https://bids-standard.github.io/pybids/, which is easy to forget, and the docs are currently out-of-date (0.9). It would be good to make an explicit process to update them or set up something automatic like ReadTheDocs.
ReadTheDocs also has the advantage of keeping old versions around, which might be useful for reference as the API changes.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `doc/conf.py`
Content:
```
1 # Configuration file for the Sphinx documentation builder.
2 #
3 # This file only contains a selection of the most common options. For a full
4 # list see the documentation:
5 # https://www.sphinx-doc.org/en/master/usage/configuration.html
6
7 # -- Path setup --------------------------------------------------------------
8
9 # If extensions (or modules to document with autodoc) are in another directory,
10 # add these directories to sys.path here. If the directory is relative to the
11 # documentation root, use os.path.abspath to make it absolute, like shown here.
12 #
13 import os
14 import sys
15 import sphinx_rtd_theme
16
17 sys.path.insert(0, os.path.abspath('../'))
18
19 # -- Project information -----------------------------------------------------
20
21 project = 'PyBIDS'
22 copyright = '2020, Developers of PyBIDS'
23 author = 'Developers of PyBIDS'
24
25 currentdir = os.path.abspath(os.path.dirname(__file__))
26 from bids._version import get_versions
27 __version__ = get_versions()['version']
28 source_version = __version__
29 currentdir = os.path.abspath(os.path.dirname(__file__))
30
31 # -- General configuration ---------------------------------------------------
32
33 # Add any Sphinx extension module names here, as strings. They can be
34 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
35 # ones.
36 extensions = [
37 'sphinx.ext.autodoc',
38 'sphinx.ext.autosummary',
39 'sphinx.ext.autosectionlabel',
40 'sphinx.ext.intersphinx',
41 'sphinx.ext.coverage',
42 'sphinx.ext.mathjax',
43 'sphinx.ext.viewcode',
44 'sphinx.ext.githubpages',
45 'sphinx.ext.napoleon',
46 'numpydoc',
47 'm2r'
48 ]
49
50 intersphinx_mapping = {
51 'http://docs.python.org/3.5': None,
52 'http://docs.scipy.org/doc/numpy': None,
53 'http://docs.scipy.org/doc/scipy/reference': None,
54 'http://matplotlib.org/': None,
55 'http://scikit-learn.org/0.17': None,
56 'http://nipy.org/nibabel/': None,
57 'http://pandas.pydata.org/pandas-docs/stable/': None,
58 'http://neurosynth.readthedocs.io/en/latest/': None,
59 }
60
61 # If your documentation needs a minimal Sphinx version, state it here.
62 needs_sphinx = '2.2.0'
63
64 # Generate stubs
65 autosummary_generate = True
66 autodoc_default_flags = ['members', 'inherited-members']
67 add_module_names = False
68
69 # Add any paths that contain templates here, relative to this directory.
70 templates_path = ['_templates']
71
72 # The suffix of source filenames.
73 source_suffix = ['.rst', '.md']
74
75 # The master toctree document.
76 master_doc = 'index'
77
78 # List of patterns, relative to source directory, that match files and
79 # directories to ignore when looking for source files.
80 # This pattern also affects html_static_path and html_extra_path.
81 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
82
83 # The short X.Y version.
84 version = ''
85 # The full version, including alpha/beta/rc tags.
86 import bids
87 release = bids.__version__
88
89 # The name of the Pygments (syntax highlighting) style to use.
90 pygments_style = 'sphinx'
91
92 # -- Options for HTML output -------------------------------------------------
93
94 # The theme to use for HTML and HTML Help pages. See the documentation for
95 # a list of builtin themes.
96 #
97 # The theme to use for HTML and HTML Help pages. See the documentation for
98 # a list of builtin themes.
99 html_theme = "sphinx_rtd_theme"
100 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
101
102 # Add any paths that contain custom static files (such as style sheets) here,
103 # relative to this directory. They are copied after the builtin static files,
104 # so a file named "default.css" will overwrite the builtin "default.css".
105 html_static_path = ['_static']
106
107 # If false, no module index is generated.
108 html_domain_indices = False
109
110 # One entry per manual page. List of tuples
111 # (source start file, name, description, authors, manual section).
112 man_pages = [
113 ('index', 'bids', 'bids Documentation',
114 [author], 1)
115 ]
116
117 # Grouping the document tree into Texinfo files. List of tuples
118 # (source start file, target name, title, author,
119 # dir menu entry, description, category)
120 texinfo_documents = [
121 ('index', 'bids', 'bids Documentation',
122 author, 'bids', 'One line description of project.',
123 'Miscellaneous'),
124 ]
125
126 # If false, no module index is generated.
127 texinfo_domain_indices = False
128
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/doc/conf.py b/doc/conf.py
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -12,20 +12,21 @@
#
import os
import sys
+from datetime import date
+
import sphinx_rtd_theme
+import bids
+
sys.path.insert(0, os.path.abspath('../'))
# -- Project information -----------------------------------------------------
project = 'PyBIDS'
-copyright = '2020, Developers of PyBIDS'
+copyright = '2015-{}, Developers of PyBIDS'.format(date.today().year)
author = 'Developers of PyBIDS'
currentdir = os.path.abspath(os.path.dirname(__file__))
-from bids._version import get_versions
-__version__ = get_versions()['version']
-source_version = __version__
currentdir = os.path.abspath(os.path.dirname(__file__))
# -- General configuration ---------------------------------------------------
@@ -44,20 +45,21 @@
'sphinx.ext.githubpages',
'sphinx.ext.napoleon',
'numpydoc',
- 'm2r'
]
intersphinx_mapping = {
- 'http://docs.python.org/3.5': None,
- 'http://docs.scipy.org/doc/numpy': None,
- 'http://docs.scipy.org/doc/scipy/reference': None,
- 'http://matplotlib.org/': None,
- 'http://scikit-learn.org/0.17': None,
- 'http://nipy.org/nibabel/': None,
- 'http://pandas.pydata.org/pandas-docs/stable/': None,
- 'http://neurosynth.readthedocs.io/en/latest/': None,
+ 'https://docs.python.org/3.5': None,
+ 'https://docs.scipy.org/doc/numpy': None,
+ 'https://docs.scipy.org/doc/scipy/reference': None,
+ 'https://matplotlib.org/': None,
+ 'https://scikit-learn.org/0.17': None,
+ 'https://nipy.org/nibabel/': None,
+ 'https://pandas.pydata.org/pandas-docs/stable/': None,
+ 'https://neurosynth.readthedocs.io/en/latest/': None,
}
+intersphinx_timeout = 5
+
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '2.2.0'
@@ -81,10 +83,9 @@
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The short X.Y version.
-version = ''
+version = bids.__version__
# The full version, including alpha/beta/rc tags.
-import bids
-release = bids.__version__
+release = version
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
|
{"golden_diff": "diff --git a/doc/conf.py b/doc/conf.py\n--- a/doc/conf.py\n+++ b/doc/conf.py\n@@ -12,20 +12,21 @@\n #\n import os\n import sys\n+from datetime import date\n+\n import sphinx_rtd_theme\n \n+import bids\n+\n sys.path.insert(0, os.path.abspath('../'))\n \n # -- Project information -----------------------------------------------------\n \n project = 'PyBIDS'\n-copyright = '2020, Developers of PyBIDS'\n+copyright = '2015-{}, Developers of PyBIDS'.format(date.today().year)\n author = 'Developers of PyBIDS'\n \n currentdir = os.path.abspath(os.path.dirname(__file__))\n-from bids._version import get_versions\n-__version__ = get_versions()['version']\n-source_version = __version__\n currentdir = os.path.abspath(os.path.dirname(__file__))\n \n # -- General configuration ---------------------------------------------------\n@@ -44,20 +45,21 @@\n 'sphinx.ext.githubpages',\n 'sphinx.ext.napoleon',\n 'numpydoc',\n- 'm2r'\n ]\n \n intersphinx_mapping = {\n- 'http://docs.python.org/3.5': None,\n- 'http://docs.scipy.org/doc/numpy': None,\n- 'http://docs.scipy.org/doc/scipy/reference': None,\n- 'http://matplotlib.org/': None,\n- 'http://scikit-learn.org/0.17': None,\n- 'http://nipy.org/nibabel/': None,\n- 'http://pandas.pydata.org/pandas-docs/stable/': None,\n- 'http://neurosynth.readthedocs.io/en/latest/': None,\n+ 'https://docs.python.org/3.5': None,\n+ 'https://docs.scipy.org/doc/numpy': None,\n+ 'https://docs.scipy.org/doc/scipy/reference': None,\n+ 'https://matplotlib.org/': None,\n+ 'https://scikit-learn.org/0.17': None,\n+ 'https://nipy.org/nibabel/': None,\n+ 'https://pandas.pydata.org/pandas-docs/stable/': None,\n+ 'https://neurosynth.readthedocs.io/en/latest/': None,\n }\n \n+intersphinx_timeout = 5\n+\n # If your documentation needs a minimal Sphinx version, state it here.\n needs_sphinx = '2.2.0'\n \n@@ -81,10 +83,9 @@\n exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n \n # The short X.Y version.\n-version = ''\n+version = bids.__version__\n # The full version, including alpha/beta/rc tags.\n-import bids\n-release = bids.__version__\n+release = version\n \n # The name of the Pygments (syntax highlighting) style to use.\n pygments_style = 'sphinx'\n", "issue": "Keeping the docs current\nWe're currently manually pushing the docs to https://bids-standard.github.io/pybids/, which is easy to forget, and the docs are currently out-of-date (0.9). It would be good to make an explicit process to update them or set up something automatic like ReadTheDocs.\r\n\r\nReadTheDocs also has the advantage of keeping old versions around, which might be useful for reference as the API changes.\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. 
If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\nimport sphinx_rtd_theme\n\nsys.path.insert(0, os.path.abspath('../'))\n\n# -- Project information -----------------------------------------------------\n\nproject = 'PyBIDS'\ncopyright = '2020, Developers of PyBIDS'\nauthor = 'Developers of PyBIDS'\n\ncurrentdir = os.path.abspath(os.path.dirname(__file__))\nfrom bids._version import get_versions\n__version__ = get_versions()['version']\nsource_version = __version__\ncurrentdir = os.path.abspath(os.path.dirname(__file__))\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.autosectionlabel',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.coverage',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.githubpages',\n 'sphinx.ext.napoleon',\n 'numpydoc',\n 'm2r'\n]\n\nintersphinx_mapping = {\n 'http://docs.python.org/3.5': None,\n 'http://docs.scipy.org/doc/numpy': None,\n 'http://docs.scipy.org/doc/scipy/reference': None,\n 'http://matplotlib.org/': None,\n 'http://scikit-learn.org/0.17': None,\n 'http://nipy.org/nibabel/': None,\n 'http://pandas.pydata.org/pandas-docs/stable/': None,\n 'http://neurosynth.readthedocs.io/en/latest/': None,\n}\n\n# If your documentation needs a minimal Sphinx version, state it here.\nneeds_sphinx = '2.2.0'\n\n# Generate stubs\nautosummary_generate = True\nautodoc_default_flags = ['members', 'inherited-members']\nadd_module_names = False\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = ['.rst', '.md']\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The short X.Y version.\nversion = ''\n# The full version, including alpha/beta/rc tags.\nimport bids\nrelease = bids.__version__\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = \"sphinx_rtd_theme\"\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# If false, no module index is generated.\nhtml_domain_indices = False\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'bids', 'bids Documentation',\n [author], 1)\n]\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'bids', 'bids Documentation',\n author, 'bids', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# If false, no module index is generated.\ntexinfo_domain_indices = False\n", "path": "doc/conf.py"}], "after_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\nfrom datetime import date\n\nimport sphinx_rtd_theme\n\nimport bids\n\nsys.path.insert(0, os.path.abspath('../'))\n\n# -- Project information -----------------------------------------------------\n\nproject = 'PyBIDS'\ncopyright = '2015-{}, Developers of PyBIDS'.format(date.today().year)\nauthor = 'Developers of PyBIDS'\n\ncurrentdir = os.path.abspath(os.path.dirname(__file__))\ncurrentdir = os.path.abspath(os.path.dirname(__file__))\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.autosectionlabel',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.coverage',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.githubpages',\n 'sphinx.ext.napoleon',\n 'numpydoc',\n]\n\nintersphinx_mapping = {\n 'https://docs.python.org/3.5': None,\n 'https://docs.scipy.org/doc/numpy': None,\n 'https://docs.scipy.org/doc/scipy/reference': None,\n 'https://matplotlib.org/': None,\n 'https://scikit-learn.org/0.17': None,\n 'https://nipy.org/nibabel/': None,\n 'https://pandas.pydata.org/pandas-docs/stable/': None,\n 'https://neurosynth.readthedocs.io/en/latest/': None,\n}\n\nintersphinx_timeout = 5\n\n# If your documentation needs a minimal Sphinx version, state it here.\nneeds_sphinx = '2.2.0'\n\n# Generate stubs\nautosummary_generate = True\nautodoc_default_flags = ['members', 'inherited-members']\nadd_module_names = False\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = ['.rst', '.md']\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The short X.Y version.\nversion = bids.__version__\n# The full version, including alpha/beta/rc tags.\nrelease = version\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\nhtml_theme = \"sphinx_rtd_theme\"\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# If false, no module index is generated.\nhtml_domain_indices = False\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'bids', 'bids Documentation',\n [author], 1)\n]\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'bids', 'bids Documentation',\n author, 'bids', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# If false, no module index is generated.\ntexinfo_domain_indices = False\n", "path": "doc/conf.py"}]}
| 1,641 | 634 |
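Before wiring the cleaned-up `doc/conf.py` above into ReadTheDocs, the docs can be built locally to confirm the configuration still works. A small sketch, assuming Sphinx ≥ 2.2 (the minimum declared in conf.py) and that the sources live in `doc/` as shown; the output directory is an arbitrary choice:

```python
from sphinx.cmd.build import build_main

# Build the HTML docs from the doc/ sources into doc/_build/html.
# build_main returns a non-zero exit status if the build fails.
status = build_main(["-b", "html", "doc", "doc/_build/html"])
raise SystemExit(status)
```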
gh_patches_debug_25600
|
rasdani/github-patches
|
git_diff
|
Pylons__pyramid-1566
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Suggestion: pcreate should list the template part of basic usage
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyramid/scripts/pcreate.py`
Content:
```
1 # (c) 2005 Ian Bicking and contributors; written for Paste
2 # (http://pythonpaste.org) Licensed under the MIT license:
3 # http://www.opensource.org/licenses/mit-license.php
4
5 import optparse
6 import os
7 import os.path
8 import pkg_resources
9 import re
10 import sys
11
12 _bad_chars_re = re.compile('[^a-zA-Z0-9_]')
13
14 def main(argv=sys.argv, quiet=False):
15 command = PCreateCommand(argv, quiet)
16 return command.run()
17
18 class PCreateCommand(object):
19 verbosity = 1 # required
20 description = "Render Pyramid scaffolding to an output directory"
21 usage = "usage: %prog [options] output_directory"
22 parser = optparse.OptionParser(usage, description=description)
23 parser.add_option('-s', '--scaffold',
24 dest='scaffold_name',
25 action='append',
26 help=("Add a scaffold to the create process "
27 "(multiple -s args accepted)"))
28 parser.add_option('-t', '--template',
29 dest='scaffold_name',
30 action='append',
31 help=('A backwards compatibility alias for '
32 '-s/--scaffold. Add a scaffold to the '
33 'create process (multiple -t args accepted)'))
34 parser.add_option('-l', '--list',
35 dest='list',
36 action='store_true',
37 help="List all available scaffold names")
38 parser.add_option('--list-templates',
39 dest='list',
40 action='store_true',
41 help=("A backwards compatibility alias for -l/--list. "
42 "List all available scaffold names."))
43 parser.add_option('--simulate',
44 dest='simulate',
45 action='store_true',
46 help='Simulate but do no work')
47 parser.add_option('--overwrite',
48 dest='overwrite',
49 action='store_true',
50 help='Always overwrite')
51 parser.add_option('--interactive',
52 dest='interactive',
53 action='store_true',
54 help='When a file would be overwritten, interrogate')
55
56 pyramid_dist = pkg_resources.get_distribution("pyramid")
57
58 def __init__(self, argv, quiet=False):
59 self.quiet = quiet
60 self.options, self.args = self.parser.parse_args(argv[1:])
61 self.scaffolds = self.all_scaffolds()
62
63 def run(self):
64 if self.options.list:
65 return self.show_scaffolds()
66 if not self.options.scaffold_name:
67 self.out('You must provide at least one scaffold name')
68 return 2
69 if not self.args:
70 self.out('You must provide a project name')
71 return 2
72 available = [x.name for x in self.scaffolds]
73 diff = set(self.options.scaffold_name).difference(available)
74 if diff:
75 self.out('Unavailable scaffolds: %s' % list(diff))
76 return 2
77 return self.render_scaffolds()
78
79 def render_scaffolds(self):
80 options = self.options
81 args = self.args
82 output_dir = os.path.abspath(os.path.normpath(args[0]))
83 project_name = os.path.basename(os.path.split(output_dir)[1])
84 pkg_name = _bad_chars_re.sub(
85 '', project_name.lower().replace('-', '_'))
86 safe_name = pkg_resources.safe_name(project_name)
87 egg_name = pkg_resources.to_filename(safe_name)
88
89 # get pyramid package version
90 pyramid_version = self.pyramid_dist.version
91
92 ## map pyramid package version of the documentation branch ##
93 # if version ends with 'dev' then docs version is 'master'
94 if self.pyramid_dist.version[-3:] == 'dev':
95 pyramid_docs_branch = 'master'
96 else:
97 # if not version is not 'dev' find the version.major_version string
98 # and combine it with '-branch'
99 version_match = re.match(r'(\d+\.\d+)', self.pyramid_dist.version)
100 if version_match is not None:
101 pyramid_docs_branch = "%s-branch" % version_match.group()
102 # if can not parse the version then default to 'latest'
103 else:
104 pyramid_docs_branch = 'latest'
105
106 vars = {
107 'project': project_name,
108 'package': pkg_name,
109 'egg': egg_name,
110 'pyramid_version': pyramid_version,
111 'pyramid_docs_branch': pyramid_docs_branch,
112 }
113 for scaffold_name in options.scaffold_name:
114 for scaffold in self.scaffolds:
115 if scaffold.name == scaffold_name:
116 scaffold.run(self, output_dir, vars)
117 return 0
118
119 def show_scaffolds(self):
120 scaffolds = sorted(self.scaffolds, key=lambda x: x.name)
121 if scaffolds:
122 max_name = max([len(t.name) for t in scaffolds])
123 self.out('Available scaffolds:')
124 for scaffold in scaffolds:
125 self.out(' %s:%s %s' % (
126 scaffold.name,
127 ' '*(max_name-len(scaffold.name)), scaffold.summary))
128 else:
129 self.out('No scaffolds available')
130 return 0
131
132 def all_scaffolds(self):
133 scaffolds = []
134 eps = list(pkg_resources.iter_entry_points('pyramid.scaffold'))
135 for entry in eps:
136 try:
137 scaffold_class = entry.load()
138 scaffold = scaffold_class(entry.name)
139 scaffolds.append(scaffold)
140 except Exception as e: # pragma: no cover
141 self.out('Warning: could not load entry point %s (%s: %s)' % (
142 entry.name, e.__class__.__name__, e))
143 return scaffolds
144
145 def out(self, msg): # pragma: no cover
146 if not self.quiet:
147 print(msg)
148
149 if __name__ == '__main__': # pragma: no cover
150 sys.exit(main() or 0)
151
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pyramid/scripts/pcreate.py b/pyramid/scripts/pcreate.py
--- a/pyramid/scripts/pcreate.py
+++ b/pyramid/scripts/pcreate.py
@@ -18,7 +18,7 @@
class PCreateCommand(object):
verbosity = 1 # required
description = "Render Pyramid scaffolding to an output directory"
- usage = "usage: %prog [options] output_directory"
+ usage = "usage: %prog [options] -s <scaffold> output_directory"
parser = optparse.OptionParser(usage, description=description)
parser.add_option('-s', '--scaffold',
dest='scaffold_name',
@@ -63,8 +63,16 @@
def run(self):
if self.options.list:
return self.show_scaffolds()
+ if not self.options.scaffold_name and not self.args:
+ if not self.quiet: # pragma: no cover
+ self.parser.print_help()
+ self.out('')
+ self.show_scaffolds()
+ return 2
if not self.options.scaffold_name:
- self.out('You must provide at least one scaffold name')
+ self.out('You must provide at least one scaffold name: -s <scaffold name>')
+ self.out('')
+ self.show_scaffolds()
return 2
if not self.args:
self.out('You must provide a project name')
|
{"golden_diff": "diff --git a/pyramid/scripts/pcreate.py b/pyramid/scripts/pcreate.py\n--- a/pyramid/scripts/pcreate.py\n+++ b/pyramid/scripts/pcreate.py\n@@ -18,7 +18,7 @@\n class PCreateCommand(object):\n verbosity = 1 # required\n description = \"Render Pyramid scaffolding to an output directory\"\n- usage = \"usage: %prog [options] output_directory\"\n+ usage = \"usage: %prog [options] -s <scaffold> output_directory\"\n parser = optparse.OptionParser(usage, description=description)\n parser.add_option('-s', '--scaffold',\n dest='scaffold_name',\n@@ -63,8 +63,16 @@\n def run(self):\n if self.options.list:\n return self.show_scaffolds()\n+ if not self.options.scaffold_name and not self.args:\n+ if not self.quiet: # pragma: no cover\n+ self.parser.print_help()\n+ self.out('')\n+ self.show_scaffolds()\n+ return 2\n if not self.options.scaffold_name:\n- self.out('You must provide at least one scaffold name')\n+ self.out('You must provide at least one scaffold name: -s <scaffold name>')\n+ self.out('')\n+ self.show_scaffolds()\n return 2\n if not self.args:\n self.out('You must provide a project name')\n", "issue": "Suggestion: pcreate should list the template part of basic usage\n\n", "before_files": [{"content": "# (c) 2005 Ian Bicking and contributors; written for Paste\n# (http://pythonpaste.org) Licensed under the MIT license:\n# http://www.opensource.org/licenses/mit-license.php\n\nimport optparse\nimport os\nimport os.path\nimport pkg_resources\nimport re\nimport sys\n\n_bad_chars_re = re.compile('[^a-zA-Z0-9_]')\n\ndef main(argv=sys.argv, quiet=False):\n command = PCreateCommand(argv, quiet)\n return command.run()\n\nclass PCreateCommand(object):\n verbosity = 1 # required\n description = \"Render Pyramid scaffolding to an output directory\"\n usage = \"usage: %prog [options] output_directory\"\n parser = optparse.OptionParser(usage, description=description)\n parser.add_option('-s', '--scaffold',\n dest='scaffold_name',\n action='append',\n help=(\"Add a scaffold to the create process \"\n \"(multiple -s args accepted)\"))\n parser.add_option('-t', '--template',\n dest='scaffold_name',\n action='append',\n help=('A backwards compatibility alias for '\n '-s/--scaffold. Add a scaffold to the '\n 'create process (multiple -t args accepted)'))\n parser.add_option('-l', '--list',\n dest='list',\n action='store_true',\n help=\"List all available scaffold names\")\n parser.add_option('--list-templates',\n dest='list',\n action='store_true',\n help=(\"A backwards compatibility alias for -l/--list. 
\"\n \"List all available scaffold names.\"))\n parser.add_option('--simulate',\n dest='simulate',\n action='store_true',\n help='Simulate but do no work')\n parser.add_option('--overwrite',\n dest='overwrite',\n action='store_true',\n help='Always overwrite')\n parser.add_option('--interactive',\n dest='interactive',\n action='store_true',\n help='When a file would be overwritten, interrogate')\n\n pyramid_dist = pkg_resources.get_distribution(\"pyramid\")\n\n def __init__(self, argv, quiet=False):\n self.quiet = quiet\n self.options, self.args = self.parser.parse_args(argv[1:])\n self.scaffolds = self.all_scaffolds()\n\n def run(self):\n if self.options.list:\n return self.show_scaffolds()\n if not self.options.scaffold_name:\n self.out('You must provide at least one scaffold name')\n return 2\n if not self.args:\n self.out('You must provide a project name')\n return 2\n available = [x.name for x in self.scaffolds]\n diff = set(self.options.scaffold_name).difference(available)\n if diff:\n self.out('Unavailable scaffolds: %s' % list(diff))\n return 2\n return self.render_scaffolds()\n\n def render_scaffolds(self):\n options = self.options\n args = self.args\n output_dir = os.path.abspath(os.path.normpath(args[0]))\n project_name = os.path.basename(os.path.split(output_dir)[1])\n pkg_name = _bad_chars_re.sub(\n '', project_name.lower().replace('-', '_'))\n safe_name = pkg_resources.safe_name(project_name)\n egg_name = pkg_resources.to_filename(safe_name)\n\n # get pyramid package version\n pyramid_version = self.pyramid_dist.version\n\n ## map pyramid package version of the documentation branch ##\n # if version ends with 'dev' then docs version is 'master'\n if self.pyramid_dist.version[-3:] == 'dev':\n pyramid_docs_branch = 'master'\n else:\n # if not version is not 'dev' find the version.major_version string\n # and combine it with '-branch'\n version_match = re.match(r'(\\d+\\.\\d+)', self.pyramid_dist.version)\n if version_match is not None:\n pyramid_docs_branch = \"%s-branch\" % version_match.group()\n # if can not parse the version then default to 'latest'\n else:\n pyramid_docs_branch = 'latest'\n\n vars = {\n 'project': project_name,\n 'package': pkg_name,\n 'egg': egg_name,\n 'pyramid_version': pyramid_version,\n 'pyramid_docs_branch': pyramid_docs_branch,\n }\n for scaffold_name in options.scaffold_name:\n for scaffold in self.scaffolds:\n if scaffold.name == scaffold_name:\n scaffold.run(self, output_dir, vars)\n return 0\n\n def show_scaffolds(self):\n scaffolds = sorted(self.scaffolds, key=lambda x: x.name)\n if scaffolds:\n max_name = max([len(t.name) for t in scaffolds])\n self.out('Available scaffolds:')\n for scaffold in scaffolds:\n self.out(' %s:%s %s' % (\n scaffold.name,\n ' '*(max_name-len(scaffold.name)), scaffold.summary))\n else:\n self.out('No scaffolds available')\n return 0\n\n def all_scaffolds(self):\n scaffolds = []\n eps = list(pkg_resources.iter_entry_points('pyramid.scaffold'))\n for entry in eps:\n try:\n scaffold_class = entry.load()\n scaffold = scaffold_class(entry.name)\n scaffolds.append(scaffold)\n except Exception as e: # pragma: no cover\n self.out('Warning: could not load entry point %s (%s: %s)' % (\n entry.name, e.__class__.__name__, e))\n return scaffolds\n\n def out(self, msg): # pragma: no cover\n if not self.quiet:\n print(msg)\n\nif __name__ == '__main__': # pragma: no cover\n sys.exit(main() or 0)\n", "path": "pyramid/scripts/pcreate.py"}], "after_files": [{"content": "# (c) 2005 Ian Bicking and contributors; written for Paste\n# 
(http://pythonpaste.org) Licensed under the MIT license:\n# http://www.opensource.org/licenses/mit-license.php\n\nimport optparse\nimport os\nimport os.path\nimport pkg_resources\nimport re\nimport sys\n\n_bad_chars_re = re.compile('[^a-zA-Z0-9_]')\n\ndef main(argv=sys.argv, quiet=False):\n command = PCreateCommand(argv, quiet)\n return command.run()\n\nclass PCreateCommand(object):\n verbosity = 1 # required\n description = \"Render Pyramid scaffolding to an output directory\"\n usage = \"usage: %prog [options] -s <scaffold> output_directory\"\n parser = optparse.OptionParser(usage, description=description)\n parser.add_option('-s', '--scaffold',\n dest='scaffold_name',\n action='append',\n help=(\"Add a scaffold to the create process \"\n \"(multiple -s args accepted)\"))\n parser.add_option('-t', '--template',\n dest='scaffold_name',\n action='append',\n help=('A backwards compatibility alias for '\n '-s/--scaffold. Add a scaffold to the '\n 'create process (multiple -t args accepted)'))\n parser.add_option('-l', '--list',\n dest='list',\n action='store_true',\n help=\"List all available scaffold names\")\n parser.add_option('--list-templates',\n dest='list',\n action='store_true',\n help=(\"A backwards compatibility alias for -l/--list. \"\n \"List all available scaffold names.\"))\n parser.add_option('--simulate',\n dest='simulate',\n action='store_true',\n help='Simulate but do no work')\n parser.add_option('--overwrite',\n dest='overwrite',\n action='store_true',\n help='Always overwrite')\n parser.add_option('--interactive',\n dest='interactive',\n action='store_true',\n help='When a file would be overwritten, interrogate')\n\n pyramid_dist = pkg_resources.get_distribution(\"pyramid\")\n\n def __init__(self, argv, quiet=False):\n self.quiet = quiet\n self.options, self.args = self.parser.parse_args(argv[1:])\n self.scaffolds = self.all_scaffolds()\n\n def run(self):\n if self.options.list:\n return self.show_scaffolds()\n if not self.options.scaffold_name and not self.args:\n if not self.quiet: # pragma: no cover\n self.parser.print_help()\n self.out('')\n self.show_scaffolds()\n return 2\n if not self.options.scaffold_name:\n self.out('You must provide at least one scaffold name: -s <scaffold name>')\n self.out('')\n self.show_scaffolds()\n return 2\n if not self.args:\n self.out('You must provide a project name')\n return 2\n available = [x.name for x in self.scaffolds]\n diff = set(self.options.scaffold_name).difference(available)\n if diff:\n self.out('Unavailable scaffolds: %s' % list(diff))\n return 2\n return self.render_scaffolds()\n\n def render_scaffolds(self):\n options = self.options\n args = self.args\n output_dir = os.path.abspath(os.path.normpath(args[0]))\n project_name = os.path.basename(os.path.split(output_dir)[1])\n pkg_name = _bad_chars_re.sub(\n '', project_name.lower().replace('-', '_'))\n safe_name = pkg_resources.safe_name(project_name)\n egg_name = pkg_resources.to_filename(safe_name)\n\n # get pyramid package version\n pyramid_version = self.pyramid_dist.version\n\n ## map pyramid package version of the documentation branch ##\n # if version ends with 'dev' then docs version is 'master'\n if self.pyramid_dist.version[-3:] == 'dev':\n pyramid_docs_branch = 'master'\n else:\n # if not version is not 'dev' find the version.major_version string\n # and combine it with '-branch'\n version_match = re.match(r'(\\d+\\.\\d+)', self.pyramid_dist.version)\n if version_match is not None:\n pyramid_docs_branch = \"%s-branch\" % version_match.group()\n # if can not 
parse the version then default to 'latest'\n else:\n pyramid_docs_branch = 'latest'\n\n vars = {\n 'project': project_name,\n 'package': pkg_name,\n 'egg': egg_name,\n 'pyramid_version': pyramid_version,\n 'pyramid_docs_branch': pyramid_docs_branch,\n }\n for scaffold_name in options.scaffold_name:\n for scaffold in self.scaffolds:\n if scaffold.name == scaffold_name:\n scaffold.run(self, output_dir, vars)\n return 0\n\n def show_scaffolds(self):\n scaffolds = sorted(self.scaffolds, key=lambda x: x.name)\n if scaffolds:\n max_name = max([len(t.name) for t in scaffolds])\n self.out('Available scaffolds:')\n for scaffold in scaffolds:\n self.out(' %s:%s %s' % (\n scaffold.name,\n ' '*(max_name-len(scaffold.name)), scaffold.summary))\n else:\n self.out('No scaffolds available')\n return 0\n\n def all_scaffolds(self):\n scaffolds = []\n eps = list(pkg_resources.iter_entry_points('pyramid.scaffold'))\n for entry in eps:\n try:\n scaffold_class = entry.load()\n scaffold = scaffold_class(entry.name)\n scaffolds.append(scaffold)\n except Exception as e: # pragma: no cover\n self.out('Warning: could not load entry point %s (%s: %s)' % (\n entry.name, e.__class__.__name__, e))\n return scaffolds\n\n def out(self, msg): # pragma: no cover\n if not self.quiet:\n print(msg)\n\nif __name__ == '__main__': # pragma: no cover\n sys.exit(main() or 0)\n", "path": "pyramid/scripts/pcreate.py"}]}
| 1,852 | 308 |
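The patch above boils down to a small CLI-validation pattern: when `pcreate` is run with neither a scaffold nor an output directory, print the parser's help, then the scaffold listing, and exit with a non-zero status. The sketch below reproduces that flow as a standalone optparse script; the `SCAFFOLDS` table and the `main`/`show_scaffolds` helpers are hypothetical stand-ins, not Pyramid code.

```python
# Standalone sketch of the "no arguments -> show help plus available choices"
# pattern applied in the patch above. Names here are illustrative.
import optparse
import sys

SCAFFOLDS = {"starter": "Starter project", "alchemy": "SQLAlchemy project"}  # hypothetical


def show_scaffolds():
    print("Available scaffolds:")
    for name, summary in sorted(SCAFFOLDS.items()):
        print("  %s: %s" % (name, summary))


def main(argv=sys.argv):
    usage = "usage: %prog [options] -s <scaffold> output_directory"
    parser = optparse.OptionParser(usage)
    parser.add_option("-s", "--scaffold", dest="scaffold_name", action="append",
                      help="Add a scaffold to the create process")
    options, args = parser.parse_args(argv[1:])

    if not options.scaffold_name and not args:
        # Bare invocation: print the full help text, then the known scaffolds,
        # and exit with an error code instead of a terse one-line complaint.
        parser.print_help()
        print()
        show_scaffolds()
        return 2
    if not options.scaffold_name:
        print("You must provide at least one scaffold name: -s <scaffold name>")
        print()
        show_scaffolds()
        return 2
    if not args:
        print("You must provide a project name")
        return 2
    print("would render %s into %s" % (options.scaffold_name, args[0]))
    return 0


if __name__ == "__main__":
    sys.exit(main())
```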
gh_patches_debug_38306
|
rasdani/github-patches
|
git_diff
|
mampfes__hacs_waste_collection_schedule-825
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Changes to Campbelltown Council URLs and APIs have broken the ability to retrieve waste collection info (campbelltown_nsw_gov_au.py)
Hi there,
I'm absolutely useless with Python or I'd be coding a fix and lodging a pull request, but instead am posting this as an issue at the community's mercy :)
The Campbelltown Council (Australia) API URLs have changed, and I believe that they're now presenting address search data a little differently too - XML instead of JSON.
The overall pattern still appears to be the same:
1. Retrieve the GeolocationID for a given address via the 'address_search' API
2. Plug that GeolocationID into the 'collection' API to retrieve the waste collection info in HTML format, wrapped in JSON
**URLs in existing version of Campbelltown NSW AU Waste Collection Source:**
```
API_URLS = {
"address_search": "https://www.campbelltown.nsw.gov.au/ocsvc/public/spatial/findaddress?address={}",
"collection": "https://www.campbelltown.nsw.gov.au/ocsvc/Public/InMyNeighbourhood/WasteServices?GeoLocationId={}",
}
```
**URLs that work now:**
```
API_URLS = {
"address_search": "https://www.campbelltown.nsw.gov.au/api/v1/myarea/search?keywords={}",
"collection": "https://www.campbelltown.nsw.gov.au/ocapi/Public/myarea/wasteservices?geolocationid={}&ocsvclang=en-AU",
}
```
I'm not familiar with the old Campbelltown address search API, but looking at the code it appears that it used to return JSON data. It's now returning pure XML.
**Examples using the new API URLs:**
Address search result: [https://www.campbelltown.nsw.gov.au/api/v1/myarea/search?keywords=10%20Brookfield%20Road%20Minto%20NSW%202566](https://www.campbelltown.nsw.gov.au/api/v1/myarea/search?keywords=10%20Brookfield%20Road%20Minto%20NSW%202566)
Collection API result for the above GeolocationID: [https://www.campbelltown.nsw.gov.au/ocapi/Public/myarea/wasteservices?geolocationid=401ee13b-b04c-4948-82e4-09d274c479a0&ocsvclang=en-AU](https://www.campbelltown.nsw.gov.au/ocapi/Public/myarea/wasteservices?geolocationid=401ee13b-b04c-4948-82e4-09d274c479a0&ocsvclang=en-AU)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `custom_components/waste_collection_schedule/waste_collection_schedule/source/campbelltown_nsw_gov_au.py`
Content:
```
1 import datetime
2 import json
3
4 import requests
5 from bs4 import BeautifulSoup
6 from requests.utils import requote_uri
7 from waste_collection_schedule import Collection
8
9 TITLE = "Campbelltown City Council"
10 DESCRIPTION = "Source for Campbelltown City Council rubbish collection."
11 URL = "https://www.campbelltown.nsw.gov.au/"
12 TEST_CASES = {
13 "Minto Mall": {
14 "post_code": "2566",
15 "suburb": "Minto",
16 "street_name": "Brookfield Road",
17 "street_number": "10",
18 },
19 "Campbelltown Catholic Club": {
20 "post_code": "2560",
21 "suburb": "Campbelltown",
22 "street_name": "Camden Road",
23 "street_number": "20-22",
24 },
25 "Australia Post Ingleburn": {
26 "post_code": "2565",
27 "suburb": "INGLEBURN",
28 "street_name": "Oxford Road",
29 "street_number": "34",
30 },
31 }
32
33 API_URLS = {
34 "address_search": "https://www.campbelltown.nsw.gov.au/ocsvc/public/spatial/findaddress?address={}",
35 "collection": "https://www.campbelltown.nsw.gov.au/ocsvc/Public/InMyNeighbourhood/WasteServices?GeoLocationId={}",
36 }
37
38 HEADERS = {"user-agent": "Mozilla/5.0"}
39
40 ICON_MAP = {
41 "General Waste": "trash-can",
42 "Recycling": "mdi:recycle",
43 "Green Waste": "mdi:leaf",
44 }
45
46
47 class Source:
48 def __init__(
49 self, post_code: str, suburb: str, street_name: str, street_number: str
50 ):
51 self.post_code = post_code
52 self.suburb = suburb
53 self.street_name = street_name
54 self.street_number = street_number
55
56 def fetch(self):
57 locationId = 0
58
59 address = "{} {} {} NSW {}".format(
60 self.street_number, self.street_name, self.suburb, self.post_code
61 )
62
63 q = requote_uri(str(API_URLS["address_search"]).format(address))
64
65 # Retrieve suburbs
66 r = requests.get(q, headers=HEADERS)
67
68 data = json.loads(r.text)
69
70 # Find the ID for our suburb
71 for item in data["locations"]:
72 locationId = item["Id"]
73 break
74
75 if locationId == 0:
76 return []
77
78 # Retrieve the upcoming collections for our property
79 q = requote_uri(str(API_URLS["collection"]).format(locationId))
80
81 r = requests.get(q, headers=HEADERS)
82
83 data = json.loads(r.text)
84
85 responseContent = data["responseContent"]
86
87 soup = BeautifulSoup(responseContent, "html.parser")
88 services = soup.find_all("div", attrs={"class": "service-details"})
89
90 entries = []
91
92 for item in services:
93 # test if <div> contains a valid date. If not, is is not a collection item.
94 date_text = item.find("span")
95 try:
96 date = datetime.datetime.strptime(date_text.text, "%A%d %b %Y").date()
97
98 except ValueError:
99 continue
100
101 waste_type = item.contents[0].strip()
102
103 entries.append(
104 Collection(
105 date=date,
106 t=waste_type,
107 icon=ICON_MAP.get(waste_type, "mdi:trash-can"),
108 )
109 )
110
111 return entries
112
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/campbelltown_nsw_gov_au.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/campbelltown_nsw_gov_au.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/campbelltown_nsw_gov_au.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/campbelltown_nsw_gov_au.py
@@ -31,8 +31,8 @@
}
API_URLS = {
- "address_search": "https://www.campbelltown.nsw.gov.au/ocsvc/public/spatial/findaddress?address={}",
- "collection": "https://www.campbelltown.nsw.gov.au/ocsvc/Public/InMyNeighbourhood/WasteServices?GeoLocationId={}",
+ "address_search": "https://www.campbelltown.nsw.gov.au/api/v1/myarea/search?keywords={}",
+ "collection": "https://www.campbelltown.nsw.gov.au/ocapi/Public/myarea/wasteservices?geolocationid={}&ocsvclang=en-AU",
}
HEADERS = {"user-agent": "Mozilla/5.0"}
@@ -43,7 +43,6 @@
"Green Waste": "mdi:leaf",
}
-
class Source:
def __init__(
self, post_code: str, suburb: str, street_name: str, street_number: str
@@ -68,7 +67,7 @@
data = json.loads(r.text)
# Find the ID for our suburb
- for item in data["locations"]:
+ for item in data["Items"]:
locationId = item["Id"]
break
@@ -85,20 +84,28 @@
responseContent = data["responseContent"]
soup = BeautifulSoup(responseContent, "html.parser")
- services = soup.find_all("div", attrs={"class": "service-details"})
+ services = soup.find_all("div", attrs={"class": "waste-services-result"})
entries = []
for item in services:
# test if <div> contains a valid date. If not, is is not a collection item.
- date_text = item.find("span")
+ date_text = item.find("div", attrs={"class": "next-service"})
+
+ # The date format currently used on https://www.campbelltown.nsw.gov.au/Services-and-Facilities/Waste-and-Recycling/Check-my-collection-day
+ date_format = '%a %d/%m/%Y'
+
try:
- date = datetime.datetime.strptime(date_text.text, "%A%d %b %Y").date()
+ # Strip carriage returns and newlines out of the HTML content
+ cleaned_date_text = date_text.text.replace('\r','').replace('\n','').strip()
+
+ # Parse the date
+ date = datetime.datetime.strptime(cleaned_date_text, date_format).date()
except ValueError:
continue
- waste_type = item.contents[0].strip()
+ waste_type = item.find("h3").text.strip()
entries.append(
Collection(
|
{"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/campbelltown_nsw_gov_au.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/campbelltown_nsw_gov_au.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/campbelltown_nsw_gov_au.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/campbelltown_nsw_gov_au.py\n@@ -31,8 +31,8 @@\n }\n \n API_URLS = {\n- \"address_search\": \"https://www.campbelltown.nsw.gov.au/ocsvc/public/spatial/findaddress?address={}\",\n- \"collection\": \"https://www.campbelltown.nsw.gov.au/ocsvc/Public/InMyNeighbourhood/WasteServices?GeoLocationId={}\",\n+ \"address_search\": \"https://www.campbelltown.nsw.gov.au/api/v1/myarea/search?keywords={}\",\n+ \"collection\": \"https://www.campbelltown.nsw.gov.au/ocapi/Public/myarea/wasteservices?geolocationid={}&ocsvclang=en-AU\",\n }\n \n HEADERS = {\"user-agent\": \"Mozilla/5.0\"}\n@@ -43,7 +43,6 @@\n \"Green Waste\": \"mdi:leaf\",\n }\n \n-\n class Source:\n def __init__(\n self, post_code: str, suburb: str, street_name: str, street_number: str\n@@ -68,7 +67,7 @@\n data = json.loads(r.text)\n \n # Find the ID for our suburb\n- for item in data[\"locations\"]:\n+ for item in data[\"Items\"]:\n locationId = item[\"Id\"]\n break\n \n@@ -85,20 +84,28 @@\n responseContent = data[\"responseContent\"]\n \n soup = BeautifulSoup(responseContent, \"html.parser\")\n- services = soup.find_all(\"div\", attrs={\"class\": \"service-details\"})\n+ services = soup.find_all(\"div\", attrs={\"class\": \"waste-services-result\"})\n \n entries = []\n \n for item in services:\n # test if <div> contains a valid date. If not, is is not a collection item.\n- date_text = item.find(\"span\")\n+ date_text = item.find(\"div\", attrs={\"class\": \"next-service\"})\n+ \n+ # The date format currently used on https://www.campbelltown.nsw.gov.au/Services-and-Facilities/Waste-and-Recycling/Check-my-collection-day\n+ date_format = '%a %d/%m/%Y'\n+\n try:\n- date = datetime.datetime.strptime(date_text.text, \"%A%d %b %Y\").date()\n+ # Strip carriage returns and newlines out of the HTML content\n+ cleaned_date_text = date_text.text.replace('\\r','').replace('\\n','').strip()\n+\n+ # Parse the date\n+ date = datetime.datetime.strptime(cleaned_date_text, date_format).date()\n \n except ValueError:\n continue\n \n- waste_type = item.contents[0].strip()\n+ waste_type = item.find(\"h3\").text.strip()\n \n entries.append(\n Collection(\n", "issue": "Changes to Campbelltown Council URLs and APIs have broken the ability to retrieve waste collection info (campbelltown_nsw_gov_au.py)\nHi there,\r\n\r\nI'm absolutely useless with Python or I'd be coding a fix and lodging a pull request, but instead am posting this as an issue at the community's mercy :)\r\n\r\nThe Campbelltown Council (Australia) API URLs have changed, and I believe that they're now presenting address search data a little differently too - XML instead of JSON.\r\n\r\nThe overall pattern still appears to be the same:\r\n\r\n1. Retrieve the GeolocationID for a given address via the 'address_search' API\r\n2. 
Plug that GeolocationID into the 'collection' API to retrieve the waste collection info in HTML format, wrapped in JSON\r\n\r\n**URLs in existing version of Campbelltown NSW AU Waste Collection Source:**\r\n```\r\nAPI_URLS = {\r\n \"address_search\": \"https://www.campbelltown.nsw.gov.au/ocsvc/public/spatial/findaddress?address={}\",\r\n \"collection\": \"https://www.campbelltown.nsw.gov.au/ocsvc/Public/InMyNeighbourhood/WasteServices?GeoLocationId={}\",\r\n}\r\n```\r\n\r\n**URLs that work now:**\r\n```\r\nAPI_URLS = {\r\n \"address_search\": \"https://www.campbelltown.nsw.gov.au/api/v1/myarea/search?keywords={}\",\r\n \"collection\": \"https://www.campbelltown.nsw.gov.au/ocapi/Public/myarea/wasteservices?geolocationid={}&ocsvclang=en-AU\",\r\n}\r\n```\r\n\r\nI'm not familiar with the old Campbelltown address search API, but looking at the code it appears that it used to return JSON data. It's now returning pure XML. \r\n\r\n**Examples using the new API URLs:**\r\nAddress search result: [https://www.campbelltown.nsw.gov.au/api/v1/myarea/search?keywords=10%20Brookfield%20Road%20Minto%20NSW%202566](https://www.campbelltown.nsw.gov.au/api/v1/myarea/search?keywords=10%20Brookfield%20Road%20Minto%20NSW%202566)\r\n\r\nCollection API result for the above GeolocationID: [https://www.campbelltown.nsw.gov.au/ocapi/Public/myarea/wasteservices?geolocationid=401ee13b-b04c-4948-82e4-09d274c479a0&ocsvclang=en-AU](https://www.campbelltown.nsw.gov.au/ocapi/Public/myarea/wasteservices?geolocationid=401ee13b-b04c-4948-82e4-09d274c479a0&ocsvclang=en-AU)\n", "before_files": [{"content": "import datetime\nimport json\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom requests.utils import requote_uri\nfrom waste_collection_schedule import Collection\n\nTITLE = \"Campbelltown City Council\"\nDESCRIPTION = \"Source for Campbelltown City Council rubbish collection.\"\nURL = \"https://www.campbelltown.nsw.gov.au/\"\nTEST_CASES = {\n \"Minto Mall\": {\n \"post_code\": \"2566\",\n \"suburb\": \"Minto\",\n \"street_name\": \"Brookfield Road\",\n \"street_number\": \"10\",\n },\n \"Campbelltown Catholic Club\": {\n \"post_code\": \"2560\",\n \"suburb\": \"Campbelltown\",\n \"street_name\": \"Camden Road\",\n \"street_number\": \"20-22\",\n },\n \"Australia Post Ingleburn\": {\n \"post_code\": \"2565\",\n \"suburb\": \"INGLEBURN\",\n \"street_name\": \"Oxford Road\",\n \"street_number\": \"34\",\n },\n}\n\nAPI_URLS = {\n \"address_search\": \"https://www.campbelltown.nsw.gov.au/ocsvc/public/spatial/findaddress?address={}\",\n \"collection\": \"https://www.campbelltown.nsw.gov.au/ocsvc/Public/InMyNeighbourhood/WasteServices?GeoLocationId={}\",\n}\n\nHEADERS = {\"user-agent\": \"Mozilla/5.0\"}\n\nICON_MAP = {\n \"General Waste\": \"trash-can\",\n \"Recycling\": \"mdi:recycle\",\n \"Green Waste\": \"mdi:leaf\",\n}\n\n\nclass Source:\n def __init__(\n self, post_code: str, suburb: str, street_name: str, street_number: str\n ):\n self.post_code = post_code\n self.suburb = suburb\n self.street_name = street_name\n self.street_number = street_number\n\n def fetch(self):\n locationId = 0\n\n address = \"{} {} {} NSW {}\".format(\n self.street_number, self.street_name, self.suburb, self.post_code\n )\n\n q = requote_uri(str(API_URLS[\"address_search\"]).format(address))\n\n # Retrieve suburbs\n r = requests.get(q, headers=HEADERS)\n\n data = json.loads(r.text)\n\n # Find the ID for our suburb\n for item in data[\"locations\"]:\n locationId = item[\"Id\"]\n break\n\n if locationId == 0:\n return []\n\n # Retrieve the 
upcoming collections for our property\n q = requote_uri(str(API_URLS[\"collection\"]).format(locationId))\n\n r = requests.get(q, headers=HEADERS)\n\n data = json.loads(r.text)\n\n responseContent = data[\"responseContent\"]\n\n soup = BeautifulSoup(responseContent, \"html.parser\")\n services = soup.find_all(\"div\", attrs={\"class\": \"service-details\"})\n\n entries = []\n\n for item in services:\n # test if <div> contains a valid date. If not, is is not a collection item.\n date_text = item.find(\"span\")\n try:\n date = datetime.datetime.strptime(date_text.text, \"%A%d %b %Y\").date()\n\n except ValueError:\n continue\n\n waste_type = item.contents[0].strip()\n\n entries.append(\n Collection(\n date=date,\n t=waste_type,\n icon=ICON_MAP.get(waste_type, \"mdi:trash-can\"),\n )\n )\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/campbelltown_nsw_gov_au.py"}], "after_files": [{"content": "import datetime\nimport json\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom requests.utils import requote_uri\nfrom waste_collection_schedule import Collection\n\nTITLE = \"Campbelltown City Council\"\nDESCRIPTION = \"Source for Campbelltown City Council rubbish collection.\"\nURL = \"https://www.campbelltown.nsw.gov.au/\"\nTEST_CASES = {\n \"Minto Mall\": {\n \"post_code\": \"2566\",\n \"suburb\": \"Minto\",\n \"street_name\": \"Brookfield Road\",\n \"street_number\": \"10\",\n },\n \"Campbelltown Catholic Club\": {\n \"post_code\": \"2560\",\n \"suburb\": \"Campbelltown\",\n \"street_name\": \"Camden Road\",\n \"street_number\": \"20-22\",\n },\n \"Australia Post Ingleburn\": {\n \"post_code\": \"2565\",\n \"suburb\": \"INGLEBURN\",\n \"street_name\": \"Oxford Road\",\n \"street_number\": \"34\",\n },\n}\n\nAPI_URLS = {\n \"address_search\": \"https://www.campbelltown.nsw.gov.au/api/v1/myarea/search?keywords={}\",\n \"collection\": \"https://www.campbelltown.nsw.gov.au/ocapi/Public/myarea/wasteservices?geolocationid={}&ocsvclang=en-AU\",\n}\n\nHEADERS = {\"user-agent\": \"Mozilla/5.0\"}\n\nICON_MAP = {\n \"General Waste\": \"trash-can\",\n \"Recycling\": \"mdi:recycle\",\n \"Green Waste\": \"mdi:leaf\",\n}\n\nclass Source:\n def __init__(\n self, post_code: str, suburb: str, street_name: str, street_number: str\n ):\n self.post_code = post_code\n self.suburb = suburb\n self.street_name = street_name\n self.street_number = street_number\n\n def fetch(self):\n locationId = 0\n\n address = \"{} {} {} NSW {}\".format(\n self.street_number, self.street_name, self.suburb, self.post_code\n )\n\n q = requote_uri(str(API_URLS[\"address_search\"]).format(address))\n\n # Retrieve suburbs\n r = requests.get(q, headers=HEADERS)\n\n data = json.loads(r.text)\n\n # Find the ID for our suburb\n for item in data[\"Items\"]:\n locationId = item[\"Id\"]\n break\n\n if locationId == 0:\n return []\n\n # Retrieve the upcoming collections for our property\n q = requote_uri(str(API_URLS[\"collection\"]).format(locationId))\n\n r = requests.get(q, headers=HEADERS)\n\n data = json.loads(r.text)\n\n responseContent = data[\"responseContent\"]\n\n soup = BeautifulSoup(responseContent, \"html.parser\")\n services = soup.find_all(\"div\", attrs={\"class\": \"waste-services-result\"})\n\n entries = []\n\n for item in services:\n # test if <div> contains a valid date. 
If not, is is not a collection item.\n date_text = item.find(\"div\", attrs={\"class\": \"next-service\"})\n \n # The date format currently used on https://www.campbelltown.nsw.gov.au/Services-and-Facilities/Waste-and-Recycling/Check-my-collection-day\n date_format = '%a %d/%m/%Y'\n\n try:\n # Strip carriage returns and newlines out of the HTML content\n cleaned_date_text = date_text.text.replace('\\r','').replace('\\n','').strip()\n\n # Parse the date\n date = datetime.datetime.strptime(cleaned_date_text, date_format).date()\n\n except ValueError:\n continue\n\n waste_type = item.find(\"h3\").text.strip()\n\n entries.append(\n Collection(\n date=date,\n t=waste_type,\n icon=ICON_MAP.get(waste_type, \"mdi:trash-can\"),\n )\n )\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/campbelltown_nsw_gov_au.py"}]}
| 1,889 | 699 |
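For reference, the two-step lookup described in the issue and encoded in the patch above can be exercised on its own. The endpoints, JSON field names (`Items`, `Id`, `responseContent`), CSS classes, and the `%a %d/%m/%Y` date format are taken from the patch; the rest of this sketch (error handling, the sample address) is illustrative rather than a drop-in replacement for the integration.

```python
# Minimal sketch of the Campbelltown two-step flow: address -> geolocation id,
# then geolocation id -> waste-services HTML parsed for collection dates.
import datetime

import requests
from bs4 import BeautifulSoup

HEADERS = {"user-agent": "Mozilla/5.0"}
SEARCH_URL = "https://www.campbelltown.nsw.gov.au/api/v1/myarea/search"
COLLECTION_URL = "https://www.campbelltown.nsw.gov.au/ocapi/Public/myarea/wasteservices"


def next_collections(address):
    # Step 1: the search endpoint returns JSON whose "Items" list holds the
    # geolocation id ("Id") for each matching address.
    items = requests.get(
        SEARCH_URL, params={"keywords": address}, headers=HEADERS
    ).json()["Items"]
    if not items:
        return []
    location_id = items[0]["Id"]

    # Step 2: the collection endpoint wraps an HTML fragment in the JSON field
    # "responseContent"; dates sit in div.next-service formatted "%a %d/%m/%Y".
    html = requests.get(
        COLLECTION_URL,
        params={"geolocationid": location_id, "ocsvclang": "en-AU"},
        headers=HEADERS,
    ).json()["responseContent"]
    soup = BeautifulSoup(html, "html.parser")

    results = []
    for block in soup.find_all("div", attrs={"class": "waste-services-result"}):
        date_div = block.find("div", attrs={"class": "next-service"})
        if date_div is None:
            continue
        text = date_div.text.replace("\r", "").replace("\n", "").strip()
        try:
            date = datetime.datetime.strptime(text, "%a %d/%m/%Y").date()
        except ValueError:
            continue  # not a dated collection entry
        results.append((block.find("h3").text.strip(), date))
    return results


if __name__ == "__main__":
    for waste_type, date in next_collections("10 Brookfield Road Minto NSW 2566"):
        print(waste_type, date)
```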
gh_patches_debug_25514
|
rasdani/github-patches
|
git_diff
|
DataDog__dd-trace-py-1049
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
httplib patch doesn't seem to support a default pin
I'm trying to set up `httplib` patching in my application and I'm struggling to get it to send spans with a `service` set. I am not certain, but I think that the documentation and code for the `httplib` patch in `ddtrace.contrib.httplib` is broken, and doesn't support setting up a default service to send spans as.
Here's my reproduction case:
```python
import logging
logging.basicConfig(level=logging.DEBUG)
from ddtrace import patch
patch(httplib=True)
# Code taken from the ddtrace-py docs for httplib on python 3
from ddtrace import Pin
import http.client
import urllib.request
# Use a Pin to specify metadata for all http requests
Pin.override(http.client, service='httplib')
resp = urllib.request.urlopen('https://www.datadoghq.com/')
```
If I run that locally on the latest master, I get messages like these from the `dd-trace-agent` running on my machine:
```
2018-11-16 14:51:47 ERROR (api.go:249) - dropping trace reason: invalid span (SpanID:11098683504101424356): span.normalize: empty `Service` (debug for more info), [name:"http.client.request" resource:"http.client.request" traceID:2003532794568525569 spanID:11098683504101424356 start:1542397906651150080 duration:274516105 meta:<key:"http.method" value:"GET" > meta:<key:"http.status_code" value:"200" > meta:<key:"http.url" value:"https://www.datadoghq.com/" > meta:<key:"system.pid" value:"29250" > type:"http" ]
```
I did some debugging locally, and it looks like the default Pin added to the `HTTPSConnection` instance in the wrapped constructor (https://github.com/DataDog/dd-trace-py/blob/v0.16.0/ddtrace/contrib/httplib/patch.py#L19) is the one that ends up feeding the span's their service value. The pin added in the example code (as suggested by the docs) isn't having any effect. Unless there's something I'm missing, this seems like it makes sense, because the pin the docs suggest adding is on the `http.client` module, not on the classes inside nor on instances of those classes. How was that supposed to work @palazzem?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ddtrace/contrib/httplib/__init__.py`
Content:
```
1 """
2 Patch the built-in httplib/http.client libraries to trace all HTTP calls.
3
4
5 Usage::
6
7 # Patch all supported modules/functions
8 from ddtrace import patch
9 patch(httplib=True)
10
11 # Python 2
12 from ddtrace import Pin
13 import httplib
14 import urllib
15
16 # Use a Pin to specify metadata for all http requests
17 Pin.override(httplib, service='httplib')
18 resp = urllib.urlopen('http://www.datadog.com/')
19
20 # Python 3
21 from ddtrace import Pin
22 import http.client
23 import urllib.request
24
25 # Use a Pin to specify metadata for all http requests
26 Pin.override(http.client, service='httplib')
27 resp = urllib.request.urlopen('http://www.datadog.com/')
28
29 :ref:`Headers tracing <http-headers-tracing>` is supported for this integration.
30 """
31 from .patch import patch, unpatch
32 __all__ = ['patch', 'unpatch']
33
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ddtrace/contrib/httplib/__init__.py b/ddtrace/contrib/httplib/__init__.py
--- a/ddtrace/contrib/httplib/__init__.py
+++ b/ddtrace/contrib/httplib/__init__.py
@@ -1,5 +1,5 @@
"""
-Patch the built-in httplib/http.client libraries to trace all HTTP calls.
+Patch the built-in ``httplib``/``http.client`` libraries to trace all HTTP calls.
Usage::
@@ -9,23 +9,24 @@
patch(httplib=True)
# Python 2
- from ddtrace import Pin
import httplib
import urllib
- # Use a Pin to specify metadata for all http requests
- Pin.override(httplib, service='httplib')
resp = urllib.urlopen('http://www.datadog.com/')
# Python 3
- from ddtrace import Pin
import http.client
import urllib.request
- # Use a Pin to specify metadata for all http requests
- Pin.override(http.client, service='httplib')
resp = urllib.request.urlopen('http://www.datadog.com/')
+``httplib`` spans do not include a default service name. Before HTTP calls are
+made, ensure a parent span has been started with a service name to be used for
+spans generated from those calls::
+
+ with tracer.trace('main', service='my-httplib-operation'):
+ resp = urllib.request.urlopen('http://www.datadog.com/')
+
:ref:`Headers tracing <http-headers-tracing>` is supported for this integration.
"""
from .patch import patch, unpatch
|
{"golden_diff": "diff --git a/ddtrace/contrib/httplib/__init__.py b/ddtrace/contrib/httplib/__init__.py\n--- a/ddtrace/contrib/httplib/__init__.py\n+++ b/ddtrace/contrib/httplib/__init__.py\n@@ -1,5 +1,5 @@\n \"\"\"\n-Patch the built-in httplib/http.client libraries to trace all HTTP calls.\n+Patch the built-in ``httplib``/``http.client`` libraries to trace all HTTP calls.\n \n \n Usage::\n@@ -9,23 +9,24 @@\n patch(httplib=True)\n \n # Python 2\n- from ddtrace import Pin\n import httplib\n import urllib\n \n- # Use a Pin to specify metadata for all http requests\n- Pin.override(httplib, service='httplib')\n resp = urllib.urlopen('http://www.datadog.com/')\n \n # Python 3\n- from ddtrace import Pin\n import http.client\n import urllib.request\n \n- # Use a Pin to specify metadata for all http requests\n- Pin.override(http.client, service='httplib')\n resp = urllib.request.urlopen('http://www.datadog.com/')\n \n+``httplib`` spans do not include a default service name. Before HTTP calls are\n+made, ensure a parent span has been started with a service name to be used for\n+spans generated from those calls::\n+\n+ with tracer.trace('main', service='my-httplib-operation'):\n+ resp = urllib.request.urlopen('http://www.datadog.com/')\n+\n :ref:`Headers tracing <http-headers-tracing>` is supported for this integration.\n \"\"\"\n from .patch import patch, unpatch\n", "issue": "httplib patch doesn't seem to support a default pin\nI'm trying to set up `httplib` patching in my application and I'm struggling to get it to send spans with a `service` set. I am not certain, but I think that the documentation and code for the `httplib` patch in `ddtrace.contrib.httplib` is broken, and doesn't support setting up a default service to send spans as.\r\n\r\nHere's my reproduction case:\r\n\r\n```python\r\nimport logging\r\nlogging.basicConfig(level=logging.DEBUG)\r\n\r\nfrom ddtrace import patch\r\npatch(httplib=True)\r\n\r\n# Code taken from the ddtrace-py docs for httplib on python 3\r\nfrom ddtrace import Pin\r\nimport http.client\r\nimport urllib.request\r\n\r\n# Use a Pin to specify metadata for all http requests\r\nPin.override(http.client, service='httplib')\r\nresp = urllib.request.urlopen('https://www.datadoghq.com/')\r\n```\r\n\r\nIf I run that locally on the latest master, I get messages like these from the `dd-trace-agent` running on my machine:\r\n\r\n```\r\n2018-11-16 14:51:47 ERROR (api.go:249) - dropping trace reason: invalid span (SpanID:11098683504101424356): span.normalize: empty `Service` (debug for more info), [name:\"http.client.request\" resource:\"http.client.request\" traceID:2003532794568525569 spanID:11098683504101424356 start:1542397906651150080 duration:274516105 meta:<key:\"http.method\" value:\"GET\" > meta:<key:\"http.status_code\" value:\"200\" > meta:<key:\"http.url\" value:\"https://www.datadoghq.com/\" > meta:<key:\"system.pid\" value:\"29250\" > type:\"http\" ]\r\n```\r\n\r\nI did some debugging locally, and it looks like the default Pin added to the `HTTPSConnection` instance in the wrapped constructor (https://github.com/DataDog/dd-trace-py/blob/v0.16.0/ddtrace/contrib/httplib/patch.py#L19) is the one that ends up feeding the span's their service value. The pin added in the example code (as suggested by the docs) isn't having any effect. Unless there's something I'm missing, this seems like it makes sense, because the pin the docs suggest adding is on the `http.client` module, not on the classes inside nor on instances of those classes. 
How was that supposed to work @palazzem? \n", "before_files": [{"content": "\"\"\"\nPatch the built-in httplib/http.client libraries to trace all HTTP calls.\n\n\nUsage::\n\n # Patch all supported modules/functions\n from ddtrace import patch\n patch(httplib=True)\n\n # Python 2\n from ddtrace import Pin\n import httplib\n import urllib\n\n # Use a Pin to specify metadata for all http requests\n Pin.override(httplib, service='httplib')\n resp = urllib.urlopen('http://www.datadog.com/')\n\n # Python 3\n from ddtrace import Pin\n import http.client\n import urllib.request\n\n # Use a Pin to specify metadata for all http requests\n Pin.override(http.client, service='httplib')\n resp = urllib.request.urlopen('http://www.datadog.com/')\n\n:ref:`Headers tracing <http-headers-tracing>` is supported for this integration.\n\"\"\"\nfrom .patch import patch, unpatch\n__all__ = ['patch', 'unpatch']\n", "path": "ddtrace/contrib/httplib/__init__.py"}], "after_files": [{"content": "\"\"\"\nPatch the built-in ``httplib``/``http.client`` libraries to trace all HTTP calls.\n\n\nUsage::\n\n # Patch all supported modules/functions\n from ddtrace import patch\n patch(httplib=True)\n\n # Python 2\n import httplib\n import urllib\n\n resp = urllib.urlopen('http://www.datadog.com/')\n\n # Python 3\n import http.client\n import urllib.request\n\n resp = urllib.request.urlopen('http://www.datadog.com/')\n\n``httplib`` spans do not include a default service name. Before HTTP calls are\nmade, ensure a parent span has been started with a service name to be used for\nspans generated from those calls::\n\n with tracer.trace('main', service='my-httplib-operation'):\n resp = urllib.request.urlopen('http://www.datadog.com/')\n\n:ref:`Headers tracing <http-headers-tracing>` is supported for this integration.\n\"\"\"\nfrom .patch import patch, unpatch\n__all__ = ['patch', 'unpatch']\n", "path": "ddtrace/contrib/httplib/__init__.py"}]}
| 1,131 | 364 |
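The resolution the patch above documents is that `httplib` spans no longer get a default service from a module-level `Pin`; instead, HTTP calls should run inside a parent span that carries the service name. A minimal sketch of that documented usage, reusing the same illustrative names the updated docs use:

```python
# Sketch of the documented usage: start a parent span with an explicit service
# name so patched http.client spans created inside it report under it.
from ddtrace import patch, tracer

patch(httplib=True)

import urllib.request  # urllib.request drives http.client, which patch() instruments


def fetch_homepage():
    # Spans from the patched http.client inside this block are children of
    # "main" and therefore inherit its service for reporting.
    with tracer.trace("main", service="my-httplib-operation"):
        return urllib.request.urlopen("http://www.datadog.com/").read()


if __name__ == "__main__":
    print(len(fetch_homepage()))
```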
gh_patches_debug_38355
|
rasdani/github-patches
|
git_diff
|
biolab__orange3-4157
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Neighbors: remove Info box and display data info in status bar
Follow an example where the change of this widget was already reported in [the blog](https://orange.biolab.si/blog/2019/3/8/the-changing-status-bar/). Change the documentation accordingly.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Orange/widgets/data/owneighbors.py`
Content:
```
1 import numpy as np
2
3 from AnyQt.QtCore import Qt
4
5 from Orange.data import Table, Domain, ContinuousVariable
6 from Orange.data.util import get_unique_names
7 from Orange.preprocess import RemoveNaNColumns, Impute
8 from Orange import distance
9 from Orange.widgets import gui
10 from Orange.widgets.settings import Setting
11 from Orange.widgets.utils.signals import Input, Output
12 from Orange.widgets.widget import OWWidget, Msg
13 from Orange.widgets.utils.widgetpreview import WidgetPreview
14
15 METRICS = [
16 ("Euclidean", distance.Euclidean),
17 ("Manhattan", distance.Manhattan),
18 ("Mahalanobis", distance.Mahalanobis),
19 ("Cosine", distance.Cosine),
20 ("Jaccard", distance.Jaccard),
21 ("Spearman", distance.SpearmanR),
22 ("Absolute Spearman", distance.SpearmanRAbsolute),
23 ("Pearson", distance.PearsonR),
24 ("Absolute Pearson", distance.PearsonRAbsolute),
25 ]
26
27
28 class OWNeighbors(OWWidget):
29 name = "Neighbors"
30 description = "Compute nearest neighbors in data according to reference."
31 icon = "icons/Neighbors.svg"
32
33 replaces = ["orangecontrib.prototypes.widgets.owneighbours.OWNeighbours"]
34
35 class Inputs:
36 data = Input("Data", Table)
37 reference = Input("Reference", Table)
38
39 class Outputs:
40 data = Output("Neighbors", Table)
41
42 class Warning(OWWidget.Warning):
43 all_data_as_reference = \
44 Msg("Every data instance is same as some reference")
45
46 class Error(OWWidget.Error):
47 diff_domains = Msg("Data and reference have different features")
48
49 n_neighbors: int
50 distance_index: int
51
52 n_neighbors = Setting(10)
53 distance_index = Setting(0)
54 exclude_reference = Setting(True)
55 auto_apply = Setting(True)
56
57 want_main_area = False
58 buttons_area_orientation = Qt.Vertical
59
60 def __init__(self):
61 super().__init__()
62
63 self.data = None
64 self.reference = None
65 self.distances = None
66
67 box = gui.vBox(self.controlArea, "Info")
68 self.data_info_label = gui.widgetLabel(box, "")
69 self.reference_info_label = gui.widgetLabel(box, "")
70 self._set_label_text("data")
71 self._set_label_text("reference")
72
73 box = gui.vBox(self.controlArea, box=True)
74 gui.comboBox(
75 box, self, "distance_index", orientation=Qt.Horizontal,
76 label="Distance: ", items=[d[0] for d in METRICS],
77 callback=self.recompute)
78 gui.spin(
79 box, self, "n_neighbors", label="Number of neighbors:",
80 step=1, spinType=int, minv=0, maxv=100,
81 # call apply by gui.auto_commit, pylint: disable=unnecessary-lambda
82 callback=lambda: self.apply())
83 gui.checkBox(
84 box, self, "exclude_reference",
85 label="Exclude rows (equal to) references",
86 # call apply by gui.auto_commit, pylint: disable=unnecessary-lambda
87 callback=lambda: self.apply())
88
89 self.apply_button = gui.auto_apply(self.controlArea, self, commit=self.apply)
90
91 def _set_label_text(self, name):
92 data = getattr(self, name)
93 label = getattr(self, f"{name}_info_label")
94 if data is None:
95 label.setText(f"No {name} instances")
96 else:
97 pl = "s" if data else ""
98 label.setText(f"{len(data)} {name} instance{pl} on input.")
99
100 @Inputs.data
101 def set_data(self, data):
102 self.data = data
103 self._set_label_text("data")
104
105 @Inputs.reference
106 def set_ref(self, refs):
107 self.reference = refs
108 self._set_label_text("reference")
109
110 def handleNewSignals(self):
111 self.compute_distances()
112 self.unconditional_apply()
113
114 def recompute(self):
115 self.compute_distances()
116 self.apply()
117
118 def compute_distances(self):
119 self.Error.diff_domains.clear()
120 if not self.data or not self.reference:
121 self.distances = None
122 return
123 if set(self.reference.domain.attributes) != \
124 set(self.data.domain.attributes):
125 self.Error.diff_domains()
126 self.distances = None
127 return
128
129 metric = METRICS[self.distance_index][1]
130 n_ref = len(self.reference)
131
132 # comparing only attributes, no metas and class-vars
133 new_domain = Domain(self.data.domain.attributes)
134 reference = self.reference.transform(new_domain)
135 data = self.data.transform(new_domain)
136
137 all_data = Table.concatenate([reference, data], 0)
138 pp_all_data = Impute()(RemoveNaNColumns()(all_data))
139 pp_reference, pp_data = pp_all_data[:n_ref], pp_all_data[n_ref:]
140 self.distances = metric(pp_data, pp_reference).min(axis=1)
141
142 def apply(self):
143 indices = self._compute_indices()
144 if indices is None:
145 neighbors = None
146 else:
147 neighbors = self._data_with_similarity(indices)
148 self.Outputs.data.send(neighbors)
149
150 def _compute_indices(self):
151 self.Warning.all_data_as_reference.clear()
152 dist = self.distances
153 if dist is None:
154 return None
155 if self.exclude_reference:
156 non_ref = dist > 1e-5
157 skip = len(dist) - non_ref.sum()
158 up_to = min(self.n_neighbors + skip, len(dist))
159 if skip >= up_to:
160 self.Warning.all_data_as_reference()
161 return None
162 indices = np.argpartition(dist, up_to - 1)[:up_to]
163 return indices[non_ref[indices]]
164 else:
165 up_to = min(self.n_neighbors, len(dist))
166 return np.argpartition(dist, up_to - 1)[:up_to]
167
168 def _data_with_similarity(self, indices):
169 data = self.data
170 varname = get_unique_names(data.domain, "distance")
171 metas = data.domain.metas + (ContinuousVariable(varname), )
172 domain = Domain(data.domain.attributes, data.domain.class_vars, metas)
173 data_metas = self.distances[indices].reshape((-1, 1))
174 if data.domain.metas:
175 data_metas = np.hstack((data.metas[indices], data_metas))
176 neighbors = Table(domain, data.X[indices], data.Y[indices], data_metas)
177 neighbors.attributes = self.data.attributes
178 return neighbors
179
180
181 if __name__ == "__main__": # pragma: no cover
182 iris = Table("iris.tab")
183 WidgetPreview(OWNeighbors).run(
184 set_data=iris,
185 set_ref=iris[:1])
186
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/Orange/widgets/data/owneighbors.py b/Orange/widgets/data/owneighbors.py
--- a/Orange/widgets/data/owneighbors.py
+++ b/Orange/widgets/data/owneighbors.py
@@ -64,12 +64,6 @@
self.reference = None
self.distances = None
- box = gui.vBox(self.controlArea, "Info")
- self.data_info_label = gui.widgetLabel(box, "")
- self.reference_info_label = gui.widgetLabel(box, "")
- self._set_label_text("data")
- self._set_label_text("reference")
-
box = gui.vBox(self.controlArea, box=True)
gui.comboBox(
box, self, "distance_index", orientation=Qt.Horizontal,
@@ -87,29 +81,33 @@
callback=lambda: self.apply())
self.apply_button = gui.auto_apply(self.controlArea, self, commit=self.apply)
-
- def _set_label_text(self, name):
- data = getattr(self, name)
- label = getattr(self, f"{name}_info_label")
- if data is None:
- label.setText(f"No {name} instances")
+ self.info.set_input_summary(self.info.NoInput)
+ self.info.set_output_summary(self.info.NoOutput)
+
+ def _set_input_summary(self):
+ n_data = len(self.data) if self.data else 0
+ n_refs = len(self.reference) if self.reference else 0
+
+ if n_data or n_refs:
+ details = \
+ f"{n_data if n_data else 'No'} data instance(s) on input\n" \
+ f"{n_refs if n_refs else 'No'} reference instance(s) on input "
+ self.info.set_input_summary(f"{n_data} | {n_refs} ", details)
else:
- pl = "s" if data else ""
- label.setText(f"{len(data)} {name} instance{pl} on input.")
+ self.info.set_input_summary(self.info.NoInput)
@Inputs.data
def set_data(self, data):
self.data = data
- self._set_label_text("data")
@Inputs.reference
def set_ref(self, refs):
self.reference = refs
- self._set_label_text("reference")
def handleNewSignals(self):
self.compute_distances()
self.unconditional_apply()
+ self._set_input_summary()
def recompute(self):
self.compute_distances()
@@ -141,10 +139,13 @@
def apply(self):
indices = self._compute_indices()
+
if indices is None:
neighbors = None
+ self.info.set_output_summary(self.info.NoOutput)
else:
neighbors = self._data_with_similarity(indices)
+ self.info.set_output_summary(str(len(neighbors)))
self.Outputs.data.send(neighbors)
def _compute_indices(self):
|
{"golden_diff": "diff --git a/Orange/widgets/data/owneighbors.py b/Orange/widgets/data/owneighbors.py\n--- a/Orange/widgets/data/owneighbors.py\n+++ b/Orange/widgets/data/owneighbors.py\n@@ -64,12 +64,6 @@\n self.reference = None\n self.distances = None\n \n- box = gui.vBox(self.controlArea, \"Info\")\n- self.data_info_label = gui.widgetLabel(box, \"\")\n- self.reference_info_label = gui.widgetLabel(box, \"\")\n- self._set_label_text(\"data\")\n- self._set_label_text(\"reference\")\n-\n box = gui.vBox(self.controlArea, box=True)\n gui.comboBox(\n box, self, \"distance_index\", orientation=Qt.Horizontal,\n@@ -87,29 +81,33 @@\n callback=lambda: self.apply())\n \n self.apply_button = gui.auto_apply(self.controlArea, self, commit=self.apply)\n-\n- def _set_label_text(self, name):\n- data = getattr(self, name)\n- label = getattr(self, f\"{name}_info_label\")\n- if data is None:\n- label.setText(f\"No {name} instances\")\n+ self.info.set_input_summary(self.info.NoInput)\n+ self.info.set_output_summary(self.info.NoOutput)\n+\n+ def _set_input_summary(self):\n+ n_data = len(self.data) if self.data else 0\n+ n_refs = len(self.reference) if self.reference else 0\n+\n+ if n_data or n_refs:\n+ details = \\\n+ f\"{n_data if n_data else 'No'} data instance(s) on input\\n\" \\\n+ f\"{n_refs if n_refs else 'No'} reference instance(s) on input \"\n+ self.info.set_input_summary(f\"{n_data} | {n_refs} \", details)\n else:\n- pl = \"s\" if data else \"\"\n- label.setText(f\"{len(data)} {name} instance{pl} on input.\")\n+ self.info.set_input_summary(self.info.NoInput)\n \n @Inputs.data\n def set_data(self, data):\n self.data = data\n- self._set_label_text(\"data\")\n \n @Inputs.reference\n def set_ref(self, refs):\n self.reference = refs\n- self._set_label_text(\"reference\")\n \n def handleNewSignals(self):\n self.compute_distances()\n self.unconditional_apply()\n+ self._set_input_summary()\n \n def recompute(self):\n self.compute_distances()\n@@ -141,10 +139,13 @@\n \n def apply(self):\n indices = self._compute_indices()\n+\n if indices is None:\n neighbors = None\n+ self.info.set_output_summary(self.info.NoOutput)\n else:\n neighbors = self._data_with_similarity(indices)\n+ self.info.set_output_summary(str(len(neighbors)))\n self.Outputs.data.send(neighbors)\n \n def _compute_indices(self):\n", "issue": "Neighbors: remove Info box and display data info in status bar\nFollow an example where the change of this widget was already reported in [the blog](https://orange.biolab.si/blog/2019/3/8/the-changing-status-bar/). 
Change the documentation accordingly.\r\n\r\n\r\n\n", "before_files": [{"content": "import numpy as np\n\nfrom AnyQt.QtCore import Qt\n\nfrom Orange.data import Table, Domain, ContinuousVariable\nfrom Orange.data.util import get_unique_names\nfrom Orange.preprocess import RemoveNaNColumns, Impute\nfrom Orange import distance\nfrom Orange.widgets import gui\nfrom Orange.widgets.settings import Setting\nfrom Orange.widgets.utils.signals import Input, Output\nfrom Orange.widgets.widget import OWWidget, Msg\nfrom Orange.widgets.utils.widgetpreview import WidgetPreview\n\nMETRICS = [\n (\"Euclidean\", distance.Euclidean),\n (\"Manhattan\", distance.Manhattan),\n (\"Mahalanobis\", distance.Mahalanobis),\n (\"Cosine\", distance.Cosine),\n (\"Jaccard\", distance.Jaccard),\n (\"Spearman\", distance.SpearmanR),\n (\"Absolute Spearman\", distance.SpearmanRAbsolute),\n (\"Pearson\", distance.PearsonR),\n (\"Absolute Pearson\", distance.PearsonRAbsolute),\n]\n\n\nclass OWNeighbors(OWWidget):\n name = \"Neighbors\"\n description = \"Compute nearest neighbors in data according to reference.\"\n icon = \"icons/Neighbors.svg\"\n\n replaces = [\"orangecontrib.prototypes.widgets.owneighbours.OWNeighbours\"]\n\n class Inputs:\n data = Input(\"Data\", Table)\n reference = Input(\"Reference\", Table)\n\n class Outputs:\n data = Output(\"Neighbors\", Table)\n\n class Warning(OWWidget.Warning):\n all_data_as_reference = \\\n Msg(\"Every data instance is same as some reference\")\n\n class Error(OWWidget.Error):\n diff_domains = Msg(\"Data and reference have different features\")\n\n n_neighbors: int\n distance_index: int\n\n n_neighbors = Setting(10)\n distance_index = Setting(0)\n exclude_reference = Setting(True)\n auto_apply = Setting(True)\n\n want_main_area = False\n buttons_area_orientation = Qt.Vertical\n\n def __init__(self):\n super().__init__()\n\n self.data = None\n self.reference = None\n self.distances = None\n\n box = gui.vBox(self.controlArea, \"Info\")\n self.data_info_label = gui.widgetLabel(box, \"\")\n self.reference_info_label = gui.widgetLabel(box, \"\")\n self._set_label_text(\"data\")\n self._set_label_text(\"reference\")\n\n box = gui.vBox(self.controlArea, box=True)\n gui.comboBox(\n box, self, \"distance_index\", orientation=Qt.Horizontal,\n label=\"Distance: \", items=[d[0] for d in METRICS],\n callback=self.recompute)\n gui.spin(\n box, self, \"n_neighbors\", label=\"Number of neighbors:\",\n step=1, spinType=int, minv=0, maxv=100,\n # call apply by gui.auto_commit, pylint: disable=unnecessary-lambda\n callback=lambda: self.apply())\n gui.checkBox(\n box, self, \"exclude_reference\",\n label=\"Exclude rows (equal to) references\",\n # call apply by gui.auto_commit, pylint: disable=unnecessary-lambda\n callback=lambda: self.apply())\n\n self.apply_button = gui.auto_apply(self.controlArea, self, commit=self.apply)\n\n def _set_label_text(self, name):\n data = getattr(self, name)\n label = getattr(self, f\"{name}_info_label\")\n if data is None:\n label.setText(f\"No {name} instances\")\n else:\n pl = \"s\" if data else \"\"\n label.setText(f\"{len(data)} {name} instance{pl} on input.\")\n\n @Inputs.data\n def set_data(self, data):\n self.data = data\n self._set_label_text(\"data\")\n\n @Inputs.reference\n def set_ref(self, refs):\n self.reference = refs\n self._set_label_text(\"reference\")\n\n def handleNewSignals(self):\n self.compute_distances()\n self.unconditional_apply()\n\n def recompute(self):\n self.compute_distances()\n self.apply()\n\n def compute_distances(self):\n 
self.Error.diff_domains.clear()\n if not self.data or not self.reference:\n self.distances = None\n return\n if set(self.reference.domain.attributes) != \\\n set(self.data.domain.attributes):\n self.Error.diff_domains()\n self.distances = None\n return\n\n metric = METRICS[self.distance_index][1]\n n_ref = len(self.reference)\n\n # comparing only attributes, no metas and class-vars\n new_domain = Domain(self.data.domain.attributes)\n reference = self.reference.transform(new_domain)\n data = self.data.transform(new_domain)\n\n all_data = Table.concatenate([reference, data], 0)\n pp_all_data = Impute()(RemoveNaNColumns()(all_data))\n pp_reference, pp_data = pp_all_data[:n_ref], pp_all_data[n_ref:]\n self.distances = metric(pp_data, pp_reference).min(axis=1)\n\n def apply(self):\n indices = self._compute_indices()\n if indices is None:\n neighbors = None\n else:\n neighbors = self._data_with_similarity(indices)\n self.Outputs.data.send(neighbors)\n\n def _compute_indices(self):\n self.Warning.all_data_as_reference.clear()\n dist = self.distances\n if dist is None:\n return None\n if self.exclude_reference:\n non_ref = dist > 1e-5\n skip = len(dist) - non_ref.sum()\n up_to = min(self.n_neighbors + skip, len(dist))\n if skip >= up_to:\n self.Warning.all_data_as_reference()\n return None\n indices = np.argpartition(dist, up_to - 1)[:up_to]\n return indices[non_ref[indices]]\n else:\n up_to = min(self.n_neighbors, len(dist))\n return np.argpartition(dist, up_to - 1)[:up_to]\n\n def _data_with_similarity(self, indices):\n data = self.data\n varname = get_unique_names(data.domain, \"distance\")\n metas = data.domain.metas + (ContinuousVariable(varname), )\n domain = Domain(data.domain.attributes, data.domain.class_vars, metas)\n data_metas = self.distances[indices].reshape((-1, 1))\n if data.domain.metas:\n data_metas = np.hstack((data.metas[indices], data_metas))\n neighbors = Table(domain, data.X[indices], data.Y[indices], data_metas)\n neighbors.attributes = self.data.attributes\n return neighbors\n\n\nif __name__ == \"__main__\": # pragma: no cover\n iris = Table(\"iris.tab\")\n WidgetPreview(OWNeighbors).run(\n set_data=iris,\n set_ref=iris[:1])\n", "path": "Orange/widgets/data/owneighbors.py"}], "after_files": [{"content": "import numpy as np\n\nfrom AnyQt.QtCore import Qt\n\nfrom Orange.data import Table, Domain, ContinuousVariable\nfrom Orange.data.util import get_unique_names\nfrom Orange.preprocess import RemoveNaNColumns, Impute\nfrom Orange import distance\nfrom Orange.widgets import gui\nfrom Orange.widgets.settings import Setting\nfrom Orange.widgets.utils.signals import Input, Output\nfrom Orange.widgets.widget import OWWidget, Msg\nfrom Orange.widgets.utils.widgetpreview import WidgetPreview\n\nMETRICS = [\n (\"Euclidean\", distance.Euclidean),\n (\"Manhattan\", distance.Manhattan),\n (\"Mahalanobis\", distance.Mahalanobis),\n (\"Cosine\", distance.Cosine),\n (\"Jaccard\", distance.Jaccard),\n (\"Spearman\", distance.SpearmanR),\n (\"Absolute Spearman\", distance.SpearmanRAbsolute),\n (\"Pearson\", distance.PearsonR),\n (\"Absolute Pearson\", distance.PearsonRAbsolute),\n]\n\n\nclass OWNeighbors(OWWidget):\n name = \"Neighbors\"\n description = \"Compute nearest neighbors in data according to reference.\"\n icon = \"icons/Neighbors.svg\"\n\n replaces = [\"orangecontrib.prototypes.widgets.owneighbours.OWNeighbours\"]\n\n class Inputs:\n data = Input(\"Data\", Table)\n reference = Input(\"Reference\", Table)\n\n class Outputs:\n data = Output(\"Neighbors\", Table)\n\n class 
Warning(OWWidget.Warning):\n all_data_as_reference = \\\n Msg(\"Every data instance is same as some reference\")\n\n class Error(OWWidget.Error):\n diff_domains = Msg(\"Data and reference have different features\")\n\n n_neighbors: int\n distance_index: int\n\n n_neighbors = Setting(10)\n distance_index = Setting(0)\n exclude_reference = Setting(True)\n auto_apply = Setting(True)\n\n want_main_area = False\n buttons_area_orientation = Qt.Vertical\n\n def __init__(self):\n super().__init__()\n\n self.data = None\n self.reference = None\n self.distances = None\n\n box = gui.vBox(self.controlArea, box=True)\n gui.comboBox(\n box, self, \"distance_index\", orientation=Qt.Horizontal,\n label=\"Distance: \", items=[d[0] for d in METRICS],\n callback=self.recompute)\n gui.spin(\n box, self, \"n_neighbors\", label=\"Number of neighbors:\",\n step=1, spinType=int, minv=0, maxv=100,\n # call apply by gui.auto_commit, pylint: disable=unnecessary-lambda\n callback=lambda: self.apply())\n gui.checkBox(\n box, self, \"exclude_reference\",\n label=\"Exclude rows (equal to) references\",\n # call apply by gui.auto_commit, pylint: disable=unnecessary-lambda\n callback=lambda: self.apply())\n\n self.apply_button = gui.auto_apply(self.controlArea, self, commit=self.apply)\n self.info.set_input_summary(self.info.NoInput)\n self.info.set_output_summary(self.info.NoOutput)\n\n def _set_input_summary(self):\n n_data = len(self.data) if self.data else 0\n n_refs = len(self.reference) if self.reference else 0\n\n if n_data or n_refs:\n details = \\\n f\"{n_data if n_data else 'No'} data instance(s) on input\\n\" \\\n f\"{n_refs if n_refs else 'No'} reference instance(s) on input \"\n self.info.set_input_summary(f\"{n_data} | {n_refs} \", details)\n else:\n self.info.set_input_summary(self.info.NoInput)\n\n @Inputs.data\n def set_data(self, data):\n self.data = data\n\n @Inputs.reference\n def set_ref(self, refs):\n self.reference = refs\n\n def handleNewSignals(self):\n self.compute_distances()\n self.unconditional_apply()\n self._set_input_summary()\n\n def recompute(self):\n self.compute_distances()\n self.apply()\n\n def compute_distances(self):\n self.Error.diff_domains.clear()\n if not self.data or not self.reference:\n self.distances = None\n return\n if set(self.reference.domain.attributes) != \\\n set(self.data.domain.attributes):\n self.Error.diff_domains()\n self.distances = None\n return\n\n metric = METRICS[self.distance_index][1]\n n_ref = len(self.reference)\n\n # comparing only attributes, no metas and class-vars\n new_domain = Domain(self.data.domain.attributes)\n reference = self.reference.transform(new_domain)\n data = self.data.transform(new_domain)\n\n all_data = Table.concatenate([reference, data], 0)\n pp_all_data = Impute()(RemoveNaNColumns()(all_data))\n pp_reference, pp_data = pp_all_data[:n_ref], pp_all_data[n_ref:]\n self.distances = metric(pp_data, pp_reference).min(axis=1)\n\n def apply(self):\n indices = self._compute_indices()\n\n if indices is None:\n neighbors = None\n self.info.set_output_summary(self.info.NoOutput)\n else:\n neighbors = self._data_with_similarity(indices)\n self.info.set_output_summary(str(len(neighbors)))\n self.Outputs.data.send(neighbors)\n\n def _compute_indices(self):\n self.Warning.all_data_as_reference.clear()\n dist = self.distances\n if dist is None:\n return None\n if self.exclude_reference:\n non_ref = dist > 1e-5\n skip = len(dist) - non_ref.sum()\n up_to = min(self.n_neighbors + skip, len(dist))\n if skip >= up_to:\n 
self.Warning.all_data_as_reference()\n return None\n indices = np.argpartition(dist, up_to - 1)[:up_to]\n return indices[non_ref[indices]]\n else:\n up_to = min(self.n_neighbors, len(dist))\n return np.argpartition(dist, up_to - 1)[:up_to]\n\n def _data_with_similarity(self, indices):\n data = self.data\n varname = get_unique_names(data.domain, \"distance\")\n metas = data.domain.metas + (ContinuousVariable(varname), )\n domain = Domain(data.domain.attributes, data.domain.class_vars, metas)\n data_metas = self.distances[indices].reshape((-1, 1))\n if data.domain.metas:\n data_metas = np.hstack((data.metas[indices], data_metas))\n neighbors = Table(domain, data.X[indices], data.Y[indices], data_metas)\n neighbors.attributes = self.data.attributes\n return neighbors\n\n\nif __name__ == \"__main__\": # pragma: no cover\n iris = Table(\"iris.tab\")\n WidgetPreview(OWNeighbors).run(\n set_data=iris,\n set_ref=iris[:1])\n", "path": "Orange/widgets/data/owneighbors.py"}]}
| 2,272 | 642 |
gh_patches_debug_35305
|
rasdani/github-patches
|
git_diff
|
fossasia__open-event-server-6281
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Server checks for ticket prices are missing
**Describe the bug**
Server-side checks are required for ticket prices. This would also allow decimal values greater than zero to pass through (similar to Eventbrite).
**Additional context**
Corresponding FE PR that requires these checks: https://github.com/fossasia/open-event-frontend/pull/3341
--- END ISSUE ---
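Before the file listings, here is a hand-written sketch of the kind of server-side check the issue asks for. It is illustrative only; the repository's real fix appears in the golden diff further down, and the use of `float()` (so that decimal prices above zero still pass, as the issue suggests) is an assumption of this sketch.

```python
# Illustrative sketch only: a server-side price check of the shape the issue
# requests. The helper name and exception mirror the project's conventions,
# but this is not the repository's actual patch.
from app.api.helpers.exceptions import UnprocessableEntity


def validate_ticket_price(data):
    """Reject non-positive prices for paid/donation tickets."""
    if data.get('type') == 'free':
        return  # free tickets may keep a zero price
    try:
        price = float(data.get('price', 0))
    except (TypeError, ValueError):
        raise UnprocessableEntity(
            {'pointer': '/data/attributes/price'}, "Ticket price must be a number")
    if price <= 0:
        raise UnprocessableEntity(
            {'pointer': '/data/attributes/price'},
            "Price of a paid/donation ticket must be greater than zero")
```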
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/api/tickets.py`
Content:
```
1 from flask import request, current_app
2 from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship
3 from flask_rest_jsonapi.exceptions import ObjectNotFound
4 from flask_jwt_extended import current_user, verify_jwt_in_request
5 from sqlalchemy.orm.exc import NoResultFound
6
7 from app.api.bootstrap import api
8 from app.api.helpers.db import safe_query
9 from app.api.helpers.permission_manager import has_access
10 from app.api.helpers.query import event_query
11 from app.api.helpers.utilities import require_relationship
12 from app.api.schema.tickets import TicketSchema, TicketSchemaPublic
13 from app.models import db
14 from app.models.access_code import AccessCode
15 from app.models.discount_code import DiscountCode
16 from app.models.order import Order
17 from app.models.ticket import Ticket, TicketTag, ticket_tags_table
18 from app.models.event import Event
19 from app.models.ticket_holder import TicketHolder
20 from app.api.helpers.exceptions import ConflictException, MethodNotAllowed, UnprocessableEntity
21 from app.api.helpers.db import get_count
22
23 class TicketListPost(ResourceList):
24 """
25 Create and List Tickets
26 """
27 def before_post(self, args, kwargs, data):
28 """
29 before post method to check for required relationship and proper permission
30 :param args:
31 :param kwargs:
32 :param data:
33 :return:
34 """
35 require_relationship(['event'], data)
36 if not has_access('is_coorganizer', event_id=data['event']):
37 raise ObjectNotFound({'parameter': 'event_id'},
38 "Event: {} not found".format(data['event']))
39
40 if get_count(db.session.query(Ticket.id).filter_by(name=data['name'], event_id=int(data['event']),
41 deleted_at=None)) > 0:
42 raise ConflictException({'pointer': '/data/attributes/name'}, "Ticket already exists")
43
44 if get_count(db.session.query(Event).filter_by(id=int(data['event']), is_ticketing_enabled=False)) > 0:
45 raise MethodNotAllowed({'parameter': 'event_id'}, "Ticketing is disabled for this Event")
46
47 def before_create_object(self, data, view_kwargs):
48 """
49 before create method to check if paid ticket has a paymentMethod enabled
50 :param data:
51 :param view_kwargs:
52 :return:
53 """
54 if data.get('type') == 'paid' and data.get('event'):
55 try:
56 event = db.session.query(Event).filter_by(id=data['event'], deleted_at=None).one()
57 except NoResultFound:
58 raise UnprocessableEntity({'event_id': data['event']}, "Event does not exist")
59 if not event.is_payment_enabled():
60 raise UnprocessableEntity(
61 {'event_id': data['event']}, "Event having paid ticket must have a payment method")
62
63 schema = TicketSchema
64 methods = ['POST', ]
65 data_layer = {'session': db.session,
66 'model': Ticket,
67 'methods': {
68 'before_create_object': before_create_object,
69 'before_post': before_post
70 }}
71
72
73 class TicketList(ResourceList):
74 """
75 List Tickets based on different params
76 """
77 def before_get(self, args, view_kwargs):
78 """
79 before get method to get the resource id for assigning schema
80 :param args:
81 :param view_kwargs:
82 :return:
83 """
84 if view_kwargs.get('ticket_tag_id') or view_kwargs.get('access_code_id') or view_kwargs.get('order_identifier'):
85 self.schema = TicketSchemaPublic
86
87 def query(self, view_kwargs):
88 """
89 query method for resource list
90 :param view_kwargs:
91 :return:
92 """
93
94 if 'Authorization' in request.headers:
95 verify_jwt_in_request()
96 if current_user.is_super_admin or current_user.is_admin:
97 query_ = self.session.query(Ticket)
98 elif view_kwargs.get('event_id') and has_access('is_organizer', event_id=view_kwargs['event_id']):
99 query_ = self.session.query(Ticket)
100 else:
101 query_ = self.session.query(Ticket).filter_by(is_hidden=False)
102 else:
103 query_ = self.session.query(Ticket).filter_by(is_hidden=False)
104
105 if view_kwargs.get('ticket_tag_id'):
106 ticket_tag = safe_query(self, TicketTag, 'id', view_kwargs['ticket_tag_id'], 'ticket_tag_id')
107 query_ = query_.join(ticket_tags_table).filter_by(ticket_tag_id=ticket_tag.id)
108 query_ = event_query(self, query_, view_kwargs)
109 if view_kwargs.get('access_code_id'):
110 access_code = safe_query(self, AccessCode, 'id', view_kwargs['access_code_id'], 'access_code_id')
111 # access_code - ticket :: many-to-many relationship
112 query_ = Ticket.query.filter(Ticket.access_codes.any(id=access_code.id))
113
114 if view_kwargs.get('discount_code_id'):
115 discount_code = safe_query(self, DiscountCode, 'id', view_kwargs['discount_code_id'], 'discount_code_id')
116 # discount_code - ticket :: many-to-many relationship
117 query_ = Ticket.query.filter(Ticket.discount_codes.any(id=discount_code.id))
118
119 if view_kwargs.get('order_identifier'):
120 order = safe_query(self, Order, 'identifier', view_kwargs['order_identifier'], 'order_identifier')
121 ticket_ids = []
122 for ticket in order.tickets:
123 ticket_ids.append(ticket.id)
124 query_ = query_.filter(Ticket.id.in_(tuple(ticket_ids)))
125
126 return query_
127
128 view_kwargs = True
129 methods = ['GET', ]
130 decorators = (api.has_permission('is_coorganizer', fetch='event_id',
131 fetch_as="event_id", model=Ticket, methods="POST",
132 check=lambda a: a.get('event_id') or a.get('event_identifier')),)
133 schema = TicketSchema
134 data_layer = {'session': db.session,
135 'model': Ticket,
136 'methods': {
137 'query': query,
138 }}
139
140
141 class TicketDetail(ResourceDetail):
142 """
143 Ticket Resource
144 """
145 def before_get(self, args, view_kwargs):
146 """
147 before get method to get the resource id for assigning schema
148 :param args:
149 :param view_kwargs:
150 :return:
151 """
152 if view_kwargs.get('attendee_id'):
153 self.schema = TicketSchemaPublic
154
155 def before_get_object(self, view_kwargs):
156 """
157 before get object method to get the resource id for fetching details
158 :param view_kwargs:
159 :return:
160 """
161 if view_kwargs.get('attendee_id') is not None:
162 attendee = safe_query(self, TicketHolder, 'id', view_kwargs['attendee_id'], 'attendee_id')
163 if attendee.ticket_id is not None:
164 view_kwargs['id'] = attendee.ticket_id
165 else:
166 view_kwargs['id'] = None
167
168 def before_update_object(self, ticket, data, view_kwargs):
169 """
170 method to check if paid ticket has payment method before updating ticket object
171 :param ticket:
172 :param data:
173 :param view_kwargs:
174 :return:
175 """
176 if ticket.type == 'paid':
177 try:
178 event = db.session.query(Event).filter_by(id=ticket.event.id, deleted_at=None).one()
179 except NoResultFound:
180 raise UnprocessableEntity({'event_id': ticket.event.id}, "Event does not exist")
181 if not event.is_payment_enabled():
182 raise UnprocessableEntity(
183 {'event_id': ticket.event.id}, "Event having paid ticket must have a payment method")
184
185 decorators = (api.has_permission('is_coorganizer', fetch='event_id',
186 fetch_as="event_id", model=Ticket, methods="PATCH,DELETE"),)
187 schema = TicketSchema
188 data_layer = {'session': db.session,
189 'model': Ticket,
190 'methods': {
191 'before_get_object': before_get_object,
192 'before_update_object': before_update_object
193 }}
194
195
196 class TicketRelationshipRequired(ResourceRelationship):
197 """
198 Tickets Relationship (Required)
199 """
200 decorators = (api.has_permission('is_coorganizer', fetch='event_id',
201 fetch_as="event_id", model=Ticket, methods="PATCH"),)
202 methods = ['GET', 'PATCH']
203 schema = TicketSchema
204 data_layer = {'session': db.session,
205 'model': Ticket}
206
207
208 class TicketRelationshipOptional(ResourceRelationship):
209 """
210 Tickets Relationship (Optional)
211 """
212 decorators = (api.has_permission('is_coorganizer', fetch='event_id',
213 fetch_as="event_id", model=Ticket, methods="PATCH,DELETE"),)
214 schema = TicketSchema
215 data_layer = {'session': db.session,
216 'model': Ticket}
217
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/app/api/tickets.py b/app/api/tickets.py
--- a/app/api/tickets.py
+++ b/app/api/tickets.py
@@ -20,6 +20,13 @@
from app.api.helpers.exceptions import ConflictException, MethodNotAllowed, UnprocessableEntity
from app.api.helpers.db import get_count
+
+def validate_ticket_price(data):
+ if data.get('type') != 'free' and int(data.get('price')) <= 0:
+ raise UnprocessableEntity(
+ {'price': data.get('price')}, "Price of a paid/donation ticket must be greater than zero")
+
+
class TicketListPost(ResourceList):
"""
Create and List Tickets
@@ -51,6 +58,8 @@
:param view_kwargs:
:return:
"""
+ if not data.get('price') or not data.get('type'):
+ raise UnprocessableEntity({}, "Type/price of ticket is missing")
if data.get('type') == 'paid' and data.get('event'):
try:
event = db.session.query(Event).filter_by(id=data['event'], deleted_at=None).one()
@@ -59,6 +68,7 @@
if not event.is_payment_enabled():
raise UnprocessableEntity(
{'event_id': data['event']}, "Event having paid ticket must have a payment method")
+ validate_ticket_price(data)
schema = TicketSchema
methods = ['POST', ]
@@ -181,7 +191,8 @@
if not event.is_payment_enabled():
raise UnprocessableEntity(
{'event_id': ticket.event.id}, "Event having paid ticket must have a payment method")
-
+ if data.get('type') and data.get('price'):
+ validate_ticket_price(data)
decorators = (api.has_permission('is_coorganizer', fetch='event_id',
fetch_as="event_id", model=Ticket, methods="PATCH,DELETE"),)
schema = TicketSchema
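A hypothetical usage sketch follows, showing how the `validate_ticket_price` helper introduced by this diff is expected to behave. The test module and the use of pytest are assumptions, not part of the patch.

```python
# Hypothetical test sketch (not part of the patch): exercising the new helper.
import pytest

from app.api.tickets import validate_ticket_price
from app.api.helpers.exceptions import UnprocessableEntity


def test_paid_ticket_rejects_non_positive_price():
    with pytest.raises(UnprocessableEntity):
        validate_ticket_price({'type': 'paid', 'price': 0})


def test_free_ticket_keeps_zero_price():
    # No exception expected: the check only guards paid/donation tickets.
    validate_ticket_price({'type': 'free', 'price': 0})
```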
|
{"golden_diff": "diff --git a/app/api/tickets.py b/app/api/tickets.py\n--- a/app/api/tickets.py\n+++ b/app/api/tickets.py\n@@ -20,6 +20,13 @@\n from app.api.helpers.exceptions import ConflictException, MethodNotAllowed, UnprocessableEntity\n from app.api.helpers.db import get_count\n \n+\n+def validate_ticket_price(data):\n+ if data.get('type') != 'free' and int(data.get('price')) <= 0:\n+ raise UnprocessableEntity(\n+ {'price': data.get('price')}, \"Price of a paid/donation ticket must be greater than zero\")\n+\n+\n class TicketListPost(ResourceList):\n \"\"\"\n Create and List Tickets\n@@ -51,6 +58,8 @@\n :param view_kwargs:\n :return:\n \"\"\"\n+ if not data.get('price') or not data.get('type'):\n+ raise UnprocessableEntity({}, \"Type/price of ticket is missing\")\n if data.get('type') == 'paid' and data.get('event'):\n try:\n event = db.session.query(Event).filter_by(id=data['event'], deleted_at=None).one()\n@@ -59,6 +68,7 @@\n if not event.is_payment_enabled():\n raise UnprocessableEntity(\n {'event_id': data['event']}, \"Event having paid ticket must have a payment method\")\n+ validate_ticket_price(data)\n \n schema = TicketSchema\n methods = ['POST', ]\n@@ -181,7 +191,8 @@\n if not event.is_payment_enabled():\n raise UnprocessableEntity(\n {'event_id': ticket.event.id}, \"Event having paid ticket must have a payment method\")\n-\n+ if data.get('type') and data.get('price'):\n+ validate_ticket_price(data)\n decorators = (api.has_permission('is_coorganizer', fetch='event_id',\n fetch_as=\"event_id\", model=Ticket, methods=\"PATCH,DELETE\"),)\n schema = TicketSchema\n", "issue": "Server checks for ticket prices are missing\n**Describe the bug**\r\nServer checks are required for ticket prices. This would also allow decimal values greater than zero to pass through [similar to eventbrite]\r\n\r\n**Additional context**\r\nCorresponding FE PR which requires these checks : https://github.com/fossasia/open-event-frontend/pull/3341\n", "before_files": [{"content": "from flask import request, current_app\nfrom flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\nfrom flask_rest_jsonapi.exceptions import ObjectNotFound\nfrom flask_jwt_extended import current_user, verify_jwt_in_request\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom app.api.bootstrap import api\nfrom app.api.helpers.db import safe_query\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.query import event_query\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.schema.tickets import TicketSchema, TicketSchemaPublic\nfrom app.models import db\nfrom app.models.access_code import AccessCode\nfrom app.models.discount_code import DiscountCode\nfrom app.models.order import Order\nfrom app.models.ticket import Ticket, TicketTag, ticket_tags_table\nfrom app.models.event import Event\nfrom app.models.ticket_holder import TicketHolder\nfrom app.api.helpers.exceptions import ConflictException, MethodNotAllowed, UnprocessableEntity\nfrom app.api.helpers.db import get_count\n\nclass TicketListPost(ResourceList):\n \"\"\"\n Create and List Tickets\n \"\"\"\n def before_post(self, args, kwargs, data):\n \"\"\"\n before post method to check for required relationship and proper permission\n :param args:\n :param kwargs:\n :param data:\n :return:\n \"\"\"\n require_relationship(['event'], data)\n if not has_access('is_coorganizer', event_id=data['event']):\n raise ObjectNotFound({'parameter': 'event_id'},\n \"Event: {} not found\".format(data['event']))\n\n if 
get_count(db.session.query(Ticket.id).filter_by(name=data['name'], event_id=int(data['event']),\n deleted_at=None)) > 0:\n raise ConflictException({'pointer': '/data/attributes/name'}, \"Ticket already exists\")\n\n if get_count(db.session.query(Event).filter_by(id=int(data['event']), is_ticketing_enabled=False)) > 0:\n raise MethodNotAllowed({'parameter': 'event_id'}, \"Ticketing is disabled for this Event\")\n\n def before_create_object(self, data, view_kwargs):\n \"\"\"\n before create method to check if paid ticket has a paymentMethod enabled\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if data.get('type') == 'paid' and data.get('event'):\n try:\n event = db.session.query(Event).filter_by(id=data['event'], deleted_at=None).one()\n except NoResultFound:\n raise UnprocessableEntity({'event_id': data['event']}, \"Event does not exist\")\n if not event.is_payment_enabled():\n raise UnprocessableEntity(\n {'event_id': data['event']}, \"Event having paid ticket must have a payment method\")\n\n schema = TicketSchema\n methods = ['POST', ]\n data_layer = {'session': db.session,\n 'model': Ticket,\n 'methods': {\n 'before_create_object': before_create_object,\n 'before_post': before_post\n }}\n\n\nclass TicketList(ResourceList):\n \"\"\"\n List Tickets based on different params\n \"\"\"\n def before_get(self, args, view_kwargs):\n \"\"\"\n before get method to get the resource id for assigning schema\n :param args:\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('ticket_tag_id') or view_kwargs.get('access_code_id') or view_kwargs.get('order_identifier'):\n self.schema = TicketSchemaPublic\n\n def query(self, view_kwargs):\n \"\"\"\n query method for resource list\n :param view_kwargs:\n :return:\n \"\"\"\n\n if 'Authorization' in request.headers:\n verify_jwt_in_request()\n if current_user.is_super_admin or current_user.is_admin:\n query_ = self.session.query(Ticket)\n elif view_kwargs.get('event_id') and has_access('is_organizer', event_id=view_kwargs['event_id']):\n query_ = self.session.query(Ticket)\n else:\n query_ = self.session.query(Ticket).filter_by(is_hidden=False)\n else:\n query_ = self.session.query(Ticket).filter_by(is_hidden=False)\n\n if view_kwargs.get('ticket_tag_id'):\n ticket_tag = safe_query(self, TicketTag, 'id', view_kwargs['ticket_tag_id'], 'ticket_tag_id')\n query_ = query_.join(ticket_tags_table).filter_by(ticket_tag_id=ticket_tag.id)\n query_ = event_query(self, query_, view_kwargs)\n if view_kwargs.get('access_code_id'):\n access_code = safe_query(self, AccessCode, 'id', view_kwargs['access_code_id'], 'access_code_id')\n # access_code - ticket :: many-to-many relationship\n query_ = Ticket.query.filter(Ticket.access_codes.any(id=access_code.id))\n\n if view_kwargs.get('discount_code_id'):\n discount_code = safe_query(self, DiscountCode, 'id', view_kwargs['discount_code_id'], 'discount_code_id')\n # discount_code - ticket :: many-to-many relationship\n query_ = Ticket.query.filter(Ticket.discount_codes.any(id=discount_code.id))\n\n if view_kwargs.get('order_identifier'):\n order = safe_query(self, Order, 'identifier', view_kwargs['order_identifier'], 'order_identifier')\n ticket_ids = []\n for ticket in order.tickets:\n ticket_ids.append(ticket.id)\n query_ = query_.filter(Ticket.id.in_(tuple(ticket_ids)))\n\n return query_\n\n view_kwargs = True\n methods = ['GET', ]\n decorators = (api.has_permission('is_coorganizer', fetch='event_id',\n fetch_as=\"event_id\", model=Ticket, methods=\"POST\",\n check=lambda a: a.get('event_id') or 
a.get('event_identifier')),)\n schema = TicketSchema\n data_layer = {'session': db.session,\n 'model': Ticket,\n 'methods': {\n 'query': query,\n }}\n\n\nclass TicketDetail(ResourceDetail):\n \"\"\"\n Ticket Resource\n \"\"\"\n def before_get(self, args, view_kwargs):\n \"\"\"\n before get method to get the resource id for assigning schema\n :param args:\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('attendee_id'):\n self.schema = TicketSchemaPublic\n\n def before_get_object(self, view_kwargs):\n \"\"\"\n before get object method to get the resource id for fetching details\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('attendee_id') is not None:\n attendee = safe_query(self, TicketHolder, 'id', view_kwargs['attendee_id'], 'attendee_id')\n if attendee.ticket_id is not None:\n view_kwargs['id'] = attendee.ticket_id\n else:\n view_kwargs['id'] = None\n\n def before_update_object(self, ticket, data, view_kwargs):\n \"\"\"\n method to check if paid ticket has payment method before updating ticket object\n :param ticket:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if ticket.type == 'paid':\n try:\n event = db.session.query(Event).filter_by(id=ticket.event.id, deleted_at=None).one()\n except NoResultFound:\n raise UnprocessableEntity({'event_id': ticket.event.id}, \"Event does not exist\")\n if not event.is_payment_enabled():\n raise UnprocessableEntity(\n {'event_id': ticket.event.id}, \"Event having paid ticket must have a payment method\")\n\n decorators = (api.has_permission('is_coorganizer', fetch='event_id',\n fetch_as=\"event_id\", model=Ticket, methods=\"PATCH,DELETE\"),)\n schema = TicketSchema\n data_layer = {'session': db.session,\n 'model': Ticket,\n 'methods': {\n 'before_get_object': before_get_object,\n 'before_update_object': before_update_object\n }}\n\n\nclass TicketRelationshipRequired(ResourceRelationship):\n \"\"\"\n Tickets Relationship (Required)\n \"\"\"\n decorators = (api.has_permission('is_coorganizer', fetch='event_id',\n fetch_as=\"event_id\", model=Ticket, methods=\"PATCH\"),)\n methods = ['GET', 'PATCH']\n schema = TicketSchema\n data_layer = {'session': db.session,\n 'model': Ticket}\n\n\nclass TicketRelationshipOptional(ResourceRelationship):\n \"\"\"\n Tickets Relationship (Optional)\n \"\"\"\n decorators = (api.has_permission('is_coorganizer', fetch='event_id',\n fetch_as=\"event_id\", model=Ticket, methods=\"PATCH,DELETE\"),)\n schema = TicketSchema\n data_layer = {'session': db.session,\n 'model': Ticket}\n", "path": "app/api/tickets.py"}], "after_files": [{"content": "from flask import request, current_app\nfrom flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\nfrom flask_rest_jsonapi.exceptions import ObjectNotFound\nfrom flask_jwt_extended import current_user, verify_jwt_in_request\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom app.api.bootstrap import api\nfrom app.api.helpers.db import safe_query\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.query import event_query\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.schema.tickets import TicketSchema, TicketSchemaPublic\nfrom app.models import db\nfrom app.models.access_code import AccessCode\nfrom app.models.discount_code import DiscountCode\nfrom app.models.order import Order\nfrom app.models.ticket import Ticket, TicketTag, ticket_tags_table\nfrom app.models.event import Event\nfrom app.models.ticket_holder import TicketHolder\nfrom app.api.helpers.exceptions 
import ConflictException, MethodNotAllowed, UnprocessableEntity\nfrom app.api.helpers.db import get_count\n\n\ndef validate_ticket_price(data):\n if data.get('type') != 'free' and int(data.get('price')) <= 0:\n raise UnprocessableEntity(\n {'price': data.get('price')}, \"Price of a paid/donation ticket must be greater than zero\")\n\n\nclass TicketListPost(ResourceList):\n \"\"\"\n Create and List Tickets\n \"\"\"\n def before_post(self, args, kwargs, data):\n \"\"\"\n before post method to check for required relationship and proper permission\n :param args:\n :param kwargs:\n :param data:\n :return:\n \"\"\"\n require_relationship(['event'], data)\n if not has_access('is_coorganizer', event_id=data['event']):\n raise ObjectNotFound({'parameter': 'event_id'},\n \"Event: {} not found\".format(data['event']))\n\n if get_count(db.session.query(Ticket.id).filter_by(name=data['name'], event_id=int(data['event']),\n deleted_at=None)) > 0:\n raise ConflictException({'pointer': '/data/attributes/name'}, \"Ticket already exists\")\n\n if get_count(db.session.query(Event).filter_by(id=int(data['event']), is_ticketing_enabled=False)) > 0:\n raise MethodNotAllowed({'parameter': 'event_id'}, \"Ticketing is disabled for this Event\")\n\n def before_create_object(self, data, view_kwargs):\n \"\"\"\n before create method to check if paid ticket has a paymentMethod enabled\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if not data.get('price') or not data.get('type'):\n raise UnprocessableEntity({}, \"Type/price of ticket is missing\")\n if data.get('type') == 'paid' and data.get('event'):\n try:\n event = db.session.query(Event).filter_by(id=data['event'], deleted_at=None).one()\n except NoResultFound:\n raise UnprocessableEntity({'event_id': data['event']}, \"Event does not exist\")\n if not event.is_payment_enabled():\n raise UnprocessableEntity(\n {'event_id': data['event']}, \"Event having paid ticket must have a payment method\")\n validate_ticket_price(data)\n\n schema = TicketSchema\n methods = ['POST', ]\n data_layer = {'session': db.session,\n 'model': Ticket,\n 'methods': {\n 'before_create_object': before_create_object,\n 'before_post': before_post\n }}\n\n\nclass TicketList(ResourceList):\n \"\"\"\n List Tickets based on different params\n \"\"\"\n def before_get(self, args, view_kwargs):\n \"\"\"\n before get method to get the resource id for assigning schema\n :param args:\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('ticket_tag_id') or view_kwargs.get('access_code_id') or view_kwargs.get('order_identifier'):\n self.schema = TicketSchemaPublic\n\n def query(self, view_kwargs):\n \"\"\"\n query method for resource list\n :param view_kwargs:\n :return:\n \"\"\"\n\n if 'Authorization' in request.headers:\n verify_jwt_in_request()\n if current_user.is_super_admin or current_user.is_admin:\n query_ = self.session.query(Ticket)\n elif view_kwargs.get('event_id') and has_access('is_organizer', event_id=view_kwargs['event_id']):\n query_ = self.session.query(Ticket)\n else:\n query_ = self.session.query(Ticket).filter_by(is_hidden=False)\n else:\n query_ = self.session.query(Ticket).filter_by(is_hidden=False)\n\n if view_kwargs.get('ticket_tag_id'):\n ticket_tag = safe_query(self, TicketTag, 'id', view_kwargs['ticket_tag_id'], 'ticket_tag_id')\n query_ = query_.join(ticket_tags_table).filter_by(ticket_tag_id=ticket_tag.id)\n query_ = event_query(self, query_, view_kwargs)\n if view_kwargs.get('access_code_id'):\n access_code = safe_query(self, AccessCode, 'id', 
view_kwargs['access_code_id'], 'access_code_id')\n # access_code - ticket :: many-to-many relationship\n query_ = Ticket.query.filter(Ticket.access_codes.any(id=access_code.id))\n\n if view_kwargs.get('discount_code_id'):\n discount_code = safe_query(self, DiscountCode, 'id', view_kwargs['discount_code_id'], 'discount_code_id')\n # discount_code - ticket :: many-to-many relationship\n query_ = Ticket.query.filter(Ticket.discount_codes.any(id=discount_code.id))\n\n if view_kwargs.get('order_identifier'):\n order = safe_query(self, Order, 'identifier', view_kwargs['order_identifier'], 'order_identifier')\n ticket_ids = []\n for ticket in order.tickets:\n ticket_ids.append(ticket.id)\n query_ = query_.filter(Ticket.id.in_(tuple(ticket_ids)))\n\n return query_\n\n view_kwargs = True\n methods = ['GET', ]\n decorators = (api.has_permission('is_coorganizer', fetch='event_id',\n fetch_as=\"event_id\", model=Ticket, methods=\"POST\",\n check=lambda a: a.get('event_id') or a.get('event_identifier')),)\n schema = TicketSchema\n data_layer = {'session': db.session,\n 'model': Ticket,\n 'methods': {\n 'query': query,\n }}\n\n\nclass TicketDetail(ResourceDetail):\n \"\"\"\n Ticket Resource\n \"\"\"\n def before_get(self, args, view_kwargs):\n \"\"\"\n before get method to get the resource id for assigning schema\n :param args:\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('attendee_id'):\n self.schema = TicketSchemaPublic\n\n def before_get_object(self, view_kwargs):\n \"\"\"\n before get object method to get the resource id for fetching details\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('attendee_id') is not None:\n attendee = safe_query(self, TicketHolder, 'id', view_kwargs['attendee_id'], 'attendee_id')\n if attendee.ticket_id is not None:\n view_kwargs['id'] = attendee.ticket_id\n else:\n view_kwargs['id'] = None\n\n def before_update_object(self, ticket, data, view_kwargs):\n \"\"\"\n method to check if paid ticket has payment method before updating ticket object\n :param ticket:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if ticket.type == 'paid':\n try:\n event = db.session.query(Event).filter_by(id=ticket.event.id, deleted_at=None).one()\n except NoResultFound:\n raise UnprocessableEntity({'event_id': ticket.event.id}, \"Event does not exist\")\n if not event.is_payment_enabled():\n raise UnprocessableEntity(\n {'event_id': ticket.event.id}, \"Event having paid ticket must have a payment method\")\n if data.get('type') and data.get('price'):\n validate_ticket_price(data)\n decorators = (api.has_permission('is_coorganizer', fetch='event_id',\n fetch_as=\"event_id\", model=Ticket, methods=\"PATCH,DELETE\"),)\n schema = TicketSchema\n data_layer = {'session': db.session,\n 'model': Ticket,\n 'methods': {\n 'before_get_object': before_get_object,\n 'before_update_object': before_update_object\n }}\n\n\nclass TicketRelationshipRequired(ResourceRelationship):\n \"\"\"\n Tickets Relationship (Required)\n \"\"\"\n decorators = (api.has_permission('is_coorganizer', fetch='event_id',\n fetch_as=\"event_id\", model=Ticket, methods=\"PATCH\"),)\n methods = ['GET', 'PATCH']\n schema = TicketSchema\n data_layer = {'session': db.session,\n 'model': Ticket}\n\n\nclass TicketRelationshipOptional(ResourceRelationship):\n \"\"\"\n Tickets Relationship (Optional)\n \"\"\"\n decorators = (api.has_permission('is_coorganizer', fetch='event_id',\n fetch_as=\"event_id\", model=Ticket, methods=\"PATCH,DELETE\"),)\n schema = TicketSchema\n data_layer = {'session': 
db.session,\n 'model': Ticket}\n", "path": "app/api/tickets.py"}]}
| 2,694 | 427 |
gh_patches_debug_24586
|
rasdani/github-patches
|
git_diff
|
OpenNMT__OpenNMT-tf-527
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ValueError in guided alignment loss during training
Keras losses throw an exception when they are used within a distribution strategy scope and the reduction mode is unset.
```
ValueError: Please use `tf.keras.losses.Reduction.SUM` or `tf.keras.losses.Reduction.NONE` for loss reduction when losses are used with `tf.distribute.Strategy` outside of the built-in training loops. You can implement `tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` using global batch size like:
```
--- END ISSUE ---
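To make the failure mode concrete, the sketch below reproduces the setting the issue describes and the kind of change that avoids it: constructing the Keras loss with an explicit reduction and normalizing manually. It assumes a MirroredStrategy and a custom training loop; it is not OpenNMT-tf code.

```python
# Illustration only (not OpenNMT-tf code): Keras losses built with the default
# reduction raise the quoted ValueError when called under a tf.distribute
# strategy outside Model.fit. An explicit reduction avoids it; the caller then
# divides by its own normalizer.
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
    # loss_fn = tf.keras.losses.CategoricalCrossentropy()  # default reduction -> ValueError at call time
    loss_fn = tf.keras.losses.CategoricalCrossentropy(
        reduction=tf.keras.losses.Reduction.SUM)

    y_true = tf.one_hot([0, 2], 3)
    y_pred = tf.constant([[0.8, 0.1, 0.1], [0.2, 0.2, 0.6]])
    total = loss_fn(y_true, y_pred)                              # summed over the batch
    mean = total / tf.cast(tf.shape(y_true)[0], total.dtype)     # manual normalization
```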
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opennmt/utils/losses.py`
Content:
```
1 """Define losses."""
2
3 import tensorflow as tf
4
5
6 def _smooth_one_hot_labels(logits, labels, label_smoothing):
7 label_smoothing = tf.constant(label_smoothing, dtype=logits.dtype)
8 num_classes = tf.shape(logits)[-1]
9 return tf.one_hot(
10 tf.cast(labels, tf.int32),
11 num_classes,
12 on_value=1.0 - label_smoothing,
13 off_value=label_smoothing / tf.cast(num_classes - 1, label_smoothing.dtype),
14 dtype=logits.dtype)
15
16 def _softmax_cross_entropy(logits, labels, label_smoothing, training):
17 # Computes the softmax in full precision.
18 if logits.dtype.base_dtype != tf.float32:
19 logits = tf.cast(logits, tf.float32)
20 if training and label_smoothing > 0.0:
21 smoothed_labels = _smooth_one_hot_labels(logits, labels, label_smoothing)
22 return tf.nn.softmax_cross_entropy_with_logits(smoothed_labels, logits)
23 else:
24 return tf.nn.sparse_softmax_cross_entropy_with_logits(labels, logits)
25
26 def cross_entropy_sequence_loss(logits,
27 labels,
28 sequence_length,
29 label_smoothing=0.0,
30 average_in_time=False,
31 training=None):
32 """Computes the cross entropy loss of sequences.
33
34 Args:
35 logits: The unscaled probabilities.
36 labels: The true labels.
37 sequence_length: The length of each sequence.
38 label_smoothing: The label smoothing value.
39 average_in_time: If ``True``, also average the loss in the time dimension.
40 training: Compute training loss.
41
42 Returns:
43 A tuple (cumulated loss, loss normalizer, token-level normalizer).
44 """
45 batch_size = tf.shape(logits)[0]
46 max_time = tf.shape(logits)[1]
47
48 cross_entropy = _softmax_cross_entropy(logits, labels, label_smoothing, training)
49 weights = tf.sequence_mask(
50 sequence_length, maxlen=max_time, dtype=cross_entropy.dtype)
51 loss = tf.reduce_sum(cross_entropy * weights)
52 loss_token_normalizer = tf.reduce_sum(weights)
53
54 if average_in_time or not training:
55 loss_normalizer = loss_token_normalizer
56 else:
57 loss_normalizer = tf.cast(batch_size, loss.dtype)
58
59 return loss, loss_normalizer, loss_token_normalizer
60
61 def cross_entropy_loss(logits,
62 labels,
63 label_smoothing=0.0,
64 training=None):
65 """Computes the cross entropy loss.
66
67 Args:
68 logits: The unscaled probabilities.
69 labels: The true labels.
70 label_smoothing: The label smoothing value.
71 training: Compute training loss.
72
73 Returns:
74 The cumulated loss and the loss normalizer.
75 """
76 cross_entropy = _softmax_cross_entropy(logits, labels, label_smoothing, training)
77 loss = tf.reduce_sum(cross_entropy)
78 loss_normalizer = tf.cast(tf.shape(cross_entropy)[0], loss.dtype)
79 return loss, loss_normalizer
80
81 def guided_alignment_cost(attention_probs,
82 gold_alignment,
83 sequence_length=None,
84 cost_type="ce",
85 weight=1):
86 """Computes the guided alignment cost.
87
88 Args:
89 attention_probs: The attention probabilities, a float ``tf.Tensor`` of shape
90 :math:`[B, T_t, T_s]`.
91 gold_alignment: The true alignment matrix, a float ``tf.Tensor`` of shape
92 :math:`[B, T_t, T_s]`.
93 sequence_length: The length of each sequence.
94 cost_type: The type of the cost function to compute (can be: ce, mse).
95 weight: The weight applied to the cost.
96
97 Returns:
98 The guided alignment cost.
99
100 Raises:
101 ValueError: if :obj:`cost_type` is invalid.
102 """
103 if cost_type == "ce":
104 loss = tf.keras.losses.CategoricalCrossentropy()
105 elif cost_type == "mse":
106 loss = tf.keras.losses.MeanSquaredError()
107 else:
108 raise ValueError("invalid guided alignment cost: %s" % cost_type)
109
110 if sequence_length is not None:
111 sample_weight = tf.sequence_mask(
112 sequence_length,
113 maxlen=tf.shape(attention_probs)[1],
114 dtype=attention_probs.dtype)
115 sample_weight = tf.expand_dims(sample_weight, -1)
116 else:
117 sample_weight = None
118
119 cost = loss(
120 gold_alignment,
121 attention_probs,
122 sample_weight=sample_weight)
123 return weight * cost
124
125 def regularization_penalty(regularization_type, scale, weights):
126 """Computes the weights regularization penalty.
127
128 Args:
129 regularization_type: The regularization type: ``l1``, ``l2``, or ``l1_l2``.
130 scale: The regularization multiplier. If :obj:`regularization_type` is
131 ``l1_l2``, this should be a list or tuple containing the L1 regularization
132 scale and the L2 regularization scale.
133 weights: The list of weights.
134
135 Returns:
136 The regularization penalty.
137
138 Raises:
139 ValueError: if :obj:`regularization_type` is invalid or is ``l1_l2`` but
140 :obj:`scale` is not a sequence.
141 """
142 regularization_type = regularization_type.lower()
143 if regularization_type == "l1":
144 regularizer = tf.keras.regularizers.l1(l=float(scale))
145 elif regularization_type == "l2":
146 regularizer = tf.keras.regularizers.l2(l=float(scale))
147 elif regularization_type == "l1_l2":
148 if not isinstance(scale, (list, tuple)) or len(scale) != 2:
149 raise ValueError("l1_l2 regularization requires 2 scale values")
150 regularizer = tf.keras.regularizers.l1_l2(
151 l1=float(scale[0]), l2=float(scale[1]))
152 else:
153 raise ValueError("invalid regularization type %s" % regularization_type)
154
155 weights = list(filter(lambda v: not _is_bias(v), weights))
156 penalty = tf.add_n([regularizer(w) for w in weights])
157 return penalty
158
159 def _is_bias(variable):
160 return len(variable.shape) == 1 and variable.name.endswith("bias:0")
161
162 def _negative_log_likelihood(logits, labels, sequence_length):
163 nll_num, nll_den, _ = cross_entropy_sequence_loss(
164 logits, labels, sequence_length, average_in_time=True)
165 return nll_num / nll_den
166
167 def max_margin_loss(true_logits,
168 true_labels,
169 true_sequence_length,
170 negative_logits,
171 negative_labels,
172 negative_sequence_length,
173 eta=0.1):
174 """Computes the max-margin loss described in
175 https://www.aclweb.org/anthology/P19-1623.
176
177 Args:
178 true_logits: The unscaled probabilities from the true example.
179 negative_logits: The unscaled probabilities from the negative example.
180 true_labels: The true labels.
181 true_sequence_length: The length of each true sequence.
182 negative_labels: The negative labels.
183 negative_sequence_length: The length of each negative sequence.
184 eta: Ensure that the margin is higher than this value.
185
186 Returns:
187 The max-margin loss.
188 """
189 true_nll = _negative_log_likelihood(
190 true_logits, true_labels, true_sequence_length)
191 negative_nll = _negative_log_likelihood(
192 negative_logits, negative_labels, negative_sequence_length)
193 margin = true_nll - negative_nll + eta
194 return tf.maximum(margin, 0)
195
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/opennmt/utils/losses.py b/opennmt/utils/losses.py
--- a/opennmt/utils/losses.py
+++ b/opennmt/utils/losses.py
@@ -101,9 +101,9 @@
ValueError: if :obj:`cost_type` is invalid.
"""
if cost_type == "ce":
- loss = tf.keras.losses.CategoricalCrossentropy()
+ loss = tf.keras.losses.CategoricalCrossentropy(reduction=tf.keras.losses.Reduction.SUM)
elif cost_type == "mse":
- loss = tf.keras.losses.MeanSquaredError()
+ loss = tf.keras.losses.MeanSquaredError(reduction=tf.keras.losses.Reduction.SUM)
else:
raise ValueError("invalid guided alignment cost: %s" % cost_type)
@@ -113,13 +113,16 @@
maxlen=tf.shape(attention_probs)[1],
dtype=attention_probs.dtype)
sample_weight = tf.expand_dims(sample_weight, -1)
+ normalizer = tf.reduce_sum(sequence_length)
else:
sample_weight = None
+ normalizer = tf.size(attention_probs)
cost = loss(
gold_alignment,
attention_probs,
sample_weight=sample_weight)
+ cost /= tf.cast(normalizer, cost.dtype)
return weight * cost
def regularization_penalty(regularization_type, scale, weights):
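On the design choice: with `Reduction.SUM` plus the explicit divide added above, the cost is summed over unmasked positions and then normalized by a count the caller controls, instead of relying on Keras's built-in averaging. A hypothetical call site is sketched below; the toy shapes and the MirroredStrategy are assumptions, not part of the patch.

```python
# Hypothetical usage sketch (not part of the patch): after this change,
# guided_alignment_cost can be evaluated under a distribution strategy
# without triggering the reduction ValueError.
import tensorflow as tf
from opennmt.utils.losses import guided_alignment_cost

strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
    attention = tf.fill([2, 4, 6], 1.0 / 6.0)             # [batch, target, source]
    alignment = tf.one_hot(tf.zeros([2, 4], tf.int32), 6)
    lengths = tf.constant([4, 2])
    cost = guided_alignment_cost(attention, alignment, sequence_length=lengths)
```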
|
{"golden_diff": "diff --git a/opennmt/utils/losses.py b/opennmt/utils/losses.py\n--- a/opennmt/utils/losses.py\n+++ b/opennmt/utils/losses.py\n@@ -101,9 +101,9 @@\n ValueError: if :obj:`cost_type` is invalid.\n \"\"\"\n if cost_type == \"ce\":\n- loss = tf.keras.losses.CategoricalCrossentropy()\n+ loss = tf.keras.losses.CategoricalCrossentropy(reduction=tf.keras.losses.Reduction.SUM)\n elif cost_type == \"mse\":\n- loss = tf.keras.losses.MeanSquaredError()\n+ loss = tf.keras.losses.MeanSquaredError(reduction=tf.keras.losses.Reduction.SUM)\n else:\n raise ValueError(\"invalid guided alignment cost: %s\" % cost_type)\n \n@@ -113,13 +113,16 @@\n maxlen=tf.shape(attention_probs)[1],\n dtype=attention_probs.dtype)\n sample_weight = tf.expand_dims(sample_weight, -1)\n+ normalizer = tf.reduce_sum(sequence_length)\n else:\n sample_weight = None\n+ normalizer = tf.size(attention_probs)\n \n cost = loss(\n gold_alignment,\n attention_probs,\n sample_weight=sample_weight)\n+ cost /= tf.cast(normalizer, cost.dtype)\n return weight * cost\n \n def regularization_penalty(regularization_type, scale, weights):\n", "issue": "ValueError in guided alignment loss during training\nKeras losses throw an exception when they are used within a distribution strategy scope and the reduction mode is unset.\r\n\r\n```\r\nValueError: Please use `tf.keras.losses.Reduction.SUM` or `tf.keras.losses.Reduction.NONE` for loss reduction when losses are used with `tf.distribute.Strategy` outside of the built-in training loops. You can implement `tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` using global batch size like:\r\n```\n", "before_files": [{"content": "\"\"\"Define losses.\"\"\"\n\nimport tensorflow as tf\n\n\ndef _smooth_one_hot_labels(logits, labels, label_smoothing):\n label_smoothing = tf.constant(label_smoothing, dtype=logits.dtype)\n num_classes = tf.shape(logits)[-1]\n return tf.one_hot(\n tf.cast(labels, tf.int32),\n num_classes,\n on_value=1.0 - label_smoothing,\n off_value=label_smoothing / tf.cast(num_classes - 1, label_smoothing.dtype),\n dtype=logits.dtype)\n\ndef _softmax_cross_entropy(logits, labels, label_smoothing, training):\n # Computes the softmax in full precision.\n if logits.dtype.base_dtype != tf.float32:\n logits = tf.cast(logits, tf.float32)\n if training and label_smoothing > 0.0:\n smoothed_labels = _smooth_one_hot_labels(logits, labels, label_smoothing)\n return tf.nn.softmax_cross_entropy_with_logits(smoothed_labels, logits)\n else:\n return tf.nn.sparse_softmax_cross_entropy_with_logits(labels, logits)\n\ndef cross_entropy_sequence_loss(logits,\n labels,\n sequence_length,\n label_smoothing=0.0,\n average_in_time=False,\n training=None):\n \"\"\"Computes the cross entropy loss of sequences.\n\n Args:\n logits: The unscaled probabilities.\n labels: The true labels.\n sequence_length: The length of each sequence.\n label_smoothing: The label smoothing value.\n average_in_time: If ``True``, also average the loss in the time dimension.\n training: Compute training loss.\n\n Returns:\n A tuple (cumulated loss, loss normalizer, token-level normalizer).\n \"\"\"\n batch_size = tf.shape(logits)[0]\n max_time = tf.shape(logits)[1]\n\n cross_entropy = _softmax_cross_entropy(logits, labels, label_smoothing, training)\n weights = tf.sequence_mask(\n sequence_length, maxlen=max_time, dtype=cross_entropy.dtype)\n loss = tf.reduce_sum(cross_entropy * weights)\n loss_token_normalizer = tf.reduce_sum(weights)\n\n if average_in_time or not training:\n loss_normalizer = loss_token_normalizer\n else:\n 
loss_normalizer = tf.cast(batch_size, loss.dtype)\n\n return loss, loss_normalizer, loss_token_normalizer\n\ndef cross_entropy_loss(logits,\n labels,\n label_smoothing=0.0,\n training=None):\n \"\"\"Computes the cross entropy loss.\n\n Args:\n logits: The unscaled probabilities.\n labels: The true labels.\n label_smoothing: The label smoothing value.\n training: Compute training loss.\n\n Returns:\n The cumulated loss and the loss normalizer.\n \"\"\"\n cross_entropy = _softmax_cross_entropy(logits, labels, label_smoothing, training)\n loss = tf.reduce_sum(cross_entropy)\n loss_normalizer = tf.cast(tf.shape(cross_entropy)[0], loss.dtype)\n return loss, loss_normalizer\n\ndef guided_alignment_cost(attention_probs,\n gold_alignment,\n sequence_length=None,\n cost_type=\"ce\",\n weight=1):\n \"\"\"Computes the guided alignment cost.\n\n Args:\n attention_probs: The attention probabilities, a float ``tf.Tensor`` of shape\n :math:`[B, T_t, T_s]`.\n gold_alignment: The true alignment matrix, a float ``tf.Tensor`` of shape\n :math:`[B, T_t, T_s]`.\n sequence_length: The length of each sequence.\n cost_type: The type of the cost function to compute (can be: ce, mse).\n weight: The weight applied to the cost.\n\n Returns:\n The guided alignment cost.\n\n Raises:\n ValueError: if :obj:`cost_type` is invalid.\n \"\"\"\n if cost_type == \"ce\":\n loss = tf.keras.losses.CategoricalCrossentropy()\n elif cost_type == \"mse\":\n loss = tf.keras.losses.MeanSquaredError()\n else:\n raise ValueError(\"invalid guided alignment cost: %s\" % cost_type)\n\n if sequence_length is not None:\n sample_weight = tf.sequence_mask(\n sequence_length,\n maxlen=tf.shape(attention_probs)[1],\n dtype=attention_probs.dtype)\n sample_weight = tf.expand_dims(sample_weight, -1)\n else:\n sample_weight = None\n\n cost = loss(\n gold_alignment,\n attention_probs,\n sample_weight=sample_weight)\n return weight * cost\n\ndef regularization_penalty(regularization_type, scale, weights):\n \"\"\"Computes the weights regularization penalty.\n\n Args:\n regularization_type: The regularization type: ``l1``, ``l2``, or ``l1_l2``.\n scale: The regularization multiplier. 
If :obj:`regularization_type` is\n ``l1_l2``, this should be a list or tuple containing the L1 regularization\n scale and the L2 regularization scale.\n weights: The list of weights.\n\n Returns:\n The regularization penalty.\n\n Raises:\n ValueError: if :obj:`regularization_type` is invalid or is ``l1_l2`` but\n :obj:`scale` is not a sequence.\n \"\"\"\n regularization_type = regularization_type.lower()\n if regularization_type == \"l1\":\n regularizer = tf.keras.regularizers.l1(l=float(scale))\n elif regularization_type == \"l2\":\n regularizer = tf.keras.regularizers.l2(l=float(scale))\n elif regularization_type == \"l1_l2\":\n if not isinstance(scale, (list, tuple)) or len(scale) != 2:\n raise ValueError(\"l1_l2 regularization requires 2 scale values\")\n regularizer = tf.keras.regularizers.l1_l2(\n l1=float(scale[0]), l2=float(scale[1]))\n else:\n raise ValueError(\"invalid regularization type %s\" % regularization_type)\n\n weights = list(filter(lambda v: not _is_bias(v), weights))\n penalty = tf.add_n([regularizer(w) for w in weights])\n return penalty\n\ndef _is_bias(variable):\n return len(variable.shape) == 1 and variable.name.endswith(\"bias:0\")\n\ndef _negative_log_likelihood(logits, labels, sequence_length):\n nll_num, nll_den, _ = cross_entropy_sequence_loss(\n logits, labels, sequence_length, average_in_time=True)\n return nll_num / nll_den\n\ndef max_margin_loss(true_logits,\n true_labels,\n true_sequence_length,\n negative_logits,\n negative_labels,\n negative_sequence_length,\n eta=0.1):\n \"\"\"Computes the max-margin loss described in\n https://www.aclweb.org/anthology/P19-1623.\n\n Args:\n true_logits: The unscaled probabilities from the true example.\n negative_logits: The unscaled probabilities from the negative example.\n true_labels: The true labels.\n true_sequence_length: The length of each true sequence.\n negative_labels: The negative labels.\n negative_sequence_length: The length of each negative sequence.\n eta: Ensure that the margin is higher than this value.\n\n Returns:\n The max-margin loss.\n \"\"\"\n true_nll = _negative_log_likelihood(\n true_logits, true_labels, true_sequence_length)\n negative_nll = _negative_log_likelihood(\n negative_logits, negative_labels, negative_sequence_length)\n margin = true_nll - negative_nll + eta\n return tf.maximum(margin, 0)\n", "path": "opennmt/utils/losses.py"}], "after_files": [{"content": "\"\"\"Define losses.\"\"\"\n\nimport tensorflow as tf\n\n\ndef _smooth_one_hot_labels(logits, labels, label_smoothing):\n label_smoothing = tf.constant(label_smoothing, dtype=logits.dtype)\n num_classes = tf.shape(logits)[-1]\n return tf.one_hot(\n tf.cast(labels, tf.int32),\n num_classes,\n on_value=1.0 - label_smoothing,\n off_value=label_smoothing / tf.cast(num_classes - 1, label_smoothing.dtype),\n dtype=logits.dtype)\n\ndef _softmax_cross_entropy(logits, labels, label_smoothing, training):\n # Computes the softmax in full precision.\n if logits.dtype.base_dtype != tf.float32:\n logits = tf.cast(logits, tf.float32)\n if training and label_smoothing > 0.0:\n smoothed_labels = _smooth_one_hot_labels(logits, labels, label_smoothing)\n return tf.nn.softmax_cross_entropy_with_logits(smoothed_labels, logits)\n else:\n return tf.nn.sparse_softmax_cross_entropy_with_logits(labels, logits)\n\ndef cross_entropy_sequence_loss(logits,\n labels,\n sequence_length,\n label_smoothing=0.0,\n average_in_time=False,\n training=None):\n \"\"\"Computes the cross entropy loss of sequences.\n\n Args:\n logits: The unscaled probabilities.\n 
labels: The true labels.\n sequence_length: The length of each sequence.\n label_smoothing: The label smoothing value.\n average_in_time: If ``True``, also average the loss in the time dimension.\n training: Compute training loss.\n\n Returns:\n A tuple (cumulated loss, loss normalizer, token-level normalizer).\n \"\"\"\n batch_size = tf.shape(logits)[0]\n max_time = tf.shape(logits)[1]\n\n cross_entropy = _softmax_cross_entropy(logits, labels, label_smoothing, training)\n weights = tf.sequence_mask(\n sequence_length, maxlen=max_time, dtype=cross_entropy.dtype)\n loss = tf.reduce_sum(cross_entropy * weights)\n loss_token_normalizer = tf.reduce_sum(weights)\n\n if average_in_time or not training:\n loss_normalizer = loss_token_normalizer\n else:\n loss_normalizer = tf.cast(batch_size, loss.dtype)\n\n return loss, loss_normalizer, loss_token_normalizer\n\ndef cross_entropy_loss(logits,\n labels,\n label_smoothing=0.0,\n training=None):\n \"\"\"Computes the cross entropy loss.\n\n Args:\n logits: The unscaled probabilities.\n labels: The true labels.\n label_smoothing: The label smoothing value.\n training: Compute training loss.\n\n Returns:\n The cumulated loss and the loss normalizer.\n \"\"\"\n cross_entropy = _softmax_cross_entropy(logits, labels, label_smoothing, training)\n loss = tf.reduce_sum(cross_entropy)\n loss_normalizer = tf.cast(tf.shape(cross_entropy)[0], loss.dtype)\n return loss, loss_normalizer\n\ndef guided_alignment_cost(attention_probs,\n gold_alignment,\n sequence_length=None,\n cost_type=\"ce\",\n weight=1):\n \"\"\"Computes the guided alignment cost.\n\n Args:\n attention_probs: The attention probabilities, a float ``tf.Tensor`` of shape\n :math:`[B, T_t, T_s]`.\n gold_alignment: The true alignment matrix, a float ``tf.Tensor`` of shape\n :math:`[B, T_t, T_s]`.\n sequence_length: The length of each sequence.\n cost_type: The type of the cost function to compute (can be: ce, mse).\n weight: The weight applied to the cost.\n\n Returns:\n The guided alignment cost.\n\n Raises:\n ValueError: if :obj:`cost_type` is invalid.\n \"\"\"\n if cost_type == \"ce\":\n loss = tf.keras.losses.CategoricalCrossentropy(reduction=tf.keras.losses.Reduction.SUM)\n elif cost_type == \"mse\":\n loss = tf.keras.losses.MeanSquaredError(reduction=tf.keras.losses.Reduction.SUM)\n else:\n raise ValueError(\"invalid guided alignment cost: %s\" % cost_type)\n\n if sequence_length is not None:\n sample_weight = tf.sequence_mask(\n sequence_length,\n maxlen=tf.shape(attention_probs)[1],\n dtype=attention_probs.dtype)\n sample_weight = tf.expand_dims(sample_weight, -1)\n normalizer = tf.reduce_sum(sequence_length)\n else:\n sample_weight = None\n normalizer = tf.size(attention_probs)\n\n cost = loss(\n gold_alignment,\n attention_probs,\n sample_weight=sample_weight)\n cost /= tf.cast(normalizer, cost.dtype)\n return weight * cost\n\ndef regularization_penalty(regularization_type, scale, weights):\n \"\"\"Computes the weights regularization penalty.\n\n Args:\n regularization_type: The regularization type: ``l1``, ``l2``, or ``l1_l2``.\n scale: The regularization multiplier. 
If :obj:`regularization_type` is\n ``l1_l2``, this should be a list or tuple containing the L1 regularization\n scale and the L2 regularization scale.\n weights: The list of weights.\n\n Returns:\n The regularization penalty.\n\n Raises:\n ValueError: if :obj:`regularization_type` is invalid or is ``l1_l2`` but\n :obj:`scale` is not a sequence.\n \"\"\"\n regularization_type = regularization_type.lower()\n if regularization_type == \"l1\":\n regularizer = tf.keras.regularizers.l1(l=float(scale))\n elif regularization_type == \"l2\":\n regularizer = tf.keras.regularizers.l2(l=float(scale))\n elif regularization_type == \"l1_l2\":\n if not isinstance(scale, (list, tuple)) or len(scale) != 2:\n raise ValueError(\"l1_l2 regularization requires 2 scale values\")\n regularizer = tf.keras.regularizers.l1_l2(\n l1=float(scale[0]), l2=float(scale[1]))\n else:\n raise ValueError(\"invalid regularization type %s\" % regularization_type)\n\n weights = list(filter(lambda v: not _is_bias(v), weights))\n penalty = tf.add_n([regularizer(w) for w in weights])\n return penalty\n\ndef _is_bias(variable):\n return len(variable.shape) == 1 and variable.name.endswith(\"bias:0\")\n\ndef _negative_log_likelihood(logits, labels, sequence_length):\n nll_num, nll_den, _ = cross_entropy_sequence_loss(\n logits, labels, sequence_length, average_in_time=True)\n return nll_num / nll_den\n\ndef max_margin_loss(true_logits,\n true_labels,\n true_sequence_length,\n negative_logits,\n negative_labels,\n negative_sequence_length,\n eta=0.1):\n \"\"\"Computes the max-margin loss described in\n https://www.aclweb.org/anthology/P19-1623.\n\n Args:\n true_logits: The unscaled probabilities from the true example.\n negative_logits: The unscaled probabilities from the negative example.\n true_labels: The true labels.\n true_sequence_length: The length of each true sequence.\n negative_labels: The negative labels.\n negative_sequence_length: The length of each negative sequence.\n eta: Ensure that the margin is higher than this value.\n\n Returns:\n The max-margin loss.\n \"\"\"\n true_nll = _negative_log_likelihood(\n true_logits, true_labels, true_sequence_length)\n negative_nll = _negative_log_likelihood(\n negative_logits, negative_labels, negative_sequence_length)\n margin = true_nll - negative_nll + eta\n return tf.maximum(margin, 0)\n", "path": "opennmt/utils/losses.py"}]}
| 2,432 | 309 |
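Aside on the loss-function record above: the patched `guided_alignment_cost` sums the per-position cross entropy with `Reduction.SUM` and then divides by the number of non-padded target positions. Below is a minimal, self-contained sketch of that normalization (illustrative only, not part of the dataset record; it recomputes the masked average directly with TensorFlow 2 / Keras instead of importing `opennmt.utils.losses`, and the toy shapes are assumptions):

```python
import tensorflow as tf

# Toy alignment problem: batch of 2, target length 4, source length 5.
attention_probs = tf.nn.softmax(tf.random.normal([2, 4, 5]), axis=-1)
gold_alignment = tf.one_hot(
    tf.random.uniform([2, 4], maxval=5, dtype=tf.int32), depth=5)
sequence_length = tf.constant([4, 2])  # the second sequence has 2 padded positions

# Same normalization as the patched function: sum the cross entropy over real
# target positions only, then divide by their count (6 here), so padding no
# longer dilutes the average.
loss_fn = tf.keras.losses.CategoricalCrossentropy(
    reduction=tf.keras.losses.Reduction.SUM)
mask = tf.expand_dims(
    tf.sequence_mask(sequence_length, maxlen=4, dtype=attention_probs.dtype), -1)
cost = loss_fn(gold_alignment, attention_probs, sample_weight=mask)
cost /= tf.cast(tf.reduce_sum(sequence_length), cost.dtype)
print(float(cost))
```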
gh_patches_debug_16296
|
rasdani/github-patches
|
git_diff
|
qtile__qtile-2233
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Copy entire config folder for qtile check
<!--
Please do not ask general questions here! There are [community
contact](https://github.com/qtile/qtile#community) options for that.
-->
# Issue description
My config is split across two files, `config.py` (main config) and `custom.py` (custom widgets), but `qtile check` only copies the `config.py` file to the `/tmp/tmp…/` folder; therefore, the `mypy` check fails because it cannot find the `custom` module. I can work around this by quickly copying the `custom.py` file to the folder, but this gets annoying.
This could be fixed by automatically copying the entire `~/.config/qtile` folder to the temporary folder, instead of just the `config.py`. It might also be necessary to remove the `__pycache__` folder if it is present.
<!--
A brief discussion of what failed and how it failed. A description of
what you tried is helpful, i.e. "When I use lazy.kill() on a window I get
the following stack trace" instead of "Closing windows doesn't work".
-->
# Qtile version
The latest version on the Archlinux repos, `v0.17.0-1` (not sure about exact commit hash, sadly).
<!--
Please include the exact commit hash of the version of Qtile that failed.
-->
# Configuration
https://github.com/MithicSpirit/dotfiles/tree/master/.config/qtile
<!--
Please include a link or attach your configuration to the issue.
-->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libqtile/scripts/check.py`
Content:
```
1 # Copyright (c) 2020, Tycho Andersen. All rights reserved.
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a copy
4 # of this software and associated documentation files (the "Software"), to deal
5 # in the Software without restriction, including without limitation the rights
6 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 # copies of the Software, and to permit persons to whom the Software is
8 # furnished to do so, subject to the following conditions:
9 #
10 # The above copyright notice and this permission notice shall be included in
11 # all copies or substantial portions of the Software.
12 #
13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19 # SOFTWARE.
20
21 # Set the locale before any widgets or anything are imported, so any widget
22 # whose defaults depend on a reasonable locale sees something reasonable.
23 import shutil
24 import subprocess
25 import sys
26 import tempfile
27 from os import environ, getenv, path
28
29 from libqtile import confreader
30
31
32 def type_check_config_vars(tempdir, config_name):
33 if shutil.which("stubtest") is None:
34 print("stubtest not found, can't type check config file\n"
35 "install it and try again")
36 return
37
38 # write a .pyi file to tempdir:
39 f = open(path.join(tempdir, config_name+".pyi"), "w")
40 f.write(confreader.config_pyi_header)
41 for (name, type_) in confreader.Config.settings_keys:
42 f.write(name)
43 f.write(": ")
44 f.write(type_)
45 f.write("\n")
46 f.close()
47
48 # need to tell python to look in pwd for modules
49 newenv = environ.copy()
50 newenv["PYTHONPATH"] = newenv.get("PYTHONPATH", "") + ":"
51
52 p = subprocess.Popen(
53 ["stubtest", "--concise", config_name],
54 stdout=subprocess.PIPE,
55 stderr=subprocess.PIPE,
56 cwd=tempdir,
57 text=True,
58 env=newenv,
59 )
60 stdout, stderr = p.communicate()
61 missing_vars = []
62 for line in (stdout+stderr).split("\n"):
63 # filter out stuff that users didn't specify; they'll be imported from
64 # the default config
65 if "is not present at runtime" in line:
66 missing_vars.append(line.split()[0])
67
68 # write missing vars to a tempfile
69 whitelist = open(path.join(tempdir, "stubtest_whitelist"), "w")
70 for var in missing_vars:
71 whitelist.write(var)
72 whitelist.write("\n")
73 whitelist.close()
74
75 p = subprocess.Popen([
76 "stubtest",
77 # ignore variables that the user creates in their config that
78 # aren't in our default config list
79 "--ignore-missing-stub",
80 # use our whitelist to ignore stuff users didn't specify
81 "--whitelist", whitelist.name,
82 config_name,
83 ],
84 cwd=tempdir,
85 text=True,
86 env=newenv,
87 )
88 p.wait()
89 if p.returncode != 0:
90 sys.exit(1)
91
92
93 def type_check_config_args(config_file):
94 if shutil.which("mypy") is None:
95 print("mypy not found, can't type check config file"
96 "install it and try again")
97 return
98 try:
99 # we want to use Literal, which is in 3.8. If people have a mypy that
100 # is too old, they can upgrade; this is an optional check anyways.
101 subprocess.check_call(["mypy", "--python-version=3.8", config_file])
102 print("config file type checking succeeded")
103 except subprocess.CalledProcessError as e:
104 print("config file type checking failed: {}".format(e))
105 sys.exit(1)
106
107
108 def check_config(args):
109 print("checking qtile config file {}".format(args.configfile))
110
111 # need to do all the checking in a tempdir because we need to write stuff
112 # for stubtest
113 with tempfile.TemporaryDirectory() as tempdir:
114 tmp_path = path.join(tempdir, "config.py")
115 shutil.copy(args.configfile, tmp_path)
116
117 # are the top level config variables the right type?
118 type_check_config_vars(tempdir, "config")
119
120 # are arguments passed to qtile APIs correct?
121 type_check_config_args(tmp_path)
122
123 # can we load the config?
124 config = confreader.Config(args.configfile)
125 config.load()
126 config.validate()
127 print("config file can be loaded by qtile")
128
129
130 def add_subcommand(subparsers):
131 parser = subparsers.add_parser("check", help="Check a configuration file for errors")
132 parser.add_argument(
133 "-c", "--config",
134 action="store",
135 default=path.expanduser(path.join(
136 getenv('XDG_CONFIG_HOME', '~/.config'), 'qtile', 'config.py')),
137 dest="configfile",
138 help='Use the specified configuration file',
139 )
140 parser.set_defaults(func=check_config)
141
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/libqtile/scripts/check.py b/libqtile/scripts/check.py
--- a/libqtile/scripts/check.py
+++ b/libqtile/scripts/check.py
@@ -111,11 +111,12 @@
# need to do all the checking in a tempdir because we need to write stuff
# for stubtest
with tempfile.TemporaryDirectory() as tempdir:
- tmp_path = path.join(tempdir, "config.py")
- shutil.copy(args.configfile, tmp_path)
+ shutil.copytree(path.dirname(args.configfile), tempdir, dirs_exist_ok=True)
+ tmp_path = path.join(tempdir, path.basename(args.configfile))
# are the top level config variables the right type?
- type_check_config_vars(tempdir, "config")
+ module_name = path.splitext(path.basename(args.configfile))[0]
+ type_check_config_vars(tempdir, module_name)
# are arguments passed to qtile APIs correct?
type_check_config_args(tmp_path)
|
{"golden_diff": "diff --git a/libqtile/scripts/check.py b/libqtile/scripts/check.py\n--- a/libqtile/scripts/check.py\n+++ b/libqtile/scripts/check.py\n@@ -111,11 +111,12 @@\n # need to do all the checking in a tempdir because we need to write stuff\n # for stubtest\n with tempfile.TemporaryDirectory() as tempdir:\n- tmp_path = path.join(tempdir, \"config.py\")\n- shutil.copy(args.configfile, tmp_path)\n+ shutil.copytree(path.dirname(args.configfile), tempdir, dirs_exist_ok=True)\n+ tmp_path = path.join(tempdir, path.basename(args.configfile))\n \n # are the top level config variables the right type?\n- type_check_config_vars(tempdir, \"config\")\n+ module_name = path.splitext(path.basename(args.configfile))[0]\n+ type_check_config_vars(tempdir, module_name)\n \n # are arguments passed to qtile APIs correct?\n type_check_config_args(tmp_path)\n", "issue": "Copy entire config folder for qtile check\n<!--\r\nPlease do not ask general questions here! There are [community\r\ncontact](https://github.com/qtile/qtile#community) options for that.\r\n-->\r\n\r\n# Issue description\r\nMy config is split across two files, `config.py` (main config) and `custom.py` (custom widgets), but `qtile check` only copies the `config.py` file to the `/tmp/tmp\u2026/` folder; therefore, the `mypy` check fails because it cannot find the `custom` module. I can work around this by quickly copying the `custom.py` file to the folder, but this gets annoying.\r\n\r\nThis could be fixed by automatically copying the entire `~/.config/qtile` folder to the temporary folder, instead of just the `config.py`. It might also be necessary to remove the `__pycache__` folder if it is present.\r\n<!--\r\nA brief discussion of what failed and how it failed. A description of\r\nwhat you tried is helpful, i.e. \"When I use lazy.kill() on a window I get\r\nthe following stack trace\" instead of \"Closing windows doesn't work\".\r\n-->\r\n\r\n# Qtile version\r\nThe latest version on the Archlinux repos, `v0.17.0-1` (not sure about exact commit hash, sadly).\r\n<!--\r\nPlease include the exact commit hash of the version of Qtile that failed.\r\n-->\r\n\r\n# Configuration\r\nhttps://github.com/MithicSpirit/dotfiles/tree/master/.config/qtile\r\n<!--\r\nPlease include a link or attach your configuration to the issue.\r\n-->\r\n\n", "before_files": [{"content": "# Copyright (c) 2020, Tycho Andersen. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n# Set the locale before any widgets or anything are imported, so any widget\n# whose defaults depend on a reasonable locale sees something reasonable.\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\nfrom os import environ, getenv, path\n\nfrom libqtile import confreader\n\n\ndef type_check_config_vars(tempdir, config_name):\n if shutil.which(\"stubtest\") is None:\n print(\"stubtest not found, can't type check config file\\n\"\n \"install it and try again\")\n return\n\n # write a .pyi file to tempdir:\n f = open(path.join(tempdir, config_name+\".pyi\"), \"w\")\n f.write(confreader.config_pyi_header)\n for (name, type_) in confreader.Config.settings_keys:\n f.write(name)\n f.write(\": \")\n f.write(type_)\n f.write(\"\\n\")\n f.close()\n\n # need to tell python to look in pwd for modules\n newenv = environ.copy()\n newenv[\"PYTHONPATH\"] = newenv.get(\"PYTHONPATH\", \"\") + \":\"\n\n p = subprocess.Popen(\n [\"stubtest\", \"--concise\", config_name],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n cwd=tempdir,\n text=True,\n env=newenv,\n )\n stdout, stderr = p.communicate()\n missing_vars = []\n for line in (stdout+stderr).split(\"\\n\"):\n # filter out stuff that users didn't specify; they'll be imported from\n # the default config\n if \"is not present at runtime\" in line:\n missing_vars.append(line.split()[0])\n\n # write missing vars to a tempfile\n whitelist = open(path.join(tempdir, \"stubtest_whitelist\"), \"w\")\n for var in missing_vars:\n whitelist.write(var)\n whitelist.write(\"\\n\")\n whitelist.close()\n\n p = subprocess.Popen([\n \"stubtest\",\n # ignore variables that the user creates in their config that\n # aren't in our default config list\n \"--ignore-missing-stub\",\n # use our whitelist to ignore stuff users didn't specify\n \"--whitelist\", whitelist.name,\n config_name,\n ],\n cwd=tempdir,\n text=True,\n env=newenv,\n )\n p.wait()\n if p.returncode != 0:\n sys.exit(1)\n\n\ndef type_check_config_args(config_file):\n if shutil.which(\"mypy\") is None:\n print(\"mypy not found, can't type check config file\"\n \"install it and try again\")\n return\n try:\n # we want to use Literal, which is in 3.8. 
If people have a mypy that\n # is too old, they can upgrade; this is an optional check anyways.\n subprocess.check_call([\"mypy\", \"--python-version=3.8\", config_file])\n print(\"config file type checking succeeded\")\n except subprocess.CalledProcessError as e:\n print(\"config file type checking failed: {}\".format(e))\n sys.exit(1)\n\n\ndef check_config(args):\n print(\"checking qtile config file {}\".format(args.configfile))\n\n # need to do all the checking in a tempdir because we need to write stuff\n # for stubtest\n with tempfile.TemporaryDirectory() as tempdir:\n tmp_path = path.join(tempdir, \"config.py\")\n shutil.copy(args.configfile, tmp_path)\n\n # are the top level config variables the right type?\n type_check_config_vars(tempdir, \"config\")\n\n # are arguments passed to qtile APIs correct?\n type_check_config_args(tmp_path)\n\n # can we load the config?\n config = confreader.Config(args.configfile)\n config.load()\n config.validate()\n print(\"config file can be loaded by qtile\")\n\n\ndef add_subcommand(subparsers):\n parser = subparsers.add_parser(\"check\", help=\"Check a configuration file for errors\")\n parser.add_argument(\n \"-c\", \"--config\",\n action=\"store\",\n default=path.expanduser(path.join(\n getenv('XDG_CONFIG_HOME', '~/.config'), 'qtile', 'config.py')),\n dest=\"configfile\",\n help='Use the specified configuration file',\n )\n parser.set_defaults(func=check_config)\n", "path": "libqtile/scripts/check.py"}], "after_files": [{"content": "# Copyright (c) 2020, Tycho Andersen. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n# Set the locale before any widgets or anything are imported, so any widget\n# whose defaults depend on a reasonable locale sees something reasonable.\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\nfrom os import environ, getenv, path\n\nfrom libqtile import confreader\n\n\ndef type_check_config_vars(tempdir, config_name):\n if shutil.which(\"stubtest\") is None:\n print(\"stubtest not found, can't type check config file\\n\"\n \"install it and try again\")\n return\n\n # write a .pyi file to tempdir:\n f = open(path.join(tempdir, config_name+\".pyi\"), \"w\")\n f.write(confreader.config_pyi_header)\n for (name, type_) in confreader.Config.settings_keys:\n f.write(name)\n f.write(\": \")\n f.write(type_)\n f.write(\"\\n\")\n f.close()\n\n # need to tell python to look in pwd for modules\n newenv = environ.copy()\n newenv[\"PYTHONPATH\"] = newenv.get(\"PYTHONPATH\", \"\") + \":\"\n\n p = subprocess.Popen(\n [\"stubtest\", \"--concise\", config_name],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n cwd=tempdir,\n text=True,\n env=newenv,\n )\n stdout, stderr = p.communicate()\n missing_vars = []\n for line in (stdout+stderr).split(\"\\n\"):\n # filter out stuff that users didn't specify; they'll be imported from\n # the default config\n if \"is not present at runtime\" in line:\n missing_vars.append(line.split()[0])\n\n # write missing vars to a tempfile\n whitelist = open(path.join(tempdir, \"stubtest_whitelist\"), \"w\")\n for var in missing_vars:\n whitelist.write(var)\n whitelist.write(\"\\n\")\n whitelist.close()\n\n p = subprocess.Popen([\n \"stubtest\",\n # ignore variables that the user creates in their config that\n # aren't in our default config list\n \"--ignore-missing-stub\",\n # use our whitelist to ignore stuff users didn't specify\n \"--whitelist\", whitelist.name,\n config_name,\n ],\n cwd=tempdir,\n text=True,\n env=newenv,\n )\n p.wait()\n if p.returncode != 0:\n sys.exit(1)\n\n\ndef type_check_config_args(config_file):\n if shutil.which(\"mypy\") is None:\n print(\"mypy not found, can't type check config file\"\n \"install it and try again\")\n return\n try:\n # we want to use Literal, which is in 3.8. 
If people have a mypy that\n # is too old, they can upgrade; this is an optional check anyways.\n subprocess.check_call([\"mypy\", \"--python-version=3.8\", config_file])\n print(\"config file type checking succeeded\")\n except subprocess.CalledProcessError as e:\n print(\"config file type checking failed: {}\".format(e))\n sys.exit(1)\n\n\ndef check_config(args):\n print(\"checking qtile config file {}\".format(args.configfile))\n\n # need to do all the checking in a tempdir because we need to write stuff\n # for stubtest\n with tempfile.TemporaryDirectory() as tempdir:\n shutil.copytree(path.dirname(args.configfile), tempdir, dirs_exist_ok=True)\n tmp_path = path.join(tempdir, path.basename(args.configfile))\n\n # are the top level config variables the right type?\n module_name = path.splitext(path.basename(args.configfile))[0]\n type_check_config_vars(tempdir, module_name)\n\n # are arguments passed to qtile APIs correct?\n type_check_config_args(tmp_path)\n\n # can we load the config?\n config = confreader.Config(args.configfile)\n config.load()\n config.validate()\n print(\"config file can be loaded by qtile\")\n\n\ndef add_subcommand(subparsers):\n parser = subparsers.add_parser(\"check\", help=\"Check a configuration file for errors\")\n parser.add_argument(\n \"-c\", \"--config\",\n action=\"store\",\n default=path.expanduser(path.join(\n getenv('XDG_CONFIG_HOME', '~/.config'), 'qtile', 'config.py')),\n dest=\"configfile\",\n help='Use the specified configuration file',\n )\n parser.set_defaults(func=check_config)\n", "path": "libqtile/scripts/check.py"}]}
| 2,062 | 220 |
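Aside on the qtile record above: the golden diff replaces the single-file `shutil.copy` with a `shutil.copytree` of the whole config directory, so sibling modules such as `custom.py` stay importable during the check. A self-contained sketch of that pattern (illustrative only, not part of the record; assumes Python 3.8+ for `dirs_exist_ok`):

```python
import shutil
import tempfile
from os import path


def copy_config_dir(configfile: str) -> str:
    """Copy the whole config directory into a fresh tempdir and return the
    path of the copied config file, mirroring the patched check_config()."""
    tempdir = tempfile.mkdtemp()
    # dirs_exist_ok=True (Python 3.8+) allows copying into the already-created
    # temporary directory instead of requiring a non-existent destination.
    shutil.copytree(path.dirname(configfile), tempdir, dirs_exist_ok=True)
    return path.join(tempdir, path.basename(configfile))


# Hypothetical usage:
# tmp_config = copy_config_dir(path.expanduser("~/.config/qtile/config.py"))
```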
gh_patches_debug_4366
|
rasdani/github-patches
|
git_diff
|
ephios-dev__ephios-1078
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Notification Mails detected as SPAM
**Describe the bug**
This is likely specific to the exact email setup users have, but in my case the notification mails are caught by the SpamAssassin rule `TO_NO_BRKTS_HTML_IMG`, which is discussed [here](https://stackoverflow.com/questions/40082594/meaning-of-to-no-brkts-html-img-in-spamassassin). As a fix for this, it would be useful if ephios used the configured first and last name to properly address the users in the `To:` field of the email.
**To Reproduce**
Make ephios send a notification email.
**Expected behavior**
Emails should arrive in my inbox without being detected as SPAM. This could be done by modifying the `To:` field.
**Screenshots**
\-
**Environment**
An email SPAM filter using SpamAssassin
**Additional context**
\-
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ephios/core/services/notifications/backends.py`
Content:
```
1 import logging
2 import smtplib
3 import traceback
4
5 from django.conf import settings
6 from django.core.mail import mail_admins
7 from django.utils.translation import gettext_lazy as _
8 from webpush import send_user_notification
9
10 from ephios.core.models.users import Notification
11 from ephios.core.services.mail.send import send_mail
12
13 logger = logging.getLogger(__name__)
14
15
16 def installed_notification_backends():
17 from ephios.core.signals import register_notification_backends
18
19 for _, backends in register_notification_backends.send_to_all_plugins(None):
20 yield from (b() for b in backends)
21
22
23 def enabled_notification_backends():
24 from ephios.core.signals import register_notification_backends
25
26 for _, backends in register_notification_backends.send(None):
27 yield from (b() for b in backends)
28
29
30 def send_all_notifications():
31 for backend in installed_notification_backends():
32 for notification in Notification.objects.filter(failed=False):
33 if backend.can_send(notification) and backend.user_prefers_sending(notification):
34 try:
35 backend.send(notification)
36 except Exception as e: # pylint: disable=broad-except
37 if settings.DEBUG:
38 raise e
39 notification.failed = True
40 notification.save()
41 try:
42 mail_admins(
43 "Notification sending failed",
44 f"Notification: {notification}\nException: {e}\n{traceback.format_exc()}",
45 )
46 except smtplib.SMTPConnectError:
47 pass # if the mail backend threw this, mail admin will probably throw this as well
48 logger.warning(
49 f"Notification sending failed for notification object #{notification.pk} ({notification}) for backend {backend} with {e}"
50 )
51 Notification.objects.filter(failed=False).delete()
52
53
54 class AbstractNotificationBackend:
55 @property
56 def slug(self):
57 return NotImplementedError
58
59 @property
60 def title(self):
61 return NotImplementedError
62
63 @classmethod
64 def can_send(cls, notification):
65 return notification.user is not None
66
67 @classmethod
68 def user_prefers_sending(cls, notification):
69 if notification.notification_type.unsubscribe_allowed and notification.user is not None:
70 if not notification.user.is_active:
71 return False
72 backends = notification.user.preferences["notifications__notifications"].get(
73 notification.slug
74 )
75 if backends is not None:
76 return cls.slug in backends
77 return True
78
79 @classmethod
80 def send(cls, notification: Notification):
81 raise NotImplementedError
82
83
84 class EmailNotificationBackend(AbstractNotificationBackend):
85 slug = "ephios_backend_email"
86 title = _("via email")
87
88 @classmethod
89 def can_send(cls, notification):
90 return notification.user is not None or "email" in notification.data
91
92 @classmethod
93 def _get_mailaddress(cls, notification):
94 return notification.user.email if notification.user else notification.data.get("email")
95
96 @classmethod
97 def send(cls, notification):
98 send_mail(
99 to=[cls._get_mailaddress(notification)],
100 subject=notification.subject,
101 plaintext=notification.as_plaintext(),
102 html=notification.as_html(),
103 is_autogenerated=True,
104 )
105
106
107 class WebPushNotificationBackend(AbstractNotificationBackend):
108 slug = "ephios_backend_webpush"
109 title = _("via push notification")
110
111 @classmethod
112 def send(cls, notification):
113 payload = {
114 "head": str(notification.subject),
115 "body": notification.body,
116 "icon": "/static/ephios/img/ephios-symbol-red.svg",
117 }
118 if actions := notification.get_actions():
119 payload["url"] = actions[0][1]
120 send_user_notification(user=notification.user, payload=payload, ttl=1000)
121
122
123 CORE_NOTIFICATION_BACKENDS = [EmailNotificationBackend, WebPushNotificationBackend]
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ephios/core/services/notifications/backends.py b/ephios/core/services/notifications/backends.py
--- a/ephios/core/services/notifications/backends.py
+++ b/ephios/core/services/notifications/backends.py
@@ -91,7 +91,9 @@
@classmethod
def _get_mailaddress(cls, notification):
- return notification.user.email if notification.user else notification.data.get("email")
+ if notification.user:
+ return f"{notification.user.get_full_name()} <{notification.user.email}>"
+ return notification.data.get("email")
@classmethod
def send(cls, notification):
|
{"golden_diff": "diff --git a/ephios/core/services/notifications/backends.py b/ephios/core/services/notifications/backends.py\n--- a/ephios/core/services/notifications/backends.py\n+++ b/ephios/core/services/notifications/backends.py\n@@ -91,7 +91,9 @@\n \n @classmethod\n def _get_mailaddress(cls, notification):\n- return notification.user.email if notification.user else notification.data.get(\"email\")\n+ if notification.user:\n+ return f\"{notification.user.get_full_name()} <{notification.user.email}>\"\n+ return notification.data.get(\"email\")\n \n @classmethod\n def send(cls, notification):\n", "issue": "Notification Mails detected as SPAM\n**Describe the bug**\r\nThis is likely specific to the exact email setup users have but in my case, the notification mails in my case are caught by the SpamAssassin rule `TO_NO_BRKTS_HTML_IMG` which is discussed [here](https://stackoverflow.com/questions/40082594/meaning-of-to-no-brkts-html-img-in-spamassassin). As a fix for this, it would be useful if ephios used the configured first and last name to properly address the users in the `To:` field of the email. \r\n\r\n**To Reproduce**\r\nMake ephios send a notification email.\r\n\r\n**Expected behavior**\r\nEmails should arrive in my inbox without being detected as SPAM. This could be done by modifying the `To:` field. \r\n\r\n**Screenshots**\r\n\\-\r\n\r\n**Environment**\r\nAn email SPAM filter using SpamAssassin\r\n\r\n**Additional context**\r\n\\-\r\n\n", "before_files": [{"content": "import logging\nimport smtplib\nimport traceback\n\nfrom django.conf import settings\nfrom django.core.mail import mail_admins\nfrom django.utils.translation import gettext_lazy as _\nfrom webpush import send_user_notification\n\nfrom ephios.core.models.users import Notification\nfrom ephios.core.services.mail.send import send_mail\n\nlogger = logging.getLogger(__name__)\n\n\ndef installed_notification_backends():\n from ephios.core.signals import register_notification_backends\n\n for _, backends in register_notification_backends.send_to_all_plugins(None):\n yield from (b() for b in backends)\n\n\ndef enabled_notification_backends():\n from ephios.core.signals import register_notification_backends\n\n for _, backends in register_notification_backends.send(None):\n yield from (b() for b in backends)\n\n\ndef send_all_notifications():\n for backend in installed_notification_backends():\n for notification in Notification.objects.filter(failed=False):\n if backend.can_send(notification) and backend.user_prefers_sending(notification):\n try:\n backend.send(notification)\n except Exception as e: # pylint: disable=broad-except\n if settings.DEBUG:\n raise e\n notification.failed = True\n notification.save()\n try:\n mail_admins(\n \"Notification sending failed\",\n f\"Notification: {notification}\\nException: {e}\\n{traceback.format_exc()}\",\n )\n except smtplib.SMTPConnectError:\n pass # if the mail backend threw this, mail admin will probably throw this as well\n logger.warning(\n f\"Notification sending failed for notification object #{notification.pk} ({notification}) for backend {backend} with {e}\"\n )\n Notification.objects.filter(failed=False).delete()\n\n\nclass AbstractNotificationBackend:\n @property\n def slug(self):\n return NotImplementedError\n\n @property\n def title(self):\n return NotImplementedError\n\n @classmethod\n def can_send(cls, notification):\n return notification.user is not None\n\n @classmethod\n def user_prefers_sending(cls, notification):\n if 
notification.notification_type.unsubscribe_allowed and notification.user is not None:\n if not notification.user.is_active:\n return False\n backends = notification.user.preferences[\"notifications__notifications\"].get(\n notification.slug\n )\n if backends is not None:\n return cls.slug in backends\n return True\n\n @classmethod\n def send(cls, notification: Notification):\n raise NotImplementedError\n\n\nclass EmailNotificationBackend(AbstractNotificationBackend):\n slug = \"ephios_backend_email\"\n title = _(\"via email\")\n\n @classmethod\n def can_send(cls, notification):\n return notification.user is not None or \"email\" in notification.data\n\n @classmethod\n def _get_mailaddress(cls, notification):\n return notification.user.email if notification.user else notification.data.get(\"email\")\n\n @classmethod\n def send(cls, notification):\n send_mail(\n to=[cls._get_mailaddress(notification)],\n subject=notification.subject,\n plaintext=notification.as_plaintext(),\n html=notification.as_html(),\n is_autogenerated=True,\n )\n\n\nclass WebPushNotificationBackend(AbstractNotificationBackend):\n slug = \"ephios_backend_webpush\"\n title = _(\"via push notification\")\n\n @classmethod\n def send(cls, notification):\n payload = {\n \"head\": str(notification.subject),\n \"body\": notification.body,\n \"icon\": \"/static/ephios/img/ephios-symbol-red.svg\",\n }\n if actions := notification.get_actions():\n payload[\"url\"] = actions[0][1]\n send_user_notification(user=notification.user, payload=payload, ttl=1000)\n\n\nCORE_NOTIFICATION_BACKENDS = [EmailNotificationBackend, WebPushNotificationBackend]\n", "path": "ephios/core/services/notifications/backends.py"}], "after_files": [{"content": "import logging\nimport smtplib\nimport traceback\n\nfrom django.conf import settings\nfrom django.core.mail import mail_admins\nfrom django.utils.translation import gettext_lazy as _\nfrom webpush import send_user_notification\n\nfrom ephios.core.models.users import Notification\nfrom ephios.core.services.mail.send import send_mail\n\nlogger = logging.getLogger(__name__)\n\n\ndef installed_notification_backends():\n from ephios.core.signals import register_notification_backends\n\n for _, backends in register_notification_backends.send_to_all_plugins(None):\n yield from (b() for b in backends)\n\n\ndef enabled_notification_backends():\n from ephios.core.signals import register_notification_backends\n\n for _, backends in register_notification_backends.send(None):\n yield from (b() for b in backends)\n\n\ndef send_all_notifications():\n for backend in installed_notification_backends():\n for notification in Notification.objects.filter(failed=False):\n if backend.can_send(notification) and backend.user_prefers_sending(notification):\n try:\n backend.send(notification)\n except Exception as e: # pylint: disable=broad-except\n if settings.DEBUG:\n raise e\n notification.failed = True\n notification.save()\n try:\n mail_admins(\n \"Notification sending failed\",\n f\"Notification: {notification}\\nException: {e}\\n{traceback.format_exc()}\",\n )\n except smtplib.SMTPConnectError:\n pass # if the mail backend threw this, mail admin will probably throw this as well\n logger.warning(\n f\"Notification sending failed for notification object #{notification.pk} ({notification}) for backend {backend} with {e}\"\n )\n Notification.objects.filter(failed=False).delete()\n\n\nclass AbstractNotificationBackend:\n @property\n def slug(self):\n return NotImplementedError\n\n @property\n def title(self):\n return 
NotImplementedError\n\n @classmethod\n def can_send(cls, notification):\n return notification.user is not None\n\n @classmethod\n def user_prefers_sending(cls, notification):\n if notification.notification_type.unsubscribe_allowed and notification.user is not None:\n if not notification.user.is_active:\n return False\n backends = notification.user.preferences[\"notifications__notifications\"].get(\n notification.slug\n )\n if backends is not None:\n return cls.slug in backends\n return True\n\n @classmethod\n def send(cls, notification: Notification):\n raise NotImplementedError\n\n\nclass EmailNotificationBackend(AbstractNotificationBackend):\n slug = \"ephios_backend_email\"\n title = _(\"via email\")\n\n @classmethod\n def can_send(cls, notification):\n return notification.user is not None or \"email\" in notification.data\n\n @classmethod\n def _get_mailaddress(cls, notification):\n if notification.user:\n return f\"{notification.user.get_full_name()} <{notification.user.email}>\"\n return notification.data.get(\"email\")\n\n @classmethod\n def send(cls, notification):\n send_mail(\n to=[cls._get_mailaddress(notification)],\n subject=notification.subject,\n plaintext=notification.as_plaintext(),\n html=notification.as_html(),\n is_autogenerated=True,\n )\n\n\nclass WebPushNotificationBackend(AbstractNotificationBackend):\n slug = \"ephios_backend_webpush\"\n title = _(\"via push notification\")\n\n @classmethod\n def send(cls, notification):\n payload = {\n \"head\": str(notification.subject),\n \"body\": notification.body,\n \"icon\": \"/static/ephios/img/ephios-symbol-red.svg\",\n }\n if actions := notification.get_actions():\n payload[\"url\"] = actions[0][1]\n send_user_notification(user=notification.user, payload=payload, ttl=1000)\n\n\nCORE_NOTIFICATION_BACKENDS = [EmailNotificationBackend, WebPushNotificationBackend]\n", "path": "ephios/core/services/notifications/backends.py"}]}
| 1,520 | 140 |
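Aside on the ephios record above: the golden diff builds the recipient as `"Full Name <address>"`, which is the change the issue suggests to keep SpamAssassin's `TO_NO_BRKTS_HTML_IMG` rule from firing on a bare address. A standard-library sketch of the same idea (illustrative only, not part of the record; `email.utils.formataddr` additionally quotes names containing commas or other specials):

```python
from email.utils import formataddr


def to_header(full_name: str, email_address: str) -> str:
    """Build a 'Display Name <address>' recipient string for the To: field."""
    return formataddr((full_name, email_address))


print(to_header("Jane Doe", "jane@example.org"))   # Jane Doe <jane@example.org>
print(to_header("Doe, Jane", "jane@example.org"))  # "Doe, Jane" <jane@example.org>
```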
gh_patches_debug_9171
|
rasdani/github-patches
|
git_diff
|
nilearn__nilearn-4267
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Adding tests for rapid inspection of visual elements
**What would you like changed/added and why?**
Quoting @jeromedockes here, from https://github.com/nilearn/nilearn/pull/2191#issuecomment-546278888
Since we are adding reports etc. we should add more meaningful inspection of generated plots and reports in the tests
**What would be the benefit? Does the change make something easier to use?**
I think CircleCI does all this, but it takes way too long. Maybe a faster, simpler test that quickly generates plots and reports for visual examination would be useful?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nilearn/reporting/__init__.py`
Content:
```
1 """Reporting code for nilearn.
2
3 This module implements plotting functions useful to report analysis results.
4
5 Author: Martin Perez-Guevara, Elvis Dohmatob, 2017
6 """
7
8 from nilearn.reporting.get_clusters_table import get_clusters_table
9 from nilearn.reporting.glm_reporter import make_glm_report
10 from nilearn.reporting.html_report import HTMLReport
11
12 __all__ = [
13 "HTMLReport",
14 "get_clusters_table",
15 "make_glm_report",
16 ]
17
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/nilearn/reporting/__init__.py b/nilearn/reporting/__init__.py
--- a/nilearn/reporting/__init__.py
+++ b/nilearn/reporting/__init__.py
@@ -1,10 +1,10 @@
"""Reporting code for nilearn.
This module implements plotting functions useful to report analysis results.
-
-Author: Martin Perez-Guevara, Elvis Dohmatob, 2017
"""
+# Author: Martin Perez-Guevara, Elvis Dohmatob, 2017
+
from nilearn.reporting.get_clusters_table import get_clusters_table
from nilearn.reporting.glm_reporter import make_glm_report
from nilearn.reporting.html_report import HTMLReport
|
{"golden_diff": "diff --git a/nilearn/reporting/__init__.py b/nilearn/reporting/__init__.py\n--- a/nilearn/reporting/__init__.py\n+++ b/nilearn/reporting/__init__.py\n@@ -1,10 +1,10 @@\n \"\"\"Reporting code for nilearn.\n \n This module implements plotting functions useful to report analysis results.\n-\n-Author: Martin Perez-Guevara, Elvis Dohmatob, 2017\n \"\"\"\n \n+# Author: Martin Perez-Guevara, Elvis Dohmatob, 2017\n+\n from nilearn.reporting.get_clusters_table import get_clusters_table\n from nilearn.reporting.glm_reporter import make_glm_report\n from nilearn.reporting.html_report import HTMLReport\n", "issue": "Adding tests for rapid inspection of visual elements\n**What would you like changed/added and why?**\r\n\r\nQuoting @jeromedockes here, from https://github.com/nilearn/nilearn/pull/2191#issuecomment-546278888\r\nSince we are adding reports etc. we should add more meaningful inspection of generated plots and reports in the tests\r\n\r\n**What would be the benefit? Does the change make something easier to use?**\r\n\r\n I think CircleCI does all this but it takes way too long. Maybe a faster simpler test to quickly generate plots and reports to visually examine will be useful?\n", "before_files": [{"content": "\"\"\"Reporting code for nilearn.\n\nThis module implements plotting functions useful to report analysis results.\n\nAuthor: Martin Perez-Guevara, Elvis Dohmatob, 2017\n\"\"\"\n\nfrom nilearn.reporting.get_clusters_table import get_clusters_table\nfrom nilearn.reporting.glm_reporter import make_glm_report\nfrom nilearn.reporting.html_report import HTMLReport\n\n__all__ = [\n \"HTMLReport\",\n \"get_clusters_table\",\n \"make_glm_report\",\n]\n", "path": "nilearn/reporting/__init__.py"}], "after_files": [{"content": "\"\"\"Reporting code for nilearn.\n\nThis module implements plotting functions useful to report analysis results.\n\"\"\"\n\n# Author: Martin Perez-Guevara, Elvis Dohmatob, 2017\n\nfrom nilearn.reporting.get_clusters_table import get_clusters_table\nfrom nilearn.reporting.glm_reporter import make_glm_report\nfrom nilearn.reporting.html_report import HTMLReport\n\n__all__ = [\n \"HTMLReport\",\n \"get_clusters_table\",\n \"make_glm_report\",\n]\n", "path": "nilearn/reporting/__init__.py"}]}
| 531 | 165 |
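Aside on the nilearn record above: the issue asks for fast, meaningful checks of generated plots and reports rather than slow CircleCI artifact inspection. One common approach is a headless smoke test that asserts on the structure of a figure instead of comparing pixels; a generic sketch of that idea (illustrative only, not nilearn's actual test suite):

```python
import matplotlib
matplotlib.use("Agg")  # headless backend: no window, safe on CI
import matplotlib.pyplot as plt


def test_plot_has_expected_elements():
    fig, ax = plt.subplots()
    ax.plot([0, 1], [0, 1], label="identity")
    ax.legend()
    # Cheap structural assertions instead of slow pixel-level comparisons.
    assert len(ax.lines) == 1
    assert ax.get_legend() is not None
    plt.close(fig)
```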
gh_patches_debug_64400
|
rasdani/github-patches
|
git_diff
|
pytorch__pytorch-4788
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
unsupported Python type error from set_default_tensor_type()
Hi,
I've updated pytorch to the latest master (`a54407a7`) from GitHub, but got an error when I use `torch.set_default_tensor_type("torch.cuda.FloatTensor")`:
```
terminate called after throwing an instance of 'std::invalid_argument'
what(): unsupported Python type
Aborted (core dumped)
```
What to do for workaround? Thanks!
Jin
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torch/__init__.py`
Content:
```
1 """
2 The torch package contains data structures for multi-dimensional
3 tensors and mathematical operations over these are defined.
4 Additionally, it provides many utilities for efficient serializing of
5 Tensors and arbitrary types, and other useful utilities.
6
7 It has a CUDA counterpart, that enables you to run your tensor computations
8 on an NVIDIA GPU with compute capability >= 3.0.
9 """
10
11 import sys
12 import platform
13 from ._utils import _import_dotted_name
14 from .version import __version__
15
16 __all__ = [
17 'typename', 'is_tensor', 'is_storage', 'set_default_tensor_type',
18 'set_rng_state', 'get_rng_state', 'manual_seed', 'initial_seed',
19 'save', 'load', 'set_printoptions', 'chunk', 'split', 'stack', 'matmul',
20 'no_grad', 'enable_grad',
21 'DoubleStorage', 'FloatStorage', 'LongStorage', 'IntStorage',
22 'ShortStorage', 'CharStorage', 'ByteStorage',
23 'DoubleTensor', 'FloatTensor', 'LongTensor', 'IntTensor',
24 'ShortTensor', 'CharTensor', 'ByteTensor',
25 ]
26
27 ################################################################################
28 # Load the extension module
29 ################################################################################
30
31 # Loading the extension with RTLD_GLOBAL option allows to not link extension
32 # modules against the _C shared object. Their missing THP symbols will be
33 # automatically filled by the dynamic loader.
34 import os as _dl_flags
35
36 # if we have numpy, it *must* be imported before the call to setdlopenflags()
37 # or there is risk that later c modules will segfault when importing numpy
38 try:
39 import numpy as _np
40 except ImportError:
41 pass
42
43 if platform.system() == 'Windows':
44 # first get nvToolsExt PATH
45 def get_nvToolsExt_path():
46 NVTOOLEXT_HOME = _dl_flags.getenv('NVTOOLSEXT_PATH', 'C:\\Program Files\\NVIDIA Corporation\\NvToolsExt')
47
48 if _dl_flags.path.exists(NVTOOLEXT_HOME):
49 return NVTOOLEXT_HOME + '\\bin\\x64\\'
50 else:
51 return ''
52
53 # then add the path to env
54 _dl_flags.environ['PATH'] = _dl_flags.path.dirname(
55 __file__) + '\\lib\\;' + get_nvToolsExt_path() + ';' + _dl_flags.environ['PATH']
56
57 else:
58 # first check if the os package has the required flags
59 if not hasattr(_dl_flags, 'RTLD_GLOBAL') or not hasattr(_dl_flags, 'RTLD_LAZY'):
60 try:
61 # next try if DLFCN exists
62 import DLFCN as _dl_flags
63 except ImportError:
64 # as a last attempt, use compile-time constants
65 import torch._dl as _dl_flags
66
67 old_flags = sys.getdlopenflags()
68 sys.setdlopenflags(_dl_flags.RTLD_GLOBAL | _dl_flags.RTLD_LAZY)
69
70 del _dl_flags
71
72 try:
73 import torch._nvrtc
74 except ImportError:
75 pass
76
77 from torch._C import *
78
79 __all__ += [name for name in dir(_C)
80 if name[0] != '_' and
81 not name.endswith('Base')]
82
83 if platform.system() != 'Windows':
84 sys.setdlopenflags(old_flags)
85 del old_flags
86
87 ################################################################################
88 # Define basic utilities
89 ################################################################################
90
91
92 def typename(o):
93 module = ''
94 class_name = ''
95 if hasattr(o, '__module__') and o.__module__ != 'builtins' \
96 and o.__module__ != '__builtin__' and o.__module__ is not None:
97 module = o.__module__ + '.'
98
99 if hasattr(o, '__qualname__'):
100 class_name = o.__qualname__
101 elif hasattr(o, '__name__'):
102 class_name = o.__name__
103 else:
104 class_name = o.__class__.__name__
105
106 return module + class_name
107
108
109 def is_tensor(obj):
110 r"""Returns True if `obj` is a pytorch tensor.
111
112 Args:
113 obj (Object): Object to test
114 """
115 return type(obj) in _tensor_classes
116
117
118 def is_storage(obj):
119 r"""Returns True if `obj` is a pytorch storage object.
120
121 Args:
122 obj (Object): Object to test
123 """
124 return type(obj) in _storage_classes
125
126
127 def set_default_tensor_type(t):
128 global Tensor
129 global Storage
130 Tensor = _import_dotted_name(t)
131 Storage = _import_dotted_name(t.replace('Tensor', 'Storage'))
132 _C._set_default_tensor_type(Tensor)
133
134
135 from .random import set_rng_state, get_rng_state, manual_seed, initial_seed
136 from .serialization import save, load
137 from ._tensor_str import set_printoptions
138
139 ################################################################################
140 # Define Storage and Tensor classes
141 ################################################################################
142
143 from .storage import _StorageBase
144 from .tensor import _TensorBase
145
146
147 class DoubleStorage(_C.DoubleStorageBase, _StorageBase):
148 pass
149
150
151 class FloatStorage(_C.FloatStorageBase, _StorageBase):
152 pass
153
154
155 class HalfStorage(_C.HalfStorageBase, _StorageBase):
156 pass
157
158
159 class LongStorage(_C.LongStorageBase, _StorageBase):
160 pass
161
162
163 class IntStorage(_C.IntStorageBase, _StorageBase):
164 pass
165
166
167 class ShortStorage(_C.ShortStorageBase, _StorageBase):
168 pass
169
170
171 class CharStorage(_C.CharStorageBase, _StorageBase):
172 pass
173
174
175 class ByteStorage(_C.ByteStorageBase, _StorageBase):
176 pass
177
178
179 class DoubleTensor(_C.DoubleTensorBase, _TensorBase):
180
181 def is_signed(self):
182 return True
183
184 @classmethod
185 def storage_type(cls):
186 return DoubleStorage
187
188
189 class FloatTensor(_C.FloatTensorBase, _TensorBase):
190
191 def is_signed(self):
192 return True
193
194 @classmethod
195 def storage_type(cls):
196 return FloatStorage
197
198
199 class HalfTensor(_C.HalfTensorBase, _TensorBase):
200
201 def is_signed(self):
202 return True
203
204 @classmethod
205 def storage_type(cls):
206 return HalfStorage
207
208
209 class LongTensor(_C.LongTensorBase, _TensorBase):
210
211 def is_signed(self):
212 return True
213
214 @classmethod
215 def storage_type(cls):
216 return LongStorage
217
218
219 class IntTensor(_C.IntTensorBase, _TensorBase):
220
221 def is_signed(self):
222 return True
223
224 @classmethod
225 def storage_type(cls):
226 return IntStorage
227
228
229 class ShortTensor(_C.ShortTensorBase, _TensorBase):
230
231 def is_signed(self):
232 return True
233
234 @classmethod
235 def storage_type(cls):
236 return ShortStorage
237
238
239 class CharTensor(_C.CharTensorBase, _TensorBase):
240
241 def is_signed(self):
242 # TODO
243 return False
244
245 @classmethod
246 def storage_type(cls):
247 return CharStorage
248
249
250 class ByteTensor(_C.ByteTensorBase, _TensorBase):
251
252 def is_signed(self):
253 return False
254
255 @classmethod
256 def storage_type(cls):
257 return ByteStorage
258
259
260 _storage_classes = {
261 DoubleStorage, FloatStorage, LongStorage, IntStorage, ShortStorage,
262 CharStorage, ByteStorage, HalfStorage
263 }
264
265 _tensor_classes = {
266 DoubleTensor, FloatTensor, LongTensor, IntTensor, ShortTensor,
267 CharTensor, ByteTensor, HalfTensor
268 }
269
270 _integer_tensor_classes = {
271 LongTensor, IntTensor, ShortTensor, CharTensor, ByteTensor
272 }
273
274
275 ################################################################################
276 # Import interface functions defined in Python
277 ################################################################################
278
279 from .functional import *
280
281
282 ################################################################################
283 # Initialize extension
284 ################################################################################
285
286 def manager_path():
287 if platform.system() == 'Windows':
288 return b""
289 import os
290 path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'lib', 'torch_shm_manager')
291 if not os.path.exists(path):
292 raise RuntimeError("Unable to find torch_shm_manager at " + path)
293 return path.encode('utf-8')
294
295
296 # Shared memory manager needs to know the exact location of manager executable
297 _C._initExtension(manager_path())
298 del manager_path
299
300 set_default_tensor_type('torch.FloatTensor')
301
302 ################################################################################
303 # Remove unnecessary members
304 ################################################################################
305
306 del DoubleStorageBase
307 del FloatStorageBase
308 del LongStorageBase
309 del IntStorageBase
310 del ShortStorageBase
311 del CharStorageBase
312 del ByteStorageBase
313 del DoubleTensorBase
314 del FloatTensorBase
315 del LongTensorBase
316 del IntTensorBase
317 del ShortTensorBase
318 del CharTensorBase
319 del ByteTensorBase
320
321 del SparseDoubleTensorBase
322 del SparseFloatTensorBase
323 del SparseLongTensorBase
324 del SparseIntTensorBase
325 del SparseShortTensorBase
326 del SparseCharTensorBase
327 del SparseByteTensorBase
328
329 ################################################################################
330 # Import most common subpackages
331 ################################################################################
332
333 import torch.cuda
334 import torch.autograd
335 import torch.nn
336 import torch.optim
337 import torch.multiprocessing
338 import torch.sparse
339 import torch.utils.backcompat
340 import torch.onnx
341 import torch.random
342 import torch.distributions
343 import torch.testing
344 from torch.autograd import no_grad, enable_grad
345
346 _C._init_names(list(torch._tensor_classes) + list(torch._storage_classes))
347
348 # attach docstrings to torch and tensor functions
349 from . import _torch_docs, _tensor_docs, _storage_docs
350 del _torch_docs, _tensor_docs, _storage_docs
351
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/torch/__init__.py b/torch/__init__.py
--- a/torch/__init__.py
+++ b/torch/__init__.py
@@ -129,6 +129,11 @@
global Storage
Tensor = _import_dotted_name(t)
Storage = _import_dotted_name(t.replace('Tensor', 'Storage'))
+
+ if 'cuda' in t:
+ import torch.cuda
+ torch.cuda.init()
+
_C._set_default_tensor_type(Tensor)
|
{"golden_diff": "diff --git a/torch/__init__.py b/torch/__init__.py\n--- a/torch/__init__.py\n+++ b/torch/__init__.py\n@@ -129,6 +129,11 @@\n global Storage\n Tensor = _import_dotted_name(t)\n Storage = _import_dotted_name(t.replace('Tensor', 'Storage'))\n+\n+ if 'cuda' in t:\n+ import torch.cuda\n+ torch.cuda.init()\n+\n _C._set_default_tensor_type(Tensor)\n", "issue": "unsupported Python type error from set_default_tensor_type()\nHi,\r\nI've updated pytorch to the lastest master (`a54407a7`) from github, but got an error when I use `torch.set_default_tensor_type(\"torch.cuda.FloatTensor\")`:\r\n```\r\nterminate called after throwing an instance of 'std::invalid_argument'\r\n what(): unsupported Python type\r\nAborted (core dumped)\r\n```\r\n\r\nWhat to do for workaround? Thanks!\r\nJin\n", "before_files": [{"content": "\"\"\"\nThe torch package contains data structures for multi-dimensional\ntensors and mathematical operations over these are defined.\nAdditionally, it provides many utilities for efficient serializing of\nTensors and arbitrary types, and other useful utilities.\n\nIt has a CUDA counterpart, that enables you to run your tensor computations\non an NVIDIA GPU with compute capability >= 3.0.\n\"\"\"\n\nimport sys\nimport platform\nfrom ._utils import _import_dotted_name\nfrom .version import __version__\n\n__all__ = [\n 'typename', 'is_tensor', 'is_storage', 'set_default_tensor_type',\n 'set_rng_state', 'get_rng_state', 'manual_seed', 'initial_seed',\n 'save', 'load', 'set_printoptions', 'chunk', 'split', 'stack', 'matmul',\n 'no_grad', 'enable_grad',\n 'DoubleStorage', 'FloatStorage', 'LongStorage', 'IntStorage',\n 'ShortStorage', 'CharStorage', 'ByteStorage',\n 'DoubleTensor', 'FloatTensor', 'LongTensor', 'IntTensor',\n 'ShortTensor', 'CharTensor', 'ByteTensor',\n]\n\n################################################################################\n# Load the extension module\n################################################################################\n\n# Loading the extension with RTLD_GLOBAL option allows to not link extension\n# modules against the _C shared object. 
Their missing THP symbols will be\n# automatically filled by the dynamic loader.\nimport os as _dl_flags\n\n# if we have numpy, it *must* be imported before the call to setdlopenflags()\n# or there is risk that later c modules will segfault when importing numpy\ntry:\n import numpy as _np\nexcept ImportError:\n pass\n\nif platform.system() == 'Windows':\n # first get nvToolsExt PATH\n def get_nvToolsExt_path():\n NVTOOLEXT_HOME = _dl_flags.getenv('NVTOOLSEXT_PATH', 'C:\\\\Program Files\\\\NVIDIA Corporation\\\\NvToolsExt')\n\n if _dl_flags.path.exists(NVTOOLEXT_HOME):\n return NVTOOLEXT_HOME + '\\\\bin\\\\x64\\\\'\n else:\n return ''\n\n # then add the path to env\n _dl_flags.environ['PATH'] = _dl_flags.path.dirname(\n __file__) + '\\\\lib\\\\;' + get_nvToolsExt_path() + ';' + _dl_flags.environ['PATH']\n\nelse:\n # first check if the os package has the required flags\n if not hasattr(_dl_flags, 'RTLD_GLOBAL') or not hasattr(_dl_flags, 'RTLD_LAZY'):\n try:\n # next try if DLFCN exists\n import DLFCN as _dl_flags\n except ImportError:\n # as a last attempt, use compile-time constants\n import torch._dl as _dl_flags\n\n old_flags = sys.getdlopenflags()\n sys.setdlopenflags(_dl_flags.RTLD_GLOBAL | _dl_flags.RTLD_LAZY)\n\ndel _dl_flags\n\ntry:\n import torch._nvrtc\nexcept ImportError:\n pass\n\nfrom torch._C import *\n\n__all__ += [name for name in dir(_C)\n if name[0] != '_' and\n not name.endswith('Base')]\n\nif platform.system() != 'Windows':\n sys.setdlopenflags(old_flags)\n del old_flags\n\n################################################################################\n# Define basic utilities\n################################################################################\n\n\ndef typename(o):\n module = ''\n class_name = ''\n if hasattr(o, '__module__') and o.__module__ != 'builtins' \\\n and o.__module__ != '__builtin__' and o.__module__ is not None:\n module = o.__module__ + '.'\n\n if hasattr(o, '__qualname__'):\n class_name = o.__qualname__\n elif hasattr(o, '__name__'):\n class_name = o.__name__\n else:\n class_name = o.__class__.__name__\n\n return module + class_name\n\n\ndef is_tensor(obj):\n r\"\"\"Returns True if `obj` is a pytorch tensor.\n\n Args:\n obj (Object): Object to test\n \"\"\"\n return type(obj) in _tensor_classes\n\n\ndef is_storage(obj):\n r\"\"\"Returns True if `obj` is a pytorch storage object.\n\n Args:\n obj (Object): Object to test\n \"\"\"\n return type(obj) in _storage_classes\n\n\ndef set_default_tensor_type(t):\n global Tensor\n global Storage\n Tensor = _import_dotted_name(t)\n Storage = _import_dotted_name(t.replace('Tensor', 'Storage'))\n _C._set_default_tensor_type(Tensor)\n\n\nfrom .random import set_rng_state, get_rng_state, manual_seed, initial_seed\nfrom .serialization import save, load\nfrom ._tensor_str import set_printoptions\n\n################################################################################\n# Define Storage and Tensor classes\n################################################################################\n\nfrom .storage import _StorageBase\nfrom .tensor import _TensorBase\n\n\nclass DoubleStorage(_C.DoubleStorageBase, _StorageBase):\n pass\n\n\nclass FloatStorage(_C.FloatStorageBase, _StorageBase):\n pass\n\n\nclass HalfStorage(_C.HalfStorageBase, _StorageBase):\n pass\n\n\nclass LongStorage(_C.LongStorageBase, _StorageBase):\n pass\n\n\nclass IntStorage(_C.IntStorageBase, _StorageBase):\n pass\n\n\nclass ShortStorage(_C.ShortStorageBase, _StorageBase):\n pass\n\n\nclass CharStorage(_C.CharStorageBase, _StorageBase):\n 
pass\n\n\nclass ByteStorage(_C.ByteStorageBase, _StorageBase):\n pass\n\n\nclass DoubleTensor(_C.DoubleTensorBase, _TensorBase):\n\n def is_signed(self):\n return True\n\n @classmethod\n def storage_type(cls):\n return DoubleStorage\n\n\nclass FloatTensor(_C.FloatTensorBase, _TensorBase):\n\n def is_signed(self):\n return True\n\n @classmethod\n def storage_type(cls):\n return FloatStorage\n\n\nclass HalfTensor(_C.HalfTensorBase, _TensorBase):\n\n def is_signed(self):\n return True\n\n @classmethod\n def storage_type(cls):\n return HalfStorage\n\n\nclass LongTensor(_C.LongTensorBase, _TensorBase):\n\n def is_signed(self):\n return True\n\n @classmethod\n def storage_type(cls):\n return LongStorage\n\n\nclass IntTensor(_C.IntTensorBase, _TensorBase):\n\n def is_signed(self):\n return True\n\n @classmethod\n def storage_type(cls):\n return IntStorage\n\n\nclass ShortTensor(_C.ShortTensorBase, _TensorBase):\n\n def is_signed(self):\n return True\n\n @classmethod\n def storage_type(cls):\n return ShortStorage\n\n\nclass CharTensor(_C.CharTensorBase, _TensorBase):\n\n def is_signed(self):\n # TODO\n return False\n\n @classmethod\n def storage_type(cls):\n return CharStorage\n\n\nclass ByteTensor(_C.ByteTensorBase, _TensorBase):\n\n def is_signed(self):\n return False\n\n @classmethod\n def storage_type(cls):\n return ByteStorage\n\n\n_storage_classes = {\n DoubleStorage, FloatStorage, LongStorage, IntStorage, ShortStorage,\n CharStorage, ByteStorage, HalfStorage\n}\n\n_tensor_classes = {\n DoubleTensor, FloatTensor, LongTensor, IntTensor, ShortTensor,\n CharTensor, ByteTensor, HalfTensor\n}\n\n_integer_tensor_classes = {\n LongTensor, IntTensor, ShortTensor, CharTensor, ByteTensor\n}\n\n\n################################################################################\n# Import interface functions defined in Python\n################################################################################\n\nfrom .functional import *\n\n\n################################################################################\n# Initialize extension\n################################################################################\n\ndef manager_path():\n if platform.system() == 'Windows':\n return b\"\"\n import os\n path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'lib', 'torch_shm_manager')\n if not os.path.exists(path):\n raise RuntimeError(\"Unable to find torch_shm_manager at \" + path)\n return path.encode('utf-8')\n\n\n# Shared memory manager needs to know the exact location of manager executable\n_C._initExtension(manager_path())\ndel manager_path\n\nset_default_tensor_type('torch.FloatTensor')\n\n################################################################################\n# Remove unnecessary members\n################################################################################\n\ndel DoubleStorageBase\ndel FloatStorageBase\ndel LongStorageBase\ndel IntStorageBase\ndel ShortStorageBase\ndel CharStorageBase\ndel ByteStorageBase\ndel DoubleTensorBase\ndel FloatTensorBase\ndel LongTensorBase\ndel IntTensorBase\ndel ShortTensorBase\ndel CharTensorBase\ndel ByteTensorBase\n\ndel SparseDoubleTensorBase\ndel SparseFloatTensorBase\ndel SparseLongTensorBase\ndel SparseIntTensorBase\ndel SparseShortTensorBase\ndel SparseCharTensorBase\ndel SparseByteTensorBase\n\n################################################################################\n# Import most common subpackages\n################################################################################\n\nimport torch.cuda\nimport 
torch.autograd\nimport torch.nn\nimport torch.optim\nimport torch.multiprocessing\nimport torch.sparse\nimport torch.utils.backcompat\nimport torch.onnx\nimport torch.random\nimport torch.distributions\nimport torch.testing\nfrom torch.autograd import no_grad, enable_grad\n\n_C._init_names(list(torch._tensor_classes) + list(torch._storage_classes))\n\n# attach docstrings to torch and tensor functions\nfrom . import _torch_docs, _tensor_docs, _storage_docs\ndel _torch_docs, _tensor_docs, _storage_docs\n", "path": "torch/__init__.py"}], "after_files": [{"content": "\"\"\"\nThe torch package contains data structures for multi-dimensional\ntensors and mathematical operations over these are defined.\nAdditionally, it provides many utilities for efficient serializing of\nTensors and arbitrary types, and other useful utilities.\n\nIt has a CUDA counterpart, that enables you to run your tensor computations\non an NVIDIA GPU with compute capability >= 3.0.\n\"\"\"\n\nimport sys\nimport platform\nfrom ._utils import _import_dotted_name\nfrom .version import __version__\n\n__all__ = [\n 'typename', 'is_tensor', 'is_storage', 'set_default_tensor_type',\n 'set_rng_state', 'get_rng_state', 'manual_seed', 'initial_seed',\n 'save', 'load', 'set_printoptions', 'chunk', 'split', 'stack', 'matmul',\n 'no_grad', 'enable_grad',\n 'DoubleStorage', 'FloatStorage', 'LongStorage', 'IntStorage',\n 'ShortStorage', 'CharStorage', 'ByteStorage',\n 'DoubleTensor', 'FloatTensor', 'LongTensor', 'IntTensor',\n 'ShortTensor', 'CharTensor', 'ByteTensor',\n]\n\n################################################################################\n# Load the extension module\n################################################################################\n\n# Loading the extension with RTLD_GLOBAL option allows to not link extension\n# modules against the _C shared object. 
Their missing THP symbols will be\n# automatically filled by the dynamic loader.\nimport os as _dl_flags\n\n# if we have numpy, it *must* be imported before the call to setdlopenflags()\n# or there is risk that later c modules will segfault when importing numpy\ntry:\n import numpy as _np\nexcept ImportError:\n pass\n\nif platform.system() == 'Windows':\n # first get nvToolsExt PATH\n def get_nvToolsExt_path():\n NVTOOLEXT_HOME = _dl_flags.getenv('NVTOOLSEXT_PATH', 'C:\\\\Program Files\\\\NVIDIA Corporation\\\\NvToolsExt')\n\n if _dl_flags.path.exists(NVTOOLEXT_HOME):\n return NVTOOLEXT_HOME + '\\\\bin\\\\x64\\\\'\n else:\n return ''\n\n # then add the path to env\n _dl_flags.environ['PATH'] = _dl_flags.path.dirname(\n __file__) + '\\\\lib\\\\;' + get_nvToolsExt_path() + ';' + _dl_flags.environ['PATH']\n\nelse:\n # first check if the os package has the required flags\n if not hasattr(_dl_flags, 'RTLD_GLOBAL') or not hasattr(_dl_flags, 'RTLD_LAZY'):\n try:\n # next try if DLFCN exists\n import DLFCN as _dl_flags\n except ImportError:\n # as a last attempt, use compile-time constants\n import torch._dl as _dl_flags\n\n old_flags = sys.getdlopenflags()\n sys.setdlopenflags(_dl_flags.RTLD_GLOBAL | _dl_flags.RTLD_LAZY)\n\ndel _dl_flags\n\ntry:\n import torch._nvrtc\nexcept ImportError:\n pass\n\nfrom torch._C import *\n\n__all__ += [name for name in dir(_C)\n if name[0] != '_' and\n not name.endswith('Base')]\n\nif platform.system() != 'Windows':\n sys.setdlopenflags(old_flags)\n del old_flags\n\n################################################################################\n# Define basic utilities\n################################################################################\n\n\ndef typename(o):\n module = ''\n class_name = ''\n if hasattr(o, '__module__') and o.__module__ != 'builtins' \\\n and o.__module__ != '__builtin__' and o.__module__ is not None:\n module = o.__module__ + '.'\n\n if hasattr(o, '__qualname__'):\n class_name = o.__qualname__\n elif hasattr(o, '__name__'):\n class_name = o.__name__\n else:\n class_name = o.__class__.__name__\n\n return module + class_name\n\n\ndef is_tensor(obj):\n r\"\"\"Returns True if `obj` is a pytorch tensor.\n\n Args:\n obj (Object): Object to test\n \"\"\"\n return type(obj) in _tensor_classes\n\n\ndef is_storage(obj):\n r\"\"\"Returns True if `obj` is a pytorch storage object.\n\n Args:\n obj (Object): Object to test\n \"\"\"\n return type(obj) in _storage_classes\n\n\ndef set_default_tensor_type(t):\n global Tensor\n global Storage\n Tensor = _import_dotted_name(t)\n Storage = _import_dotted_name(t.replace('Tensor', 'Storage'))\n\n if 'cuda' in t:\n import torch.cuda\n torch.cuda.init()\n\n _C._set_default_tensor_type(Tensor)\n\n\nfrom .random import set_rng_state, get_rng_state, manual_seed, initial_seed\nfrom .serialization import save, load\nfrom ._tensor_str import set_printoptions\n\n################################################################################\n# Define Storage and Tensor classes\n################################################################################\n\nfrom .storage import _StorageBase\nfrom .tensor import _TensorBase\n\n\nclass DoubleStorage(_C.DoubleStorageBase, _StorageBase):\n pass\n\n\nclass FloatStorage(_C.FloatStorageBase, _StorageBase):\n pass\n\n\nclass HalfStorage(_C.HalfStorageBase, _StorageBase):\n pass\n\n\nclass LongStorage(_C.LongStorageBase, _StorageBase):\n pass\n\n\nclass IntStorage(_C.IntStorageBase, _StorageBase):\n pass\n\n\nclass ShortStorage(_C.ShortStorageBase, _StorageBase):\n 
pass\n\n\nclass CharStorage(_C.CharStorageBase, _StorageBase):\n pass\n\n\nclass ByteStorage(_C.ByteStorageBase, _StorageBase):\n pass\n\n\nclass DoubleTensor(_C.DoubleTensorBase, _TensorBase):\n\n def is_signed(self):\n return True\n\n @classmethod\n def storage_type(cls):\n return DoubleStorage\n\n\nclass FloatTensor(_C.FloatTensorBase, _TensorBase):\n\n def is_signed(self):\n return True\n\n @classmethod\n def storage_type(cls):\n return FloatStorage\n\n\nclass HalfTensor(_C.HalfTensorBase, _TensorBase):\n\n def is_signed(self):\n return True\n\n @classmethod\n def storage_type(cls):\n return HalfStorage\n\n\nclass LongTensor(_C.LongTensorBase, _TensorBase):\n\n def is_signed(self):\n return True\n\n @classmethod\n def storage_type(cls):\n return LongStorage\n\n\nclass IntTensor(_C.IntTensorBase, _TensorBase):\n\n def is_signed(self):\n return True\n\n @classmethod\n def storage_type(cls):\n return IntStorage\n\n\nclass ShortTensor(_C.ShortTensorBase, _TensorBase):\n\n def is_signed(self):\n return True\n\n @classmethod\n def storage_type(cls):\n return ShortStorage\n\n\nclass CharTensor(_C.CharTensorBase, _TensorBase):\n\n def is_signed(self):\n # TODO\n return False\n\n @classmethod\n def storage_type(cls):\n return CharStorage\n\n\nclass ByteTensor(_C.ByteTensorBase, _TensorBase):\n\n def is_signed(self):\n return False\n\n @classmethod\n def storage_type(cls):\n return ByteStorage\n\n\n_storage_classes = {\n DoubleStorage, FloatStorage, LongStorage, IntStorage, ShortStorage,\n CharStorage, ByteStorage, HalfStorage\n}\n\n_tensor_classes = {\n DoubleTensor, FloatTensor, LongTensor, IntTensor, ShortTensor,\n CharTensor, ByteTensor, HalfTensor\n}\n\n_integer_tensor_classes = {\n LongTensor, IntTensor, ShortTensor, CharTensor, ByteTensor\n}\n\n\n################################################################################\n# Import interface functions defined in Python\n################################################################################\n\nfrom .functional import *\n\n\n################################################################################\n# Initialize extension\n################################################################################\n\ndef manager_path():\n if platform.system() == 'Windows':\n return b\"\"\n import os\n path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'lib', 'torch_shm_manager')\n if not os.path.exists(path):\n raise RuntimeError(\"Unable to find torch_shm_manager at \" + path)\n return path.encode('utf-8')\n\n\n# Shared memory manager needs to know the exact location of manager executable\n_C._initExtension(manager_path())\ndel manager_path\n\nset_default_tensor_type('torch.FloatTensor')\n\n################################################################################\n# Remove unnecessary members\n################################################################################\n\ndel DoubleStorageBase\ndel FloatStorageBase\ndel LongStorageBase\ndel IntStorageBase\ndel ShortStorageBase\ndel CharStorageBase\ndel ByteStorageBase\ndel DoubleTensorBase\ndel FloatTensorBase\ndel LongTensorBase\ndel IntTensorBase\ndel ShortTensorBase\ndel CharTensorBase\ndel ByteTensorBase\n\ndel SparseDoubleTensorBase\ndel SparseFloatTensorBase\ndel SparseLongTensorBase\ndel SparseIntTensorBase\ndel SparseShortTensorBase\ndel SparseCharTensorBase\ndel SparseByteTensorBase\n\n################################################################################\n# Import most common 
subpackages\n################################################################################\n\nimport torch.cuda\nimport torch.autograd\nimport torch.nn\nimport torch.optim\nimport torch.multiprocessing\nimport torch.sparse\nimport torch.utils.backcompat\nimport torch.onnx\nimport torch.random\nimport torch.distributions\nimport torch.testing\nfrom torch.autograd import no_grad, enable_grad\n\n_C._init_names(list(torch._tensor_classes) + list(torch._storage_classes))\n\n# attach docstrings to torch and tensor functions\nfrom . import _torch_docs, _tensor_docs, _storage_docs\ndel _torch_docs, _tensor_docs, _storage_docs\n", "path": "torch/__init__.py"}]}
| 3,332 | 115 |
gh_patches_debug_12341
|
rasdani/github-patches
|
git_diff
|
aio-libs__aiohttp-3065
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Change appveyor account
https://github.com/aio-libs/aiohttp/pull/2990#discussion_r187559585:
> This requires owner privileges AFAIR, so I can't. It's a bit tricky, but the bottom line is that you create a team there mapped to the team in GitHub and give it some access there. Also you need to somehow create an org link there.
> Here you go: https://www.appveyor.com/docs/team-setup/#setting-up-appveyor-account-for-github-organization
Create a separate account called aio-libs and then add your personal account as its administrator. You can also create team mapped to some team in GitHub, allowing its members to cancel/rerun builds.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import codecs
2 import pathlib
3 import re
4 import sys
5 from distutils.command.build_ext import build_ext
6 from distutils.errors import (CCompilerError, DistutilsExecError,
7 DistutilsPlatformError)
8
9 from setuptools import Extension, setup
10
11
12 if sys.version_info < (3, 5, 3):
13 raise RuntimeError("aiohttp 3.x requires Python 3.5.3+")
14
15
16 try:
17 from Cython.Build import cythonize
18 USE_CYTHON = True
19 except ImportError:
20 USE_CYTHON = False
21
22 ext = '.pyx' if USE_CYTHON else '.c'
23
24
25 extensions = [Extension('aiohttp._websocket', ['aiohttp/_websocket' + ext]),
26 Extension('aiohttp._http_parser',
27 ['aiohttp/_http_parser' + ext,
28 'vendor/http-parser/http_parser.c'],
29 define_macros=[('HTTP_PARSER_STRICT', 0)],
30 ),
31 Extension('aiohttp._frozenlist',
32 ['aiohttp/_frozenlist' + ext]),
33 Extension('aiohttp._helpers',
34 ['aiohttp/_helpers' + ext]),
35 Extension('aiohttp._http_writer',
36 ['aiohttp/_http_writer' + ext])]
37
38
39 if USE_CYTHON:
40 extensions = cythonize(extensions)
41
42
43 class BuildFailed(Exception):
44 pass
45
46
47 class ve_build_ext(build_ext):
48 # This class allows C extension building to fail.
49
50 def run(self):
51 try:
52 build_ext.run(self)
53 except (DistutilsPlatformError, FileNotFoundError):
54 raise BuildFailed()
55
56 def build_extension(self, ext):
57 try:
58 build_ext.build_extension(self, ext)
59 except (CCompilerError, DistutilsExecError,
60 DistutilsPlatformError, ValueError):
61 raise BuildFailed()
62
63
64 here = pathlib.Path(__file__).parent
65
66 txt = (here / 'aiohttp' / '__init__.py').read_text('utf-8')
67 try:
68 version = re.findall(r"^__version__ = '([^']+)'\r?$",
69 txt, re.M)[0]
70 except IndexError:
71 raise RuntimeError('Unable to determine version.')
72
73
74 install_requires = ['attrs>=17.3.0', 'chardet>=2.0,<4.0',
75 'multidict>=4.0,<5.0',
76 'async_timeout>=3.0,<4.0',
77 'yarl>=1.0,<2.0']
78
79 if sys.version_info < (3, 7):
80 install_requires.append('idna-ssl>=1.0')
81
82
83 def read(f):
84 return (here / f).read_text('utf-8').strip()
85
86
87 NEEDS_PYTEST = {'pytest', 'test'}.intersection(sys.argv)
88 pytest_runner = ['pytest-runner'] if NEEDS_PYTEST else []
89
90 tests_require = ['pytest', 'gunicorn',
91 'pytest-timeout', 'async-generator']
92
93
94 args = dict(
95 name='aiohttp',
96 version=version,
97 description='Async http client/server framework (asyncio)',
98 long_description='\n\n'.join((read('README.rst'), read('CHANGES.rst'))),
99 classifiers=[
100 'License :: OSI Approved :: Apache Software License',
101 'Intended Audience :: Developers',
102 'Programming Language :: Python',
103 'Programming Language :: Python :: 3',
104 'Programming Language :: Python :: 3.5',
105 'Programming Language :: Python :: 3.6',
106 'Programming Language :: Python :: 3.7',
107 'Development Status :: 5 - Production/Stable',
108 'Operating System :: POSIX',
109 'Operating System :: MacOS :: MacOS X',
110 'Operating System :: Microsoft :: Windows',
111 'Topic :: Internet :: WWW/HTTP',
112 'Framework :: AsyncIO',
113 ],
114 author='Nikolay Kim',
115 author_email='[email protected]',
116 maintainer=', '.join(('Nikolay Kim <[email protected]>',
117 'Andrew Svetlov <[email protected]>')),
118 maintainer_email='[email protected]',
119 url='https://github.com/aio-libs/aiohttp',
120 project_urls={
121 'Chat: Gitter': 'https://gitter.im/aio-libs/Lobby',
122 'CI: AppVeyor': 'https://ci.appveyor.com/project/asvetlov/aiohttp', # FIXME: move under aio-libs/* slug
123 'CI: Circle': 'https://circleci.com/gh/aio-libs/aiohttp',
124 'CI: Shippable': 'https://app.shippable.com/github/aio-libs/aiohttp',
125 'CI: Travis': 'https://travis-ci.com/aio-libs/aiohttp',
126 'Coverage: codecov': 'https://codecov.io/github/aio-libs/aiohttp',
127 'Docs: RTD': 'https://docs.aiohttp.org',
128 'GitHub: issues': 'https://github.com/aio-libs/aiohttp/issues',
129 'GitHub: repo': 'https://github.com/aio-libs/aiohttp',
130 },
131 license='Apache 2',
132 packages=['aiohttp'],
133 python_requires='>=3.5.3',
134 install_requires=install_requires,
135 tests_require=tests_require,
136 setup_requires=pytest_runner,
137 include_package_data=True,
138 ext_modules=extensions,
139 cmdclass=dict(build_ext=ve_build_ext),
140 )
141
142 try:
143 setup(**args)
144 except BuildFailed:
145 print("************************************************************")
146 print("Cannot compile C accelerator module, use pure python version")
147 print("************************************************************")
148 del args['ext_modules']
149 del args['cmdclass']
150 setup(**args)
151
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -119,7 +119,7 @@
url='https://github.com/aio-libs/aiohttp',
project_urls={
'Chat: Gitter': 'https://gitter.im/aio-libs/Lobby',
- 'CI: AppVeyor': 'https://ci.appveyor.com/project/asvetlov/aiohttp', # FIXME: move under aio-libs/* slug
+ 'CI: AppVeyor': 'https://ci.appveyor.com/project/aio-libs/aiohttp',
'CI: Circle': 'https://circleci.com/gh/aio-libs/aiohttp',
'CI: Shippable': 'https://app.shippable.com/github/aio-libs/aiohttp',
'CI: Travis': 'https://travis-ci.com/aio-libs/aiohttp',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -119,7 +119,7 @@\n url='https://github.com/aio-libs/aiohttp',\n project_urls={\n 'Chat: Gitter': 'https://gitter.im/aio-libs/Lobby',\n- 'CI: AppVeyor': 'https://ci.appveyor.com/project/asvetlov/aiohttp', # FIXME: move under aio-libs/* slug\n+ 'CI: AppVeyor': 'https://ci.appveyor.com/project/aio-libs/aiohttp',\n 'CI: Circle': 'https://circleci.com/gh/aio-libs/aiohttp',\n 'CI: Shippable': 'https://app.shippable.com/github/aio-libs/aiohttp',\n 'CI: Travis': 'https://travis-ci.com/aio-libs/aiohttp',\n", "issue": "Change appveyor account\nhttps://github.com/aio-libs/aiohttp/pull/2990#discussion_r187559585:\r\n\r\n> This requires owner privileges AFAIR, so I can't. It's a bit tricky, but the bottom line is that you create a team there mapped to the team in GitHub and give it some access there. Also you need to somehow create an org link there.\r\n\r\n> Here you go: https://www.appveyor.com/docs/team-setup/#setting-up-appveyor-account-for-github-organization\r\nCreate a separate account called aio-libs and then add your personal account as its administrator. You can also create team mapped to some team in GitHub, allowing its members to cancel/rerun builds.\n", "before_files": [{"content": "import codecs\nimport pathlib\nimport re\nimport sys\nfrom distutils.command.build_ext import build_ext\nfrom distutils.errors import (CCompilerError, DistutilsExecError,\n DistutilsPlatformError)\n\nfrom setuptools import Extension, setup\n\n\nif sys.version_info < (3, 5, 3):\n raise RuntimeError(\"aiohttp 3.x requires Python 3.5.3+\")\n\n\ntry:\n from Cython.Build import cythonize\n USE_CYTHON = True\nexcept ImportError:\n USE_CYTHON = False\n\next = '.pyx' if USE_CYTHON else '.c'\n\n\nextensions = [Extension('aiohttp._websocket', ['aiohttp/_websocket' + ext]),\n Extension('aiohttp._http_parser',\n ['aiohttp/_http_parser' + ext,\n 'vendor/http-parser/http_parser.c'],\n define_macros=[('HTTP_PARSER_STRICT', 0)],\n ),\n Extension('aiohttp._frozenlist',\n ['aiohttp/_frozenlist' + ext]),\n Extension('aiohttp._helpers',\n ['aiohttp/_helpers' + ext]),\n Extension('aiohttp._http_writer',\n ['aiohttp/_http_writer' + ext])]\n\n\nif USE_CYTHON:\n extensions = cythonize(extensions)\n\n\nclass BuildFailed(Exception):\n pass\n\n\nclass ve_build_ext(build_ext):\n # This class allows C extension building to fail.\n\n def run(self):\n try:\n build_ext.run(self)\n except (DistutilsPlatformError, FileNotFoundError):\n raise BuildFailed()\n\n def build_extension(self, ext):\n try:\n build_ext.build_extension(self, ext)\n except (CCompilerError, DistutilsExecError,\n DistutilsPlatformError, ValueError):\n raise BuildFailed()\n\n\nhere = pathlib.Path(__file__).parent\n\ntxt = (here / 'aiohttp' / '__init__.py').read_text('utf-8')\ntry:\n version = re.findall(r\"^__version__ = '([^']+)'\\r?$\",\n txt, re.M)[0]\nexcept IndexError:\n raise RuntimeError('Unable to determine version.')\n\n\ninstall_requires = ['attrs>=17.3.0', 'chardet>=2.0,<4.0',\n 'multidict>=4.0,<5.0',\n 'async_timeout>=3.0,<4.0',\n 'yarl>=1.0,<2.0']\n\nif sys.version_info < (3, 7):\n install_requires.append('idna-ssl>=1.0')\n\n\ndef read(f):\n return (here / f).read_text('utf-8').strip()\n\n\nNEEDS_PYTEST = {'pytest', 'test'}.intersection(sys.argv)\npytest_runner = ['pytest-runner'] if NEEDS_PYTEST else []\n\ntests_require = ['pytest', 'gunicorn',\n 'pytest-timeout', 'async-generator']\n\n\nargs = dict(\n name='aiohttp',\n version=version,\n description='Async http client/server 
framework (asyncio)',\n long_description='\\n\\n'.join((read('README.rst'), read('CHANGES.rst'))),\n classifiers=[\n 'License :: OSI Approved :: Apache Software License',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Development Status :: 5 - Production/Stable',\n 'Operating System :: POSIX',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Topic :: Internet :: WWW/HTTP',\n 'Framework :: AsyncIO',\n ],\n author='Nikolay Kim',\n author_email='[email protected]',\n maintainer=', '.join(('Nikolay Kim <[email protected]>',\n 'Andrew Svetlov <[email protected]>')),\n maintainer_email='[email protected]',\n url='https://github.com/aio-libs/aiohttp',\n project_urls={\n 'Chat: Gitter': 'https://gitter.im/aio-libs/Lobby',\n 'CI: AppVeyor': 'https://ci.appveyor.com/project/asvetlov/aiohttp', # FIXME: move under aio-libs/* slug\n 'CI: Circle': 'https://circleci.com/gh/aio-libs/aiohttp',\n 'CI: Shippable': 'https://app.shippable.com/github/aio-libs/aiohttp',\n 'CI: Travis': 'https://travis-ci.com/aio-libs/aiohttp',\n 'Coverage: codecov': 'https://codecov.io/github/aio-libs/aiohttp',\n 'Docs: RTD': 'https://docs.aiohttp.org',\n 'GitHub: issues': 'https://github.com/aio-libs/aiohttp/issues',\n 'GitHub: repo': 'https://github.com/aio-libs/aiohttp',\n },\n license='Apache 2',\n packages=['aiohttp'],\n python_requires='>=3.5.3',\n install_requires=install_requires,\n tests_require=tests_require,\n setup_requires=pytest_runner,\n include_package_data=True,\n ext_modules=extensions,\n cmdclass=dict(build_ext=ve_build_ext),\n)\n\ntry:\n setup(**args)\nexcept BuildFailed:\n print(\"************************************************************\")\n print(\"Cannot compile C accelerator module, use pure python version\")\n print(\"************************************************************\")\n del args['ext_modules']\n del args['cmdclass']\n setup(**args)\n", "path": "setup.py"}], "after_files": [{"content": "import codecs\nimport pathlib\nimport re\nimport sys\nfrom distutils.command.build_ext import build_ext\nfrom distutils.errors import (CCompilerError, DistutilsExecError,\n DistutilsPlatformError)\n\nfrom setuptools import Extension, setup\n\n\nif sys.version_info < (3, 5, 3):\n raise RuntimeError(\"aiohttp 3.x requires Python 3.5.3+\")\n\n\ntry:\n from Cython.Build import cythonize\n USE_CYTHON = True\nexcept ImportError:\n USE_CYTHON = False\n\next = '.pyx' if USE_CYTHON else '.c'\n\n\nextensions = [Extension('aiohttp._websocket', ['aiohttp/_websocket' + ext]),\n Extension('aiohttp._http_parser',\n ['aiohttp/_http_parser' + ext,\n 'vendor/http-parser/http_parser.c'],\n define_macros=[('HTTP_PARSER_STRICT', 0)],\n ),\n Extension('aiohttp._frozenlist',\n ['aiohttp/_frozenlist' + ext]),\n Extension('aiohttp._helpers',\n ['aiohttp/_helpers' + ext]),\n Extension('aiohttp._http_writer',\n ['aiohttp/_http_writer' + ext])]\n\n\nif USE_CYTHON:\n extensions = cythonize(extensions)\n\n\nclass BuildFailed(Exception):\n pass\n\n\nclass ve_build_ext(build_ext):\n # This class allows C extension building to fail.\n\n def run(self):\n try:\n build_ext.run(self)\n except (DistutilsPlatformError, FileNotFoundError):\n raise BuildFailed()\n\n def build_extension(self, ext):\n try:\n build_ext.build_extension(self, ext)\n except (CCompilerError, DistutilsExecError,\n 
DistutilsPlatformError, ValueError):\n raise BuildFailed()\n\n\nhere = pathlib.Path(__file__).parent\n\ntxt = (here / 'aiohttp' / '__init__.py').read_text('utf-8')\ntry:\n version = re.findall(r\"^__version__ = '([^']+)'\\r?$\",\n txt, re.M)[0]\nexcept IndexError:\n raise RuntimeError('Unable to determine version.')\n\n\ninstall_requires = ['attrs>=17.3.0', 'chardet>=2.0,<4.0',\n 'multidict>=4.0,<5.0',\n 'async_timeout>=3.0,<4.0',\n 'yarl>=1.0,<2.0']\n\nif sys.version_info < (3, 7):\n install_requires.append('idna-ssl>=1.0')\n\n\ndef read(f):\n return (here / f).read_text('utf-8').strip()\n\n\nNEEDS_PYTEST = {'pytest', 'test'}.intersection(sys.argv)\npytest_runner = ['pytest-runner'] if NEEDS_PYTEST else []\n\ntests_require = ['pytest', 'gunicorn',\n 'pytest-timeout', 'async-generator']\n\n\nargs = dict(\n name='aiohttp',\n version=version,\n description='Async http client/server framework (asyncio)',\n long_description='\\n\\n'.join((read('README.rst'), read('CHANGES.rst'))),\n classifiers=[\n 'License :: OSI Approved :: Apache Software License',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Development Status :: 5 - Production/Stable',\n 'Operating System :: POSIX',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Topic :: Internet :: WWW/HTTP',\n 'Framework :: AsyncIO',\n ],\n author='Nikolay Kim',\n author_email='[email protected]',\n maintainer=', '.join(('Nikolay Kim <[email protected]>',\n 'Andrew Svetlov <[email protected]>')),\n maintainer_email='[email protected]',\n url='https://github.com/aio-libs/aiohttp',\n project_urls={\n 'Chat: Gitter': 'https://gitter.im/aio-libs/Lobby',\n 'CI: AppVeyor': 'https://ci.appveyor.com/project/aio-libs/aiohttp',\n 'CI: Circle': 'https://circleci.com/gh/aio-libs/aiohttp',\n 'CI: Shippable': 'https://app.shippable.com/github/aio-libs/aiohttp',\n 'CI: Travis': 'https://travis-ci.com/aio-libs/aiohttp',\n 'Coverage: codecov': 'https://codecov.io/github/aio-libs/aiohttp',\n 'Docs: RTD': 'https://docs.aiohttp.org',\n 'GitHub: issues': 'https://github.com/aio-libs/aiohttp/issues',\n 'GitHub: repo': 'https://github.com/aio-libs/aiohttp',\n },\n license='Apache 2',\n packages=['aiohttp'],\n python_requires='>=3.5.3',\n install_requires=install_requires,\n tests_require=tests_require,\n setup_requires=pytest_runner,\n include_package_data=True,\n ext_modules=extensions,\n cmdclass=dict(build_ext=ve_build_ext),\n)\n\ntry:\n setup(**args)\nexcept BuildFailed:\n print(\"************************************************************\")\n print(\"Cannot compile C accelerator module, use pure python version\")\n print(\"************************************************************\")\n del args['ext_modules']\n del args['cmdclass']\n setup(**args)\n", "path": "setup.py"}]}
| 1,999 | 204 |
gh_patches_debug_9225
|
rasdani/github-patches
|
git_diff
|
open-mmlab__mmpose-259
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pylint: W0707
```bash
mmpose/utils/hooks.py:37:20: W0707: Consider explicitly re-raising using the 'from' keyword (raise-missing-from)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mmpose/utils/hooks.py`
Content:
```
1 import functools
2
3
4 class OutputHook:
5
6 def __init__(self, module, outputs=None, as_tensor=False):
7 self.outputs = outputs
8 self.as_tensor = as_tensor
9 self.layer_outputs = {}
10 self.register(module)
11
12 def register(self, module):
13
14 def hook_wrapper(name):
15
16 def hook(model, input, output):
17 if self.as_tensor:
18 self.layer_outputs[name] = output
19 else:
20 if isinstance(output, list):
21 self.layer_outputs[name] = [
22 out.detach().cpu().numpy() for out in output
23 ]
24 else:
25 self.layer_outputs[name] = output.detach().cpu().numpy(
26 )
27
28 return hook
29
30 self.handles = []
31 if isinstance(self.outputs, (list, tuple)):
32 for name in self.outputs:
33 try:
34 layer = rgetattr(module, name)
35 h = layer.register_forward_hook(hook_wrapper(name))
36 except AttributeError:
37 raise AttributeError(f'Module {name} not found')
38 self.handles.append(h)
39
40 def remove(self):
41 for h in self.handles:
42 h.remove()
43
44 def __enter__(self):
45 return self
46
47 def __exit__(self, exc_type, exc_val, exc_tb):
48 self.remove()
49
50
51 # using wonder's beautiful simplification:
52 # https://stackoverflow.com/questions/31174295/getattr-and-setattr-on-nested-objects
53 def rgetattr(obj, attr, *args):
54
55 def _getattr(obj, attr):
56 return getattr(obj, attr, *args)
57
58 return functools.reduce(_getattr, [obj] + attr.split('.'))
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mmpose/utils/hooks.py b/mmpose/utils/hooks.py
--- a/mmpose/utils/hooks.py
+++ b/mmpose/utils/hooks.py
@@ -33,8 +33,9 @@
try:
layer = rgetattr(module, name)
h = layer.register_forward_hook(hook_wrapper(name))
- except AttributeError:
- raise AttributeError(f'Module {name} not found')
+ except ModuleNotFoundError as module_not_found:
+ raise ModuleNotFoundError(
+ f'Module {name} not found') from module_not_found
self.handles.append(h)
def remove(self):
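
An editorial aside on the change above (not part of the dataset record): pylint W0707 asks for explicit exception chaining — `raise NewError(...) from original` — so the original exception is attached as `__cause__` ("direct cause") rather than only as implicit context. The upstream patch also switches the caught type to `ModuleNotFoundError`; the sketch below keeps plain `AttributeError` and hypothetical names, purely to illustrate the `raise ... from ...` pattern:

```python
import functools


def rgetattr(obj, attr):
    """Nested getattr, e.g. rgetattr(model, 'encoder.layer1')."""
    return functools.reduce(getattr, attr.split('.'), obj)


class Dummy:
    pass


def find_layer(module, name):
    try:
        return rgetattr(module, name)
    except AttributeError as not_found:
        # Explicit chaining keeps the original failure reachable via __cause__.
        raise AttributeError(f'Module {name} not found') from not_found


try:
    find_layer(Dummy(), 'backbone.conv1')
except AttributeError as err:
    print(err)            # Module backbone.conv1 not found
    print(err.__cause__)  # the original AttributeError raised inside getattr
```
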
|
{"golden_diff": "diff --git a/mmpose/utils/hooks.py b/mmpose/utils/hooks.py\n--- a/mmpose/utils/hooks.py\n+++ b/mmpose/utils/hooks.py\n@@ -33,8 +33,9 @@\n try:\n layer = rgetattr(module, name)\n h = layer.register_forward_hook(hook_wrapper(name))\n- except AttributeError:\n- raise AttributeError(f'Module {name} not found')\n+ except ModuleNotFoundError as module_not_found:\n+ raise ModuleNotFoundError(\n+ f'Module {name} not found') from module_not_found\n self.handles.append(h)\n \n def remove(self):\n", "issue": "Pylint: W0707\n```bash\r\nmmpose/utils/hooks.py:37:20: W0707: Consider explicitly re-raising using the 'from' keyword (raise-missing-from)\r\n```\n", "before_files": [{"content": "import functools\n\n\nclass OutputHook:\n\n def __init__(self, module, outputs=None, as_tensor=False):\n self.outputs = outputs\n self.as_tensor = as_tensor\n self.layer_outputs = {}\n self.register(module)\n\n def register(self, module):\n\n def hook_wrapper(name):\n\n def hook(model, input, output):\n if self.as_tensor:\n self.layer_outputs[name] = output\n else:\n if isinstance(output, list):\n self.layer_outputs[name] = [\n out.detach().cpu().numpy() for out in output\n ]\n else:\n self.layer_outputs[name] = output.detach().cpu().numpy(\n )\n\n return hook\n\n self.handles = []\n if isinstance(self.outputs, (list, tuple)):\n for name in self.outputs:\n try:\n layer = rgetattr(module, name)\n h = layer.register_forward_hook(hook_wrapper(name))\n except AttributeError:\n raise AttributeError(f'Module {name} not found')\n self.handles.append(h)\n\n def remove(self):\n for h in self.handles:\n h.remove()\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.remove()\n\n\n# using wonder's beautiful simplification:\n# https://stackoverflow.com/questions/31174295/getattr-and-setattr-on-nested-objects\ndef rgetattr(obj, attr, *args):\n\n def _getattr(obj, attr):\n return getattr(obj, attr, *args)\n\n return functools.reduce(_getattr, [obj] + attr.split('.'))\n", "path": "mmpose/utils/hooks.py"}], "after_files": [{"content": "import functools\n\n\nclass OutputHook:\n\n def __init__(self, module, outputs=None, as_tensor=False):\n self.outputs = outputs\n self.as_tensor = as_tensor\n self.layer_outputs = {}\n self.register(module)\n\n def register(self, module):\n\n def hook_wrapper(name):\n\n def hook(model, input, output):\n if self.as_tensor:\n self.layer_outputs[name] = output\n else:\n if isinstance(output, list):\n self.layer_outputs[name] = [\n out.detach().cpu().numpy() for out in output\n ]\n else:\n self.layer_outputs[name] = output.detach().cpu().numpy(\n )\n\n return hook\n\n self.handles = []\n if isinstance(self.outputs, (list, tuple)):\n for name in self.outputs:\n try:\n layer = rgetattr(module, name)\n h = layer.register_forward_hook(hook_wrapper(name))\n except ModuleNotFoundError as module_not_found:\n raise ModuleNotFoundError(\n f'Module {name} not found') from module_not_found\n self.handles.append(h)\n\n def remove(self):\n for h in self.handles:\n h.remove()\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.remove()\n\n\n# using wonder's beautiful simplification:\n# https://stackoverflow.com/questions/31174295/getattr-and-setattr-on-nested-objects\ndef rgetattr(obj, attr, *args):\n\n def _getattr(obj, attr):\n return getattr(obj, attr, *args)\n\n return functools.reduce(_getattr, [obj] + attr.split('.'))\n", "path": "mmpose/utils/hooks.py"}]}
| 772 | 135 |
gh_patches_debug_4115
|
rasdani/github-patches
|
git_diff
|
DataDog__dd-trace-py-608
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Either opentracing SpanContextCorruptedException or no child spans
Using the ddtracer.opentracing exactly as in the example, just switching it out with the current Jaeger setup we're using, I get this exception on all requests:
```python
ERROR:root:tracer.extract() failed
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/grpc_opentracing/_server.py", line 117, in _start_span
opentracing.Format.HTTP_HEADERS, dict(metadata))
File "/usr/local/lib/python3.6/site-packages/ddtrace/opentracer/tracer.py", line 291, in extract
return propagator.extract(carrier)
File "/usr/local/lib/python3.6/site-packages/ddtrace/opentracer/propagation/http.py", line 73, in extract
raise SpanContextCorruptedException('failed to extract span context')
opentracing.propagation.SpanContextCorruptedException: failed to extract span context
```
The spans do make it do datadog though, they're just errors of course:

Using Python 3.6.6 with ddtrace[opentracing] and grpcio-opentracing 1.1.3, running the apps with `ddtrace-run`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ddtrace/opentracer/tracer.py`
Content:
```
1 import logging
2 import opentracing
3 from opentracing import Format
4 from opentracing.scope_managers import ThreadLocalScopeManager
5
6 import ddtrace
7 from ddtrace import Tracer as DatadogTracer
8 from ddtrace.constants import FILTERS_KEY
9 from ddtrace.settings import ConfigException
10 from ddtrace.utils import merge_dicts
11 from ddtrace.utils.config import get_application_name
12
13 from .propagation import HTTPPropagator
14 from .span import Span
15 from .span_context import SpanContext
16 from .settings import ConfigKeys as keys, config_invalid_keys
17 from .utils import get_context_provider_for_scope_manager
18
19 log = logging.getLogger(__name__)
20
21 DEFAULT_CONFIG = {
22 keys.AGENT_HOSTNAME: 'localhost',
23 keys.AGENT_PORT: 8126,
24 keys.DEBUG: False,
25 keys.ENABLED: True,
26 keys.GLOBAL_TAGS: {},
27 keys.SAMPLER: None,
28 keys.PRIORITY_SAMPLING: None,
29 keys.SETTINGS: {
30 FILTERS_KEY: [],
31 },
32 }
33
34
35 class Tracer(opentracing.Tracer):
36 """A wrapper providing an OpenTracing API for the Datadog tracer."""
37
38 def __init__(self, service_name=None, config=None, scope_manager=None, dd_tracer=None):
39 """Initialize a new Datadog opentracer.
40
41 :param service_name: (optional) the name of the service that this
42 tracer will be used with. Note if not provided, a service name will
43 try to be determined based off of ``sys.argv``. If this fails a
44 :class:`ddtrace.settings.ConfigException` will be raised.
45 :param config: (optional) a configuration object to specify additional
46 options. See the documentation for further information.
47 :param scope_manager: (optional) the scope manager for this tracer to
48 use. The available managers are listed in the Python OpenTracing repo
49 here: https://github.com/opentracing/opentracing-python#scope-managers.
50 If ``None`` is provided, defaults to
51 :class:`opentracing.scope_managers.ThreadLocalScopeManager`.
52 :param dd_tracer: (optional) the Datadog tracer for this tracer to use. This
53 should only be passed if a custom Datadog tracer is being used. Defaults
54 to the global ``ddtrace.tracer`` tracer.
55 """
56 # Merge the given config with the default into a new dict
57 config = config or {}
58 self._config = merge_dicts(DEFAULT_CONFIG, config)
59
60 # Pull out commonly used properties for performance
61 self._service_name = service_name or get_application_name()
62 self._enabled = self._config.get(keys.ENABLED)
63 self._debug = self._config.get(keys.DEBUG)
64
65 if self._debug:
66 # Ensure there are no typos in any of the keys
67 invalid_keys = config_invalid_keys(self._config)
68 if invalid_keys:
69 str_invalid_keys = ','.join(invalid_keys)
70 raise ConfigException('invalid key(s) given (%s)'.format(str_invalid_keys))
71
72 if not self._service_name:
73 raise ConfigException(""" Cannot detect the \'service_name\'.
74 Please set the \'service_name=\'
75 keyword argument.
76 """)
77
78 self._scope_manager = scope_manager or ThreadLocalScopeManager()
79
80 dd_context_provider = get_context_provider_for_scope_manager(self._scope_manager)
81
82 self._dd_tracer = dd_tracer or ddtrace.tracer or DatadogTracer()
83 self._dd_tracer.configure(enabled=self._enabled,
84 hostname=self._config.get(keys.AGENT_HOSTNAME),
85 port=self._config.get(keys.AGENT_PORT),
86 sampler=self._config.get(keys.SAMPLER),
87 settings=self._config.get(keys.SETTINGS),
88 priority_sampling=self._config.get(keys.PRIORITY_SAMPLING),
89 context_provider=dd_context_provider,
90 )
91 self._propagators = {
92 Format.HTTP_HEADERS: HTTPPropagator(),
93 Format.TEXT_MAP: HTTPPropagator(),
94 }
95
96 @property
97 def scope_manager(self):
98 """Returns the scope manager being used by this tracer."""
99 return self._scope_manager
100
101 def start_active_span(self, operation_name, child_of=None, references=None,
102 tags=None, start_time=None, ignore_active_span=False,
103 finish_on_close=True):
104 """Returns a newly started and activated `Scope`.
105 The returned `Scope` supports with-statement contexts. For example::
106
107 with tracer.start_active_span('...') as scope:
108 scope.span.set_tag('http.method', 'GET')
109 do_some_work()
110 # Span.finish() is called as part of Scope deactivation through
111 # the with statement.
112
113 It's also possible to not finish the `Span` when the `Scope` context
114 expires::
115
116 with tracer.start_active_span('...',
117 finish_on_close=False) as scope:
118 scope.span.set_tag('http.method', 'GET')
119 do_some_work()
120 # Span.finish() is not called as part of Scope deactivation as
121 # `finish_on_close` is `False`.
122
123 :param operation_name: name of the operation represented by the new
124 span from the perspective of the current service.
125 :param child_of: (optional) a Span or SpanContext instance representing
126 the parent in a REFERENCE_CHILD_OF Reference. If specified, the
127 `references` parameter must be omitted.
128 :param references: (optional) a list of Reference objects that identify
129 one or more parent SpanContexts. (See the Reference documentation
130 for detail).
131 :param tags: an optional dictionary of Span Tags. The caller gives up
132 ownership of that dictionary, because the Tracer may use it as-is
133 to avoid extra data copying.
134 :param start_time: an explicit Span start time as a unix timestamp per
135 time.time().
136 :param ignore_active_span: (optional) an explicit flag that ignores
137 the current active `Scope` and creates a root `Span`.
138 :param finish_on_close: whether span should automatically be finished
139 when `Scope.close()` is called.
140 :return: a `Scope`, already registered via the `ScopeManager`.
141 """
142 otspan = self.start_span(
143 operation_name=operation_name,
144 child_of=child_of,
145 references=references,
146 tags=tags,
147 start_time=start_time,
148 ignore_active_span=ignore_active_span,
149 )
150
151 # activate this new span
152 scope = self._scope_manager.activate(otspan, finish_on_close)
153
154 return scope
155
156 def start_span(self, operation_name=None, child_of=None, references=None,
157 tags=None, start_time=None, ignore_active_span=False):
158 """Starts and returns a new Span representing a unit of work.
159
160 Starting a root Span (a Span with no causal references)::
161
162 tracer.start_span('...')
163
164 Starting a child Span (see also start_child_span())::
165
166 tracer.start_span(
167 '...',
168 child_of=parent_span)
169
170 Starting a child Span in a more verbose way::
171
172 tracer.start_span(
173 '...',
174 references=[opentracing.child_of(parent_span)])
175
176 Note: the precedence when defining a relationship is the following:
177 (highest)
178 1. *child_of*
179 2. *references*
180 3. `scope_manager.active` (unless *ignore_active_span* is True)
181 4. None
182 (lowest)
183
184 Currently Datadog only supports `child_of` references.
185
186 :param operation_name: name of the operation represented by the new
187 span from the perspective of the current service.
188 :param child_of: (optional) a Span or SpanContext instance representing
189 the parent in a REFERENCE_CHILD_OF Reference. If specified, the
190 `references` parameter must be omitted.
191 :param references: (optional) a list of Reference objects that identify
192 one or more parent SpanContexts. (See the Reference documentation
193 for detail)
194 :param tags: an optional dictionary of Span Tags. The caller gives up
195 ownership of that dictionary, because the Tracer may use it as-is
196 to avoid extra data copying.
197 :param start_time: an explicit Span start time as a unix timestamp per
198 time.time()
199 :param ignore_active_span: an explicit flag that ignores the current
200 active `Scope` and creates a root `Span`.
201 :return: an already-started Span instance.
202 """
203 ot_parent = None # 'ot_parent' is more readable than 'child_of'
204 ot_parent_context = None # the parent span's context
205 dd_parent = None # the child_of to pass to the ddtracer
206
207 if child_of is not None:
208 ot_parent = child_of # 'ot_parent' is more readable than 'child_of'
209 elif references and isinstance(references, list):
210 # we currently only support child_of relations to one span
211 ot_parent = references[0].referenced_context
212
213 # - whenever child_of is not None ddspans with parent-child
214 # relationships will share a ddcontext which maintains a hierarchy of
215 # ddspans for the execution flow
216 # - when child_of is a ddspan then the ddtracer uses this ddspan to
217 # create the child ddspan
218 # - when child_of is a ddcontext then the ddtracer uses the ddcontext to
219 # get_current_span() for the parent
220 if ot_parent is None and not ignore_active_span:
221 # attempt to get the parent span from the scope manager
222 scope = self._scope_manager.active
223 parent_span = getattr(scope, 'span', None)
224 ot_parent_context = getattr(parent_span, 'context', None)
225 # we want the ddcontext of the active span in order to maintain the
226 # ddspan hierarchy
227 dd_parent = getattr(ot_parent_context, '_dd_context', None)
228
229 # if we cannot get the context then try getting it from the DD tracer
230 # this emulates the behaviour of tracer.trace()
231 if dd_parent is None:
232 dd_parent = self._dd_tracer.get_call_context()
233 elif ot_parent is not None and isinstance(ot_parent, Span):
234 # a span is given to use as a parent
235 ot_parent_context = ot_parent.context
236 dd_parent = ot_parent._dd_span
237 elif ot_parent is not None and isinstance(ot_parent, SpanContext):
238 # a span context is given to use to find the parent ddspan
239 dd_parent = ot_parent._dd_context
240 elif ot_parent is None:
241 # user wants to create a new parent span we don't have to do
242 # anything
243 pass
244 else:
245 raise TypeError('invalid span configuration given')
246
247 # create a new otspan and ddspan using the ddtracer and associate it
248 # with the new otspan
249 ddspan = self._dd_tracer.start_span(
250 name=operation_name,
251 child_of=dd_parent,
252 service=self._service_name,
253 )
254
255 # set the start time if one is specified
256 ddspan.start = start_time or ddspan.start
257 if tags is not None:
258 ddspan.set_tags(tags)
259
260 otspan = Span(self, ot_parent_context, operation_name)
261 # sync up the OT span with the DD span
262 otspan._associate_dd_span(ddspan)
263
264 return otspan
265
266 def inject(self, span_context, format, carrier):
267 """Injects a span context into a carrier.
268
269 :param span_context: span context to inject.
270 :param format: format to encode the span context with.
271 :param carrier: the carrier of the encoded span context.
272 """
273 propagator = self._propagators.get(format, None)
274
275 if propagator is None:
276 raise opentracing.UnsupportedFormatException
277
278 propagator.inject(span_context, carrier)
279
280 def extract(self, format, carrier):
281 """Extracts a span context from a carrier.
282
283 :param format: format that the carrier is encoded with.
284 :param carrier: the carrier to extract from.
285 """
286 propagator = self._propagators.get(format, None)
287
288 if propagator is None:
289 raise opentracing.UnsupportedFormatException
290
291 return propagator.extract(carrier)
292
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ddtrace/opentracer/tracer.py b/ddtrace/opentracer/tracer.py
--- a/ddtrace/opentracer/tracer.py
+++ b/ddtrace/opentracer/tracer.py
@@ -288,4 +288,9 @@
if propagator is None:
raise opentracing.UnsupportedFormatException
- return propagator.extract(carrier)
+ # we have to manually activate the returned context from a distributed
+ # trace
+ ot_span_ctx = propagator.extract(carrier)
+ dd_span_ctx = ot_span_ctx._dd_context
+ self._dd_tracer.context_provider.activate(dd_span_ctx)
+ return ot_span_ctx
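
A hedged editorial note on the patch above: extracting the Datadog span context from the carrier is not enough by itself — the context must also be activated in the Datadog tracer's context provider, otherwise spans started afterwards become new roots instead of children of the distributed parent. A rough usage sketch with made-up header values (the header names follow Datadog's propagation convention, but the ids are placeholders):

```python
import opentracing
from ddtrace.opentracer import Tracer

tracer = Tracer(service_name='my-grpc-service')

# Incoming metadata as it might arrive from an upstream Datadog-traced caller.
carrier = {
    'x-datadog-trace-id': '1234567890',
    'x-datadog-parent-id': '987654321',
    'x-datadog-sampling-priority': '1',
}

parent_ctx = tracer.extract(opentracing.Format.HTTP_HEADERS, carrier)

# Because extract() now also activates the underlying Datadog context, this
# span is reported as a child of the upstream trace rather than a new root.
with tracer.start_active_span('handle_request', child_of=parent_ctx) as scope:
    scope.span.set_tag('component', 'grpc-server')
```
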
|
{"golden_diff": "diff --git a/ddtrace/opentracer/tracer.py b/ddtrace/opentracer/tracer.py\n--- a/ddtrace/opentracer/tracer.py\n+++ b/ddtrace/opentracer/tracer.py\n@@ -288,4 +288,9 @@\n if propagator is None:\n raise opentracing.UnsupportedFormatException\n \n- return propagator.extract(carrier)\n+ # we have to manually activate the returned context from a distributed\n+ # trace\n+ ot_span_ctx = propagator.extract(carrier)\n+ dd_span_ctx = ot_span_ctx._dd_context\n+ self._dd_tracer.context_provider.activate(dd_span_ctx)\n+ return ot_span_ctx\n", "issue": "Either opentracing SpanContextCorruptedException or no child spans\nUsing the ddtracer.opentracing exactly as in the example, just switching it out with the current Jaeger setup we're using, I get this exception on all requests:\r\n```python\r\nERROR:root:tracer.extract() failed\r\n Traceback (most recent call last):\r\n File \"/usr/local/lib/python3.6/site-packages/grpc_opentracing/_server.py\", line 117, in _start_span\r\n opentracing.Format.HTTP_HEADERS, dict(metadata))\r\n File \"/usr/local/lib/python3.6/site-packages/ddtrace/opentracer/tracer.py\", line 291, in extract\r\n return propagator.extract(carrier)\r\n File \"/usr/local/lib/python3.6/site-packages/ddtrace/opentracer/propagation/http.py\", line 73, in extract\r\n raise SpanContextCorruptedException('failed to extract span context')\r\n opentracing.propagation.SpanContextCorruptedException: failed to extract span context\r\n```\r\nThe spans do make it do datadog though, they're just errors of course:\r\n\r\n\r\nUsing Python 3.6.6 with ddtrace[opentracing] and grpcio-opentracing 1.1.3, running the apps with `ddtrace-run`.\r\n\n", "before_files": [{"content": "import logging\nimport opentracing\nfrom opentracing import Format\nfrom opentracing.scope_managers import ThreadLocalScopeManager\n\nimport ddtrace\nfrom ddtrace import Tracer as DatadogTracer\nfrom ddtrace.constants import FILTERS_KEY\nfrom ddtrace.settings import ConfigException\nfrom ddtrace.utils import merge_dicts\nfrom ddtrace.utils.config import get_application_name\n\nfrom .propagation import HTTPPropagator\nfrom .span import Span\nfrom .span_context import SpanContext\nfrom .settings import ConfigKeys as keys, config_invalid_keys\nfrom .utils import get_context_provider_for_scope_manager\n\nlog = logging.getLogger(__name__)\n\nDEFAULT_CONFIG = {\n keys.AGENT_HOSTNAME: 'localhost',\n keys.AGENT_PORT: 8126,\n keys.DEBUG: False,\n keys.ENABLED: True,\n keys.GLOBAL_TAGS: {},\n keys.SAMPLER: None,\n keys.PRIORITY_SAMPLING: None,\n keys.SETTINGS: {\n FILTERS_KEY: [],\n },\n}\n\n\nclass Tracer(opentracing.Tracer):\n \"\"\"A wrapper providing an OpenTracing API for the Datadog tracer.\"\"\"\n\n def __init__(self, service_name=None, config=None, scope_manager=None, dd_tracer=None):\n \"\"\"Initialize a new Datadog opentracer.\n\n :param service_name: (optional) the name of the service that this\n tracer will be used with. Note if not provided, a service name will\n try to be determined based off of ``sys.argv``. If this fails a\n :class:`ddtrace.settings.ConfigException` will be raised.\n :param config: (optional) a configuration object to specify additional\n options. See the documentation for further information.\n :param scope_manager: (optional) the scope manager for this tracer to\n use. 
The available managers are listed in the Python OpenTracing repo\n here: https://github.com/opentracing/opentracing-python#scope-managers.\n If ``None`` is provided, defaults to\n :class:`opentracing.scope_managers.ThreadLocalScopeManager`.\n :param dd_tracer: (optional) the Datadog tracer for this tracer to use. This\n should only be passed if a custom Datadog tracer is being used. Defaults\n to the global ``ddtrace.tracer`` tracer.\n \"\"\"\n # Merge the given config with the default into a new dict\n config = config or {}\n self._config = merge_dicts(DEFAULT_CONFIG, config)\n\n # Pull out commonly used properties for performance\n self._service_name = service_name or get_application_name()\n self._enabled = self._config.get(keys.ENABLED)\n self._debug = self._config.get(keys.DEBUG)\n\n if self._debug:\n # Ensure there are no typos in any of the keys\n invalid_keys = config_invalid_keys(self._config)\n if invalid_keys:\n str_invalid_keys = ','.join(invalid_keys)\n raise ConfigException('invalid key(s) given (%s)'.format(str_invalid_keys))\n\n if not self._service_name:\n raise ConfigException(\"\"\" Cannot detect the \\'service_name\\'.\n Please set the \\'service_name=\\'\n keyword argument.\n \"\"\")\n\n self._scope_manager = scope_manager or ThreadLocalScopeManager()\n\n dd_context_provider = get_context_provider_for_scope_manager(self._scope_manager)\n\n self._dd_tracer = dd_tracer or ddtrace.tracer or DatadogTracer()\n self._dd_tracer.configure(enabled=self._enabled,\n hostname=self._config.get(keys.AGENT_HOSTNAME),\n port=self._config.get(keys.AGENT_PORT),\n sampler=self._config.get(keys.SAMPLER),\n settings=self._config.get(keys.SETTINGS),\n priority_sampling=self._config.get(keys.PRIORITY_SAMPLING),\n context_provider=dd_context_provider,\n )\n self._propagators = {\n Format.HTTP_HEADERS: HTTPPropagator(),\n Format.TEXT_MAP: HTTPPropagator(),\n }\n\n @property\n def scope_manager(self):\n \"\"\"Returns the scope manager being used by this tracer.\"\"\"\n return self._scope_manager\n\n def start_active_span(self, operation_name, child_of=None, references=None,\n tags=None, start_time=None, ignore_active_span=False,\n finish_on_close=True):\n \"\"\"Returns a newly started and activated `Scope`.\n The returned `Scope` supports with-statement contexts. For example::\n\n with tracer.start_active_span('...') as scope:\n scope.span.set_tag('http.method', 'GET')\n do_some_work()\n # Span.finish() is called as part of Scope deactivation through\n # the with statement.\n\n It's also possible to not finish the `Span` when the `Scope` context\n expires::\n\n with tracer.start_active_span('...',\n finish_on_close=False) as scope:\n scope.span.set_tag('http.method', 'GET')\n do_some_work()\n # Span.finish() is not called as part of Scope deactivation as\n # `finish_on_close` is `False`.\n\n :param operation_name: name of the operation represented by the new\n span from the perspective of the current service.\n :param child_of: (optional) a Span or SpanContext instance representing\n the parent in a REFERENCE_CHILD_OF Reference. If specified, the\n `references` parameter must be omitted.\n :param references: (optional) a list of Reference objects that identify\n one or more parent SpanContexts. (See the Reference documentation\n for detail).\n :param tags: an optional dictionary of Span Tags. 
The caller gives up\n ownership of that dictionary, because the Tracer may use it as-is\n to avoid extra data copying.\n :param start_time: an explicit Span start time as a unix timestamp per\n time.time().\n :param ignore_active_span: (optional) an explicit flag that ignores\n the current active `Scope` and creates a root `Span`.\n :param finish_on_close: whether span should automatically be finished\n when `Scope.close()` is called.\n :return: a `Scope`, already registered via the `ScopeManager`.\n \"\"\"\n otspan = self.start_span(\n operation_name=operation_name,\n child_of=child_of,\n references=references,\n tags=tags,\n start_time=start_time,\n ignore_active_span=ignore_active_span,\n )\n\n # activate this new span\n scope = self._scope_manager.activate(otspan, finish_on_close)\n\n return scope\n\n def start_span(self, operation_name=None, child_of=None, references=None,\n tags=None, start_time=None, ignore_active_span=False):\n \"\"\"Starts and returns a new Span representing a unit of work.\n\n Starting a root Span (a Span with no causal references)::\n\n tracer.start_span('...')\n\n Starting a child Span (see also start_child_span())::\n\n tracer.start_span(\n '...',\n child_of=parent_span)\n\n Starting a child Span in a more verbose way::\n\n tracer.start_span(\n '...',\n references=[opentracing.child_of(parent_span)])\n\n Note: the precedence when defining a relationship is the following:\n (highest)\n 1. *child_of*\n 2. *references*\n 3. `scope_manager.active` (unless *ignore_active_span* is True)\n 4. None\n (lowest)\n\n Currently Datadog only supports `child_of` references.\n\n :param operation_name: name of the operation represented by the new\n span from the perspective of the current service.\n :param child_of: (optional) a Span or SpanContext instance representing\n the parent in a REFERENCE_CHILD_OF Reference. If specified, the\n `references` parameter must be omitted.\n :param references: (optional) a list of Reference objects that identify\n one or more parent SpanContexts. (See the Reference documentation\n for detail)\n :param tags: an optional dictionary of Span Tags. 
The caller gives up\n ownership of that dictionary, because the Tracer may use it as-is\n to avoid extra data copying.\n :param start_time: an explicit Span start time as a unix timestamp per\n time.time()\n :param ignore_active_span: an explicit flag that ignores the current\n active `Scope` and creates a root `Span`.\n :return: an already-started Span instance.\n \"\"\"\n ot_parent = None # 'ot_parent' is more readable than 'child_of'\n ot_parent_context = None # the parent span's context\n dd_parent = None # the child_of to pass to the ddtracer\n\n if child_of is not None:\n ot_parent = child_of # 'ot_parent' is more readable than 'child_of'\n elif references and isinstance(references, list):\n # we currently only support child_of relations to one span\n ot_parent = references[0].referenced_context\n\n # - whenever child_of is not None ddspans with parent-child\n # relationships will share a ddcontext which maintains a hierarchy of\n # ddspans for the execution flow\n # - when child_of is a ddspan then the ddtracer uses this ddspan to\n # create the child ddspan\n # - when child_of is a ddcontext then the ddtracer uses the ddcontext to\n # get_current_span() for the parent\n if ot_parent is None and not ignore_active_span:\n # attempt to get the parent span from the scope manager\n scope = self._scope_manager.active\n parent_span = getattr(scope, 'span', None)\n ot_parent_context = getattr(parent_span, 'context', None)\n # we want the ddcontext of the active span in order to maintain the\n # ddspan hierarchy\n dd_parent = getattr(ot_parent_context, '_dd_context', None)\n\n # if we cannot get the context then try getting it from the DD tracer\n # this emulates the behaviour of tracer.trace()\n if dd_parent is None:\n dd_parent = self._dd_tracer.get_call_context()\n elif ot_parent is not None and isinstance(ot_parent, Span):\n # a span is given to use as a parent\n ot_parent_context = ot_parent.context\n dd_parent = ot_parent._dd_span\n elif ot_parent is not None and isinstance(ot_parent, SpanContext):\n # a span context is given to use to find the parent ddspan\n dd_parent = ot_parent._dd_context\n elif ot_parent is None:\n # user wants to create a new parent span we don't have to do\n # anything\n pass\n else:\n raise TypeError('invalid span configuration given')\n\n # create a new otspan and ddspan using the ddtracer and associate it\n # with the new otspan\n ddspan = self._dd_tracer.start_span(\n name=operation_name,\n child_of=dd_parent,\n service=self._service_name,\n )\n\n # set the start time if one is specified\n ddspan.start = start_time or ddspan.start\n if tags is not None:\n ddspan.set_tags(tags)\n\n otspan = Span(self, ot_parent_context, operation_name)\n # sync up the OT span with the DD span\n otspan._associate_dd_span(ddspan)\n\n return otspan\n\n def inject(self, span_context, format, carrier):\n \"\"\"Injects a span context into a carrier.\n\n :param span_context: span context to inject.\n :param format: format to encode the span context with.\n :param carrier: the carrier of the encoded span context.\n \"\"\"\n propagator = self._propagators.get(format, None)\n\n if propagator is None:\n raise opentracing.UnsupportedFormatException\n\n propagator.inject(span_context, carrier)\n\n def extract(self, format, carrier):\n \"\"\"Extracts a span context from a carrier.\n\n :param format: format that the carrier is encoded with.\n :param carrier: the carrier to extract from.\n \"\"\"\n propagator = self._propagators.get(format, None)\n\n if propagator is None:\n raise 
opentracing.UnsupportedFormatException\n\n return propagator.extract(carrier)\n", "path": "ddtrace/opentracer/tracer.py"}], "after_files": [{"content": "import logging\nimport opentracing\nfrom opentracing import Format\nfrom opentracing.scope_managers import ThreadLocalScopeManager\n\nimport ddtrace\nfrom ddtrace import Tracer as DatadogTracer\nfrom ddtrace.constants import FILTERS_KEY\nfrom ddtrace.settings import ConfigException\nfrom ddtrace.utils import merge_dicts\nfrom ddtrace.utils.config import get_application_name\n\nfrom .propagation import HTTPPropagator\nfrom .span import Span\nfrom .span_context import SpanContext\nfrom .settings import ConfigKeys as keys, config_invalid_keys\nfrom .utils import get_context_provider_for_scope_manager\n\nlog = logging.getLogger(__name__)\n\nDEFAULT_CONFIG = {\n keys.AGENT_HOSTNAME: 'localhost',\n keys.AGENT_PORT: 8126,\n keys.DEBUG: False,\n keys.ENABLED: True,\n keys.GLOBAL_TAGS: {},\n keys.SAMPLER: None,\n keys.PRIORITY_SAMPLING: None,\n keys.SETTINGS: {\n FILTERS_KEY: [],\n },\n}\n\n\nclass Tracer(opentracing.Tracer):\n \"\"\"A wrapper providing an OpenTracing API for the Datadog tracer.\"\"\"\n\n def __init__(self, service_name=None, config=None, scope_manager=None, dd_tracer=None):\n \"\"\"Initialize a new Datadog opentracer.\n\n :param service_name: (optional) the name of the service that this\n tracer will be used with. Note if not provided, a service name will\n try to be determined based off of ``sys.argv``. If this fails a\n :class:`ddtrace.settings.ConfigException` will be raised.\n :param config: (optional) a configuration object to specify additional\n options. See the documentation for further information.\n :param scope_manager: (optional) the scope manager for this tracer to\n use. The available managers are listed in the Python OpenTracing repo\n here: https://github.com/opentracing/opentracing-python#scope-managers.\n If ``None`` is provided, defaults to\n :class:`opentracing.scope_managers.ThreadLocalScopeManager`.\n :param dd_tracer: (optional) the Datadog tracer for this tracer to use. This\n should only be passed if a custom Datadog tracer is being used. 
Defaults\n to the global ``ddtrace.tracer`` tracer.\n \"\"\"\n # Merge the given config with the default into a new dict\n config = config or {}\n self._config = merge_dicts(DEFAULT_CONFIG, config)\n\n # Pull out commonly used properties for performance\n self._service_name = service_name or get_application_name()\n self._enabled = self._config.get(keys.ENABLED)\n self._debug = self._config.get(keys.DEBUG)\n\n if self._debug:\n # Ensure there are no typos in any of the keys\n invalid_keys = config_invalid_keys(self._config)\n if invalid_keys:\n str_invalid_keys = ','.join(invalid_keys)\n raise ConfigException('invalid key(s) given (%s)'.format(str_invalid_keys))\n\n if not self._service_name:\n raise ConfigException(\"\"\" Cannot detect the \\'service_name\\'.\n Please set the \\'service_name=\\'\n keyword argument.\n \"\"\")\n\n self._scope_manager = scope_manager or ThreadLocalScopeManager()\n\n dd_context_provider = get_context_provider_for_scope_manager(self._scope_manager)\n\n self._dd_tracer = dd_tracer or ddtrace.tracer or DatadogTracer()\n self._dd_tracer.configure(enabled=self._enabled,\n hostname=self._config.get(keys.AGENT_HOSTNAME),\n port=self._config.get(keys.AGENT_PORT),\n sampler=self._config.get(keys.SAMPLER),\n settings=self._config.get(keys.SETTINGS),\n priority_sampling=self._config.get(keys.PRIORITY_SAMPLING),\n context_provider=dd_context_provider,\n )\n self._propagators = {\n Format.HTTP_HEADERS: HTTPPropagator(),\n Format.TEXT_MAP: HTTPPropagator(),\n }\n\n @property\n def scope_manager(self):\n \"\"\"Returns the scope manager being used by this tracer.\"\"\"\n return self._scope_manager\n\n def start_active_span(self, operation_name, child_of=None, references=None,\n tags=None, start_time=None, ignore_active_span=False,\n finish_on_close=True):\n \"\"\"Returns a newly started and activated `Scope`.\n The returned `Scope` supports with-statement contexts. For example::\n\n with tracer.start_active_span('...') as scope:\n scope.span.set_tag('http.method', 'GET')\n do_some_work()\n # Span.finish() is called as part of Scope deactivation through\n # the with statement.\n\n It's also possible to not finish the `Span` when the `Scope` context\n expires::\n\n with tracer.start_active_span('...',\n finish_on_close=False) as scope:\n scope.span.set_tag('http.method', 'GET')\n do_some_work()\n # Span.finish() is not called as part of Scope deactivation as\n # `finish_on_close` is `False`.\n\n :param operation_name: name of the operation represented by the new\n span from the perspective of the current service.\n :param child_of: (optional) a Span or SpanContext instance representing\n the parent in a REFERENCE_CHILD_OF Reference. If specified, the\n `references` parameter must be omitted.\n :param references: (optional) a list of Reference objects that identify\n one or more parent SpanContexts. (See the Reference documentation\n for detail).\n :param tags: an optional dictionary of Span Tags. 
The caller gives up\n ownership of that dictionary, because the Tracer may use it as-is\n to avoid extra data copying.\n :param start_time: an explicit Span start time as a unix timestamp per\n time.time().\n :param ignore_active_span: (optional) an explicit flag that ignores\n the current active `Scope` and creates a root `Span`.\n :param finish_on_close: whether span should automatically be finished\n when `Scope.close()` is called.\n :return: a `Scope`, already registered via the `ScopeManager`.\n \"\"\"\n otspan = self.start_span(\n operation_name=operation_name,\n child_of=child_of,\n references=references,\n tags=tags,\n start_time=start_time,\n ignore_active_span=ignore_active_span,\n )\n\n # activate this new span\n scope = self._scope_manager.activate(otspan, finish_on_close)\n\n return scope\n\n def start_span(self, operation_name=None, child_of=None, references=None,\n tags=None, start_time=None, ignore_active_span=False):\n \"\"\"Starts and returns a new Span representing a unit of work.\n\n Starting a root Span (a Span with no causal references)::\n\n tracer.start_span('...')\n\n Starting a child Span (see also start_child_span())::\n\n tracer.start_span(\n '...',\n child_of=parent_span)\n\n Starting a child Span in a more verbose way::\n\n tracer.start_span(\n '...',\n references=[opentracing.child_of(parent_span)])\n\n Note: the precedence when defining a relationship is the following:\n (highest)\n 1. *child_of*\n 2. *references*\n 3. `scope_manager.active` (unless *ignore_active_span* is True)\n 4. None\n (lowest)\n\n Currently Datadog only supports `child_of` references.\n\n :param operation_name: name of the operation represented by the new\n span from the perspective of the current service.\n :param child_of: (optional) a Span or SpanContext instance representing\n the parent in a REFERENCE_CHILD_OF Reference. If specified, the\n `references` parameter must be omitted.\n :param references: (optional) a list of Reference objects that identify\n one or more parent SpanContexts. (See the Reference documentation\n for detail)\n :param tags: an optional dictionary of Span Tags. 
The caller gives up\n ownership of that dictionary, because the Tracer may use it as-is\n to avoid extra data copying.\n :param start_time: an explicit Span start time as a unix timestamp per\n time.time()\n :param ignore_active_span: an explicit flag that ignores the current\n active `Scope` and creates a root `Span`.\n :return: an already-started Span instance.\n \"\"\"\n ot_parent = None # 'ot_parent' is more readable than 'child_of'\n ot_parent_context = None # the parent span's context\n dd_parent = None # the child_of to pass to the ddtracer\n\n if child_of is not None:\n ot_parent = child_of # 'ot_parent' is more readable than 'child_of'\n elif references and isinstance(references, list):\n # we currently only support child_of relations to one span\n ot_parent = references[0].referenced_context\n\n # - whenever child_of is not None ddspans with parent-child\n # relationships will share a ddcontext which maintains a hierarchy of\n # ddspans for the execution flow\n # - when child_of is a ddspan then the ddtracer uses this ddspan to\n # create the child ddspan\n # - when child_of is a ddcontext then the ddtracer uses the ddcontext to\n # get_current_span() for the parent\n if ot_parent is None and not ignore_active_span:\n # attempt to get the parent span from the scope manager\n scope = self._scope_manager.active\n parent_span = getattr(scope, 'span', None)\n ot_parent_context = getattr(parent_span, 'context', None)\n # we want the ddcontext of the active span in order to maintain the\n # ddspan hierarchy\n dd_parent = getattr(ot_parent_context, '_dd_context', None)\n\n # if we cannot get the context then try getting it from the DD tracer\n # this emulates the behaviour of tracer.trace()\n if dd_parent is None:\n dd_parent = self._dd_tracer.get_call_context()\n elif ot_parent is not None and isinstance(ot_parent, Span):\n # a span is given to use as a parent\n ot_parent_context = ot_parent.context\n dd_parent = ot_parent._dd_span\n elif ot_parent is not None and isinstance(ot_parent, SpanContext):\n # a span context is given to use to find the parent ddspan\n dd_parent = ot_parent._dd_context\n elif ot_parent is None:\n # user wants to create a new parent span we don't have to do\n # anything\n pass\n else:\n raise TypeError('invalid span configuration given')\n\n # create a new otspan and ddspan using the ddtracer and associate it\n # with the new otspan\n ddspan = self._dd_tracer.start_span(\n name=operation_name,\n child_of=dd_parent,\n service=self._service_name,\n )\n\n # set the start time if one is specified\n ddspan.start = start_time or ddspan.start\n if tags is not None:\n ddspan.set_tags(tags)\n\n otspan = Span(self, ot_parent_context, operation_name)\n # sync up the OT span with the DD span\n otspan._associate_dd_span(ddspan)\n\n return otspan\n\n def inject(self, span_context, format, carrier):\n \"\"\"Injects a span context into a carrier.\n\n :param span_context: span context to inject.\n :param format: format to encode the span context with.\n :param carrier: the carrier of the encoded span context.\n \"\"\"\n propagator = self._propagators.get(format, None)\n\n if propagator is None:\n raise opentracing.UnsupportedFormatException\n\n propagator.inject(span_context, carrier)\n\n def extract(self, format, carrier):\n \"\"\"Extracts a span context from a carrier.\n\n :param format: format that the carrier is encoded with.\n :param carrier: the carrier to extract from.\n \"\"\"\n propagator = self._propagators.get(format, None)\n\n if propagator is None:\n raise 
opentracing.UnsupportedFormatException\n\n # we have to manually activate the returned context from a distributed\n # trace\n ot_span_ctx = propagator.extract(carrier)\n dd_span_ctx = ot_span_ctx._dd_context\n self._dd_tracer.context_provider.activate(dd_span_ctx)\n return ot_span_ctx\n", "path": "ddtrace/opentracer/tracer.py"}]}
| 4,025 | 148 |
gh_patches_debug_23405
|
rasdani/github-patches
|
git_diff
|
Theano__Theano-5045
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Theano deprecation warning
I have started running my code with warnings as errors. Theano trips the deprecation warning:
```
File "/opt/local/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/configparser.py", line 1209, in __init__
DeprecationWarning, stacklevel=2
DeprecationWarning: The SafeConfigParser class has been renamed to ConfigParser in Python 3.2. This alias will be removed in future versions. Use ConfigParser directly instead.
```
Please use:
```
theano_cfg = (ConfigParser.ConfigParser if six.PY3 else ConfigParser.SafeConfigParser)(…
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `theano/misc/windows.py`
Content:
```
1 from __future__ import absolute_import, print_function, division
2 import os
3 import subprocess
4
5
6 def subprocess_Popen(command, **params):
7 """
8 Utility function to work around windows behavior that open windows.
9
10 :see: call_subprocess_Popen and output_subprocess_Popen
11 """
12 startupinfo = None
13 if os.name == 'nt':
14 startupinfo = subprocess.STARTUPINFO()
15 try:
16 startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
17 except AttributeError:
18 startupinfo.dwFlags |= subprocess._subprocess.STARTF_USESHOWWINDOW
19
20 # Anaconda for Windows does not always provide .exe files
21 # in the PATH, they also have .bat files that call the corresponding
22 # executable. For instance, "g++.bat" is in the PATH, not "g++.exe"
23 # Unless "shell=True", "g++.bat" is not executed when trying to
24 # execute "g++" without extensions.
25 # (Executing "g++.bat" explicitly would also work.)
26 params['shell'] = True
27 # "If shell is True, it is recommended to pass args as a string rather than as a sequence." (cite taken from https://docs.python.org/2/library/subprocess.html#frequently-used-arguments)
28 # In case when command arguments have spaces, passing a command as a list will result in incorrect arguments break down, and consequently
29 # in "The filename, directory name, or volume label syntax is incorrect" error message.
30 # Passing the command as a single string solves this problem.
31 if isinstance(command, list):
32 command = ' '.join(command)
33
34 # Using the dummy file descriptors below is a workaround for a
35 # crash experienced in an unusual Python 2.4.4 Windows environment
36 # with the default None values.
37 stdin = None
38 if "stdin" not in params:
39 stdin = open(os.devnull)
40 params['stdin'] = stdin.fileno()
41
42 try:
43 proc = subprocess.Popen(command, startupinfo=startupinfo, **params)
44 finally:
45 if stdin is not None:
46 del stdin
47 return proc
48
49
50 def call_subprocess_Popen(command, **params):
51 """
52 Calls subprocess_Popen and discards the output, returning only the
53 exit code.
54 """
55 if 'stdout' in params or 'stderr' in params:
56 raise TypeError("don't use stderr or stdout with call_subprocess_Popen")
57 with open(os.devnull, 'wb') as null:
58 # stdin to devnull is a workaround for a crash in a weird Windows
59 # environment where sys.stdin was None
60 params.setdefault('stdin', null)
61 params['stdout'] = null
62 params['stderr'] = null
63 p = subprocess_Popen(command, **params)
64 returncode = p.wait()
65 return returncode
66
67
68 def output_subprocess_Popen(command, **params):
69 """
70 Calls subprocess_Popen, returning the output, error and exit code
71 in a tuple.
72 """
73 if 'stdout' in params or 'stderr' in params:
74 raise TypeError("don't use stderr or stdout with output_subprocess_Popen")
75 # stdin to devnull is a workaround for a crash in a weird Windows
76 # environement where sys.stdin was None
77 if not hasattr(params, 'stdin'):
78 null = open(os.devnull, 'wb')
79 params['stdin'] = null
80 params['stdout'] = subprocess.PIPE
81 params['stderr'] = subprocess.PIPE
82 p = subprocess_Popen(command, **params)
83 # we need to use communicate to make sure we don't deadlock around
84 # the stdour/stderr pipe.
85 out = p.communicate()
86 return out + (p.returncode,)
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/theano/misc/windows.py b/theano/misc/windows.py
--- a/theano/misc/windows.py
+++ b/theano/misc/windows.py
@@ -43,7 +43,7 @@
proc = subprocess.Popen(command, startupinfo=startupinfo, **params)
finally:
if stdin is not None:
- del stdin
+ stdin.close()
return proc
@@ -72,15 +72,10 @@
"""
if 'stdout' in params or 'stderr' in params:
raise TypeError("don't use stderr or stdout with output_subprocess_Popen")
- # stdin to devnull is a workaround for a crash in a weird Windows
- # environement where sys.stdin was None
- if not hasattr(params, 'stdin'):
- null = open(os.devnull, 'wb')
- params['stdin'] = null
params['stdout'] = subprocess.PIPE
params['stderr'] = subprocess.PIPE
p = subprocess_Popen(command, **params)
# we need to use communicate to make sure we don't deadlock around
- # the stdour/stderr pipe.
+ # the stdout/stderr pipe.
out = p.communicate()
return out + (p.returncode,)
|
{"golden_diff": "diff --git a/theano/misc/windows.py b/theano/misc/windows.py\n--- a/theano/misc/windows.py\n+++ b/theano/misc/windows.py\n@@ -43,7 +43,7 @@\n proc = subprocess.Popen(command, startupinfo=startupinfo, **params)\n finally:\n if stdin is not None:\n- del stdin\n+ stdin.close()\n return proc\n \n \n@@ -72,15 +72,10 @@\n \"\"\"\n if 'stdout' in params or 'stderr' in params:\n raise TypeError(\"don't use stderr or stdout with output_subprocess_Popen\")\n- # stdin to devnull is a workaround for a crash in a weird Windows\n- # environement where sys.stdin was None\n- if not hasattr(params, 'stdin'):\n- null = open(os.devnull, 'wb')\n- params['stdin'] = null\n params['stdout'] = subprocess.PIPE\n params['stderr'] = subprocess.PIPE\n p = subprocess_Popen(command, **params)\n # we need to use communicate to make sure we don't deadlock around\n- # the stdour/stderr pipe.\n+ # the stdout/stderr pipe.\n out = p.communicate()\n return out + (p.returncode,)\n", "issue": "Theano deprecation warning\nI have started running my code with warnings as errors. Theano trips the deprecation warning:\n\n```\n File \"/opt/local/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/configparser.py\", line 1209, in __init__\n DeprecationWarning, stacklevel=2\nDeprecationWarning: The SafeConfigParser class has been renamed to ConfigParser in Python 3.2. This alias will be removed in future versions. Use ConfigParser directly instead.\n```\n\nPlease use:\n\n```\ntheano_cfg = (ConfigParser.ConfigParser if six.PY3 else ConfigParser.SafeConfigParser)(\u2026\n```\n\n", "before_files": [{"content": "from __future__ import absolute_import, print_function, division\nimport os\nimport subprocess\n\n\ndef subprocess_Popen(command, **params):\n \"\"\"\n Utility function to work around windows behavior that open windows.\n\n :see: call_subprocess_Popen and output_subprocess_Popen\n \"\"\"\n startupinfo = None\n if os.name == 'nt':\n startupinfo = subprocess.STARTUPINFO()\n try:\n startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW\n except AttributeError:\n startupinfo.dwFlags |= subprocess._subprocess.STARTF_USESHOWWINDOW\n\n # Anaconda for Windows does not always provide .exe files\n # in the PATH, they also have .bat files that call the corresponding\n # executable. 
For instance, \"g++.bat\" is in the PATH, not \"g++.exe\"\n # Unless \"shell=True\", \"g++.bat\" is not executed when trying to\n # execute \"g++\" without extensions.\n # (Executing \"g++.bat\" explicitly would also work.)\n params['shell'] = True\n # \"If shell is True, it is recommended to pass args as a string rather than as a sequence.\" (cite taken from https://docs.python.org/2/library/subprocess.html#frequently-used-arguments)\n # In case when command arguments have spaces, passing a command as a list will result in incorrect arguments break down, and consequently\n # in \"The filename, directory name, or volume label syntax is incorrect\" error message.\n # Passing the command as a single string solves this problem.\n if isinstance(command, list):\n command = ' '.join(command)\n\n # Using the dummy file descriptors below is a workaround for a\n # crash experienced in an unusual Python 2.4.4 Windows environment\n # with the default None values.\n stdin = None\n if \"stdin\" not in params:\n stdin = open(os.devnull)\n params['stdin'] = stdin.fileno()\n\n try:\n proc = subprocess.Popen(command, startupinfo=startupinfo, **params)\n finally:\n if stdin is not None:\n del stdin\n return proc\n\n\ndef call_subprocess_Popen(command, **params):\n \"\"\"\n Calls subprocess_Popen and discards the output, returning only the\n exit code.\n \"\"\"\n if 'stdout' in params or 'stderr' in params:\n raise TypeError(\"don't use stderr or stdout with call_subprocess_Popen\")\n with open(os.devnull, 'wb') as null:\n # stdin to devnull is a workaround for a crash in a weird Windows\n # environment where sys.stdin was None\n params.setdefault('stdin', null)\n params['stdout'] = null\n params['stderr'] = null\n p = subprocess_Popen(command, **params)\n returncode = p.wait()\n return returncode\n\n\ndef output_subprocess_Popen(command, **params):\n \"\"\"\n Calls subprocess_Popen, returning the output, error and exit code\n in a tuple.\n \"\"\"\n if 'stdout' in params or 'stderr' in params:\n raise TypeError(\"don't use stderr or stdout with output_subprocess_Popen\")\n # stdin to devnull is a workaround for a crash in a weird Windows\n # environement where sys.stdin was None\n if not hasattr(params, 'stdin'):\n null = open(os.devnull, 'wb')\n params['stdin'] = null\n params['stdout'] = subprocess.PIPE\n params['stderr'] = subprocess.PIPE\n p = subprocess_Popen(command, **params)\n # we need to use communicate to make sure we don't deadlock around\n # the stdour/stderr pipe.\n out = p.communicate()\n return out + (p.returncode,)\n", "path": "theano/misc/windows.py"}], "after_files": [{"content": "from __future__ import absolute_import, print_function, division\nimport os\nimport subprocess\n\n\ndef subprocess_Popen(command, **params):\n \"\"\"\n Utility function to work around windows behavior that open windows.\n\n :see: call_subprocess_Popen and output_subprocess_Popen\n \"\"\"\n startupinfo = None\n if os.name == 'nt':\n startupinfo = subprocess.STARTUPINFO()\n try:\n startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW\n except AttributeError:\n startupinfo.dwFlags |= subprocess._subprocess.STARTF_USESHOWWINDOW\n\n # Anaconda for Windows does not always provide .exe files\n # in the PATH, they also have .bat files that call the corresponding\n # executable. 
For instance, \"g++.bat\" is in the PATH, not \"g++.exe\"\n # Unless \"shell=True\", \"g++.bat\" is not executed when trying to\n # execute \"g++\" without extensions.\n # (Executing \"g++.bat\" explicitly would also work.)\n params['shell'] = True\n # \"If shell is True, it is recommended to pass args as a string rather than as a sequence.\" (cite taken from https://docs.python.org/2/library/subprocess.html#frequently-used-arguments)\n # In case when command arguments have spaces, passing a command as a list will result in incorrect arguments break down, and consequently\n # in \"The filename, directory name, or volume label syntax is incorrect\" error message.\n # Passing the command as a single string solves this problem.\n if isinstance(command, list):\n command = ' '.join(command)\n\n # Using the dummy file descriptors below is a workaround for a\n # crash experienced in an unusual Python 2.4.4 Windows environment\n # with the default None values.\n stdin = None\n if \"stdin\" not in params:\n stdin = open(os.devnull)\n params['stdin'] = stdin.fileno()\n\n try:\n proc = subprocess.Popen(command, startupinfo=startupinfo, **params)\n finally:\n if stdin is not None:\n stdin.close()\n return proc\n\n\ndef call_subprocess_Popen(command, **params):\n \"\"\"\n Calls subprocess_Popen and discards the output, returning only the\n exit code.\n \"\"\"\n if 'stdout' in params or 'stderr' in params:\n raise TypeError(\"don't use stderr or stdout with call_subprocess_Popen\")\n with open(os.devnull, 'wb') as null:\n # stdin to devnull is a workaround for a crash in a weird Windows\n # environment where sys.stdin was None\n params.setdefault('stdin', null)\n params['stdout'] = null\n params['stderr'] = null\n p = subprocess_Popen(command, **params)\n returncode = p.wait()\n return returncode\n\n\ndef output_subprocess_Popen(command, **params):\n \"\"\"\n Calls subprocess_Popen, returning the output, error and exit code\n in a tuple.\n \"\"\"\n if 'stdout' in params or 'stderr' in params:\n raise TypeError(\"don't use stderr or stdout with output_subprocess_Popen\")\n params['stdout'] = subprocess.PIPE\n params['stderr'] = subprocess.PIPE\n p = subprocess_Popen(command, **params)\n # we need to use communicate to make sure we don't deadlock around\n # the stdout/stderr pipe.\n out = p.communicate()\n return out + (p.returncode,)\n", "path": "theano/misc/windows.py"}]}
| 1,374 | 272 |
gh_patches_debug_42002
|
rasdani/github-patches
|
git_diff
|
great-expectations__great_expectations-3179
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
V2 Custom Expectation Custom Rendering No Longer Works
**Describe the bug**
V2 custom expectation custom rendering (as documented [here](https://docs.greatexpectations.io/en/latest/reference/spare_parts/data_docs_reference.html#customizing-data-docs)) no longer works.
**To Reproduce**
Steps to reproduce the behavior:
1. `git clone --branch jdimatteo/reproduce-custom-expectation-rendering-bug [email protected]:jdimatteo/great_expectations.git && cd great_expectations/tests/integration/docusaurus/connecting_to_your_data/database/`
2. `python3.9 -m venv venv0.13.25 && source venv0.13.25/bin/activate`
3. `pip install -r requirements-0.13.25.txt`
4. `python sqlite_python_example_with_v2_api.py`
This should run without error as it does with GE version 0.12.7 (see details below), however instead fails as shown in this AssertionError:
```
Traceback (most recent call last):
File "/home/jdimatteo/dev/great_expectations_fork_1/tests/integration/docusaurus/connecting_to_your_data/database/sqlite_python_example_with_v2_api.py", line 161, in <module>
main()
File "/home/jdimatteo/dev/great_expectations_fork_1/tests/integration/docusaurus/connecting_to_your_data/database/sqlite_python_example_with_v2_api.py", line 158, in main
assert_custom_rendering_present_in_html()
File "/home/jdimatteo/dev/great_expectations_fork_1/tests/integration/docusaurus/connecting_to_your_data/database/sqlite_python_example_with_v2_api.py", line 141, in assert_custom_rendering_present_in_html
assert "**{" not in html_file_contents
AssertionError
```
This `AssertionError` corresponds to the missing custom rendering as shown here:

**Expected behavior**
Custom rendering should work as it did in prior GE version 0.12.7:
1. `git clone --branch jdimatteo/reproduce-custom-expectation-rendering-bug [email protected]:jdimatteo/great_expectations.git && cd great_expectations/tests/integration/docusaurus/connecting_to_your_data/database/`
2. `python3.9 -m venv venv0.12.7 && source venv0.12.7/bin/activate`
3. `pip install -r requirements-0.12.7.txt`
4. `python sqlite_python_example_with_v2_api.py`
This runs without error and produces expected custom rendering:

**Environment (please complete the following information):**
- Operating System: Linux
- Great Expectations Version: 0.13.25
**Additional context**
I understand that the V2 API is going to be deprecated at some point, however it isn't deprecated yet and the V3 API isn't ready for all use cases because it is documented as experimental [here](https://docs.greatexpectations.io/en/latest/guides/how_to_guides/migrating_versions.html) and there are several bugs preventing usage that might not be fixed for several weeks (e.g. https://github.com/great-expectations/great_expectations/issues/3095). Using an old version of Great Expectations is also not an option, because doing so requires using older versions of other libraries as well such as urllib3 with security vulnerabilities including https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-33503.
See also discussion at https://greatexpectationstalk.slack.com/archives/CUTCNHN82/p1628046306055600.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `great_expectations/render/renderer/content_block/validation_results_table_content_block.py`
Content:
```
1 import logging
2 import traceback
3 from copy import deepcopy
4
5 from great_expectations.expectations.core.expect_column_kl_divergence_to_be_less_than import (
6 ExpectColumnKlDivergenceToBeLessThan,
7 )
8 from great_expectations.expectations.registry import get_renderer_impl
9 from great_expectations.render.renderer.content_block.expectation_string import (
10 ExpectationStringRenderer,
11 )
12 from great_expectations.render.types import (
13 CollapseContent,
14 RenderedContentBlockContainer,
15 RenderedStringTemplateContent,
16 RenderedTableContent,
17 )
18 from great_expectations.render.util import num_to_str
19
20 logger = logging.getLogger(__name__)
21
22
23 class ValidationResultsTableContentBlockRenderer(ExpectationStringRenderer):
24 _content_block_type = "table"
25 _rendered_component_type = RenderedTableContent
26 _rendered_component_default_init_kwargs = {
27 "table_options": {"search": True, "icon-size": "sm"}
28 }
29
30 _default_element_styling = {
31 "default": {"classes": ["badge", "badge-secondary"]},
32 "params": {"column": {"classes": ["badge", "badge-primary"]}},
33 }
34
35 _default_content_block_styling = {
36 "body": {
37 "classes": ["table"],
38 },
39 "classes": ["ml-2", "mr-2", "mt-0", "mb-0", "table-responsive"],
40 }
41
42 @classmethod
43 def _process_content_block(cls, content_block, has_failed_evr):
44 super()._process_content_block(content_block, has_failed_evr)
45 content_block.header_row = ["Status", "Expectation", "Observed Value"]
46 content_block.header_row_options = {"Status": {"sortable": True}}
47
48 if has_failed_evr is False:
49 styling = deepcopy(content_block.styling) if content_block.styling else {}
50 if styling.get("classes"):
51 styling["classes"].append(
52 "hide-succeeded-validations-column-section-target-child"
53 )
54 else:
55 styling["classes"] = [
56 "hide-succeeded-validations-column-section-target-child"
57 ]
58
59 content_block.styling = styling
60
61 @classmethod
62 def _get_content_block_fn(cls, expectation_type):
63 expectation_string_fn = get_renderer_impl(
64 object_name=expectation_type, renderer_type="renderer.prescriptive"
65 )
66 expectation_string_fn = (
67 expectation_string_fn[1] if expectation_string_fn else None
68 )
69 if expectation_string_fn is None:
70 expectation_string_fn = getattr(cls, "_missing_content_block_fn")
71
72 # This function wraps expect_* methods from ExpectationStringRenderer to generate table classes
73 def row_generator_fn(
74 configuration=None,
75 result=None,
76 language=None,
77 runtime_configuration=None,
78 **kwargs,
79 ):
80 eval_param_value_dict = kwargs.get("evaluation_parameters", None)
81 # loading into evaluation parameters to be passed onto prescriptive renderer
82 if eval_param_value_dict is not None:
83 runtime_configuration["evaluation_parameters"] = eval_param_value_dict
84
85 expectation = result.expectation_config
86 expectation_string_cell = expectation_string_fn(
87 configuration=expectation, runtime_configuration=runtime_configuration
88 )
89
90 status_icon_renderer = get_renderer_impl(
91 object_name=expectation_type,
92 renderer_type="renderer.diagnostic.status_icon",
93 )
94 status_cell = (
95 [status_icon_renderer[1](result=result)]
96 if status_icon_renderer
97 else [getattr(cls, "_diagnostic_status_icon_renderer")(result=result)]
98 )
99 unexpected_statement = []
100 unexpected_table = None
101 observed_value = ["--"]
102
103 data_docs_exception_message = f"""\
104 An unexpected Exception occurred during data docs rendering. Because of this error, certain parts of data docs will \
105 not be rendered properly and/or may not appear altogether. Please use the trace, included in this message, to \
106 diagnose and repair the underlying issue. Detailed information follows:
107 """
108 try:
109 unexpected_statement_renderer = get_renderer_impl(
110 object_name=expectation_type,
111 renderer_type="renderer.diagnostic.unexpected_statement",
112 )
113 unexpected_statement = (
114 unexpected_statement_renderer[1](result=result)
115 if unexpected_statement_renderer
116 else []
117 )
118 except Exception as e:
119 exception_traceback = traceback.format_exc()
120 exception_message = (
121 data_docs_exception_message
122 + f'{type(e).__name__}: "{str(e)}". Traceback: "{exception_traceback}".'
123 )
124 logger.error(exception_message)
125 try:
126 unexpected_table_renderer = get_renderer_impl(
127 object_name=expectation_type,
128 renderer_type="renderer.diagnostic.unexpected_table",
129 )
130 unexpected_table = (
131 unexpected_table_renderer[1](result=result)
132 if unexpected_table_renderer
133 else None
134 )
135 except Exception as e:
136 exception_traceback = traceback.format_exc()
137 exception_message = (
138 data_docs_exception_message
139 + f'{type(e).__name__}: "{str(e)}". Traceback: "{exception_traceback}".'
140 )
141 logger.error(exception_message)
142 try:
143 observed_value_renderer = get_renderer_impl(
144 object_name=expectation_type,
145 renderer_type="renderer.diagnostic.observed_value",
146 )
147 observed_value = [
148 observed_value_renderer[1](result=result)
149 if observed_value_renderer
150 else "--"
151 ]
152 except Exception as e:
153 exception_traceback = traceback.format_exc()
154 exception_message = (
155 data_docs_exception_message
156 + f'{type(e).__name__}: "{str(e)}". Traceback: "{exception_traceback}".'
157 )
158 logger.error(exception_message)
159
160 # If the expectation has some unexpected values...:
161 if unexpected_statement:
162 expectation_string_cell += unexpected_statement
163 if unexpected_table:
164 expectation_string_cell.append(unexpected_table)
165 if len(expectation_string_cell) > 1:
166 return [status_cell + [expectation_string_cell] + observed_value]
167 else:
168 return [status_cell + expectation_string_cell + observed_value]
169
170 return row_generator_fn
171
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/great_expectations/render/renderer/content_block/validation_results_table_content_block.py b/great_expectations/render/renderer/content_block/validation_results_table_content_block.py
--- a/great_expectations/render/renderer/content_block/validation_results_table_content_block.py
+++ b/great_expectations/render/renderer/content_block/validation_results_table_content_block.py
@@ -1,7 +1,9 @@
import logging
import traceback
+import warnings
from copy import deepcopy
+from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.expectations.core.expect_column_kl_divergence_to_be_less_than import (
ExpectColumnKlDivergenceToBeLessThan,
)
@@ -66,6 +68,10 @@
expectation_string_fn = (
expectation_string_fn[1] if expectation_string_fn else None
)
+ if expectation_string_fn is None:
+ expectation_string_fn = cls._get_legacy_v2_api_style_expectation_string_fn(
+ expectation_type
+ )
if expectation_string_fn is None:
expectation_string_fn = getattr(cls, "_missing_content_block_fn")
@@ -147,7 +153,12 @@
observed_value = [
observed_value_renderer[1](result=result)
if observed_value_renderer
- else "--"
+ else (
+ cls._get_legacy_v2_api_observed_value(
+ expectation_string_fn, result
+ )
+ or "--"
+ )
]
except Exception as e:
exception_traceback = traceback.format_exc()
@@ -168,3 +179,46 @@
return [status_cell + expectation_string_cell + observed_value]
return row_generator_fn
+
+ @classmethod
+ def _get_legacy_v2_api_style_expectation_string_fn(cls, expectation_type):
+ legacy_expectation_string_fn = getattr(cls, expectation_type, None)
+ if legacy_expectation_string_fn is None:
+ # With the V2 API, expectation rendering was implemented by defining a method with the same name as the expectation.
+ # If no legacy rendering is present, return None.
+ return None
+
+ warnings.warn(
+ "V2 API style custom rendering is deprecated and is not fully supported anymore; please switch to V3 API and associated rendering style",
+ DeprecationWarning,
+ )
+
+ def expectation_string_fn_with_legacy_translation(
+ configuration: ExpectationConfiguration, runtime_configuration: dict
+ ):
+ if runtime_configuration is None:
+ runtime_configuration = {}
+
+ # With the V2 API, the expectation string function had a different signature; the below translates from the new signature to the legacy signature.
+ return legacy_expectation_string_fn(
+ expectation=configuration,
+ styling=runtime_configuration.get("styling", None),
+ include_column_name=runtime_configuration.get(
+ "include_column_name", True
+ ),
+ )
+
+ return expectation_string_fn_with_legacy_translation
+
+ @staticmethod
+ def _get_legacy_v2_api_observed_value(expectation_string_fn, result):
+ if (
+ expectation_string_fn.__name__
+ != "expectation_string_fn_with_legacy_translation"
+ ):
+ # If legacy V2 API style rendering is used, "expectation_string_fn" will be the method defined in the above "_get_legacy_v2_api_style_expectation_string_fn".
+ # If this isn't the case, return None, so we don't do any legacy logic.
+ return None
+
+ # With V2 API style rendering, the result had an "observed_value" entry that could be rendered.
+ return result["result"].get("observed_value")
|
{"golden_diff": "diff --git a/great_expectations/render/renderer/content_block/validation_results_table_content_block.py b/great_expectations/render/renderer/content_block/validation_results_table_content_block.py\n--- a/great_expectations/render/renderer/content_block/validation_results_table_content_block.py\n+++ b/great_expectations/render/renderer/content_block/validation_results_table_content_block.py\n@@ -1,7 +1,9 @@\n import logging\n import traceback\n+import warnings\n from copy import deepcopy\n \n+from great_expectations.core.expectation_configuration import ExpectationConfiguration\n from great_expectations.expectations.core.expect_column_kl_divergence_to_be_less_than import (\n ExpectColumnKlDivergenceToBeLessThan,\n )\n@@ -66,6 +68,10 @@\n expectation_string_fn = (\n expectation_string_fn[1] if expectation_string_fn else None\n )\n+ if expectation_string_fn is None:\n+ expectation_string_fn = cls._get_legacy_v2_api_style_expectation_string_fn(\n+ expectation_type\n+ )\n if expectation_string_fn is None:\n expectation_string_fn = getattr(cls, \"_missing_content_block_fn\")\n \n@@ -147,7 +153,12 @@\n observed_value = [\n observed_value_renderer[1](result=result)\n if observed_value_renderer\n- else \"--\"\n+ else (\n+ cls._get_legacy_v2_api_observed_value(\n+ expectation_string_fn, result\n+ )\n+ or \"--\"\n+ )\n ]\n except Exception as e:\n exception_traceback = traceback.format_exc()\n@@ -168,3 +179,46 @@\n return [status_cell + expectation_string_cell + observed_value]\n \n return row_generator_fn\n+\n+ @classmethod\n+ def _get_legacy_v2_api_style_expectation_string_fn(cls, expectation_type):\n+ legacy_expectation_string_fn = getattr(cls, expectation_type, None)\n+ if legacy_expectation_string_fn is None:\n+ # With the V2 API, expectation rendering was implemented by defining a method with the same name as the expectation.\n+ # If no legacy rendering is present, return None.\n+ return None\n+\n+ warnings.warn(\n+ \"V2 API style custom rendering is deprecated and is not fully supported anymore; please switch to V3 API and associated rendering style\",\n+ DeprecationWarning,\n+ )\n+\n+ def expectation_string_fn_with_legacy_translation(\n+ configuration: ExpectationConfiguration, runtime_configuration: dict\n+ ):\n+ if runtime_configuration is None:\n+ runtime_configuration = {}\n+\n+ # With the V2 API, the expectation string function had a different signature; the below translates from the new signature to the legacy signature.\n+ return legacy_expectation_string_fn(\n+ expectation=configuration,\n+ styling=runtime_configuration.get(\"styling\", None),\n+ include_column_name=runtime_configuration.get(\n+ \"include_column_name\", True\n+ ),\n+ )\n+\n+ return expectation_string_fn_with_legacy_translation\n+\n+ @staticmethod\n+ def _get_legacy_v2_api_observed_value(expectation_string_fn, result):\n+ if (\n+ expectation_string_fn.__name__\n+ != \"expectation_string_fn_with_legacy_translation\"\n+ ):\n+ # If legacy V2 API style rendering is used, \"expectation_string_fn\" will be the method defined in the above \"_get_legacy_v2_api_style_expectation_string_fn\".\n+ # If this isn't the case, return None, so we don't do any legacy logic.\n+ return None\n+\n+ # With V2 API style rendering, the result had an \"observed_value\" entry that could be rendered.\n+ return result[\"result\"].get(\"observed_value\")\n", "issue": "V2 Custom Expectation Custom Rendering No Longer Works\n**Describe the bug**\r\nV2 custom expectation custom rendering (as documented 
[here](https://docs.greatexpectations.io/en/latest/reference/spare_parts/data_docs_reference.html#customizing-data-docs)) no longer works.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. `git clone --branch jdimatteo/reproduce-custom-expectation-rendering-bug [email protected]:jdimatteo/great_expectations.git && cd great_expectations/tests/integration/docusaurus/connecting_to_your_data/database/`\r\n2. `python3.9 -m venv venv0.13.25 && source venv0.13.25/bin/activate`\r\n3. `pip install -r requirements-0.13.25.txt`\r\n4. `python sqlite_python_example_with_v2_api.py`\r\n\r\nThis should run without error as it does with GE version 0.12.7 (see details below), however instead fails as shown in this AssertionError:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/jdimatteo/dev/great_expectations_fork_1/tests/integration/docusaurus/connecting_to_your_data/database/sqlite_python_example_with_v2_api.py\", line 161, in <module>\r\n main()\r\n File \"/home/jdimatteo/dev/great_expectations_fork_1/tests/integration/docusaurus/connecting_to_your_data/database/sqlite_python_example_with_v2_api.py\", line 158, in main\r\n assert_custom_rendering_present_in_html()\r\n File \"/home/jdimatteo/dev/great_expectations_fork_1/tests/integration/docusaurus/connecting_to_your_data/database/sqlite_python_example_with_v2_api.py\", line 141, in assert_custom_rendering_present_in_html\r\n assert \"**{\" not in html_file_contents\r\nAssertionError\r\n```\r\n\r\nThis `AssertionError` corresponds to the missing custom rendering as shown here:\r\n\r\n\r\n\r\n**Expected behavior**\r\n\r\nCustom rendering should work as it did in prior GE version 0.12.7:\r\n\r\n1. `git clone --branch jdimatteo/reproduce-custom-expectation-rendering-bug [email protected]:jdimatteo/great_expectations.git && cd great_expectations/tests/integration/docusaurus/connecting_to_your_data/database/`\r\n2. `python3.9 -m venv venv0.12.7 && source venv0.12.7/bin/activate`\r\n3. `pip install -r requirements-0.12.7.txt`\r\n4. `python sqlite_python_example_with_v2_api.py`\r\n\r\nThis runs without error and produces expected custom rendering:\r\n\r\n\r\n\r\n**Environment (please complete the following information):**\r\n - Operating System: Linux\r\n - Great Expectations Version: 0.13.25\r\n\r\n**Additional context**\r\n\r\nI understand that the V2 API is going to be deprecated at some point, however it isn't deprecated yet and the V3 API isn't ready for all use cases because it is documented as experimental [here](https://docs.greatexpectations.io/en/latest/guides/how_to_guides/migrating_versions.html) and there are several bugs preventing usage that might not be fixed for several weeks (e.g. https://github.com/great-expectations/great_expectations/issues/3095). 
Using an old version of Great Expectations is also not an option, because doing so requires using older versions of other libraries as well such as urllib3 with security vulnerabilities including https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-33503.\r\n\r\nSee also discussion at https://greatexpectationstalk.slack.com/archives/CUTCNHN82/p1628046306055600.\n", "before_files": [{"content": "import logging\nimport traceback\nfrom copy import deepcopy\n\nfrom great_expectations.expectations.core.expect_column_kl_divergence_to_be_less_than import (\n ExpectColumnKlDivergenceToBeLessThan,\n)\nfrom great_expectations.expectations.registry import get_renderer_impl\nfrom great_expectations.render.renderer.content_block.expectation_string import (\n ExpectationStringRenderer,\n)\nfrom great_expectations.render.types import (\n CollapseContent,\n RenderedContentBlockContainer,\n RenderedStringTemplateContent,\n RenderedTableContent,\n)\nfrom great_expectations.render.util import num_to_str\n\nlogger = logging.getLogger(__name__)\n\n\nclass ValidationResultsTableContentBlockRenderer(ExpectationStringRenderer):\n _content_block_type = \"table\"\n _rendered_component_type = RenderedTableContent\n _rendered_component_default_init_kwargs = {\n \"table_options\": {\"search\": True, \"icon-size\": \"sm\"}\n }\n\n _default_element_styling = {\n \"default\": {\"classes\": [\"badge\", \"badge-secondary\"]},\n \"params\": {\"column\": {\"classes\": [\"badge\", \"badge-primary\"]}},\n }\n\n _default_content_block_styling = {\n \"body\": {\n \"classes\": [\"table\"],\n },\n \"classes\": [\"ml-2\", \"mr-2\", \"mt-0\", \"mb-0\", \"table-responsive\"],\n }\n\n @classmethod\n def _process_content_block(cls, content_block, has_failed_evr):\n super()._process_content_block(content_block, has_failed_evr)\n content_block.header_row = [\"Status\", \"Expectation\", \"Observed Value\"]\n content_block.header_row_options = {\"Status\": {\"sortable\": True}}\n\n if has_failed_evr is False:\n styling = deepcopy(content_block.styling) if content_block.styling else {}\n if styling.get(\"classes\"):\n styling[\"classes\"].append(\n \"hide-succeeded-validations-column-section-target-child\"\n )\n else:\n styling[\"classes\"] = [\n \"hide-succeeded-validations-column-section-target-child\"\n ]\n\n content_block.styling = styling\n\n @classmethod\n def _get_content_block_fn(cls, expectation_type):\n expectation_string_fn = get_renderer_impl(\n object_name=expectation_type, renderer_type=\"renderer.prescriptive\"\n )\n expectation_string_fn = (\n expectation_string_fn[1] if expectation_string_fn else None\n )\n if expectation_string_fn is None:\n expectation_string_fn = getattr(cls, \"_missing_content_block_fn\")\n\n # This function wraps expect_* methods from ExpectationStringRenderer to generate table classes\n def row_generator_fn(\n configuration=None,\n result=None,\n language=None,\n runtime_configuration=None,\n **kwargs,\n ):\n eval_param_value_dict = kwargs.get(\"evaluation_parameters\", None)\n # loading into evaluation parameters to be passed onto prescriptive renderer\n if eval_param_value_dict is not None:\n runtime_configuration[\"evaluation_parameters\"] = eval_param_value_dict\n\n expectation = result.expectation_config\n expectation_string_cell = expectation_string_fn(\n configuration=expectation, runtime_configuration=runtime_configuration\n )\n\n status_icon_renderer = get_renderer_impl(\n object_name=expectation_type,\n renderer_type=\"renderer.diagnostic.status_icon\",\n )\n status_cell = (\n 
[status_icon_renderer[1](result=result)]\n if status_icon_renderer\n else [getattr(cls, \"_diagnostic_status_icon_renderer\")(result=result)]\n )\n unexpected_statement = []\n unexpected_table = None\n observed_value = [\"--\"]\n\n data_docs_exception_message = f\"\"\"\\\nAn unexpected Exception occurred during data docs rendering. Because of this error, certain parts of data docs will \\\nnot be rendered properly and/or may not appear altogether. Please use the trace, included in this message, to \\\ndiagnose and repair the underlying issue. Detailed information follows:\n \"\"\"\n try:\n unexpected_statement_renderer = get_renderer_impl(\n object_name=expectation_type,\n renderer_type=\"renderer.diagnostic.unexpected_statement\",\n )\n unexpected_statement = (\n unexpected_statement_renderer[1](result=result)\n if unexpected_statement_renderer\n else []\n )\n except Exception as e:\n exception_traceback = traceback.format_exc()\n exception_message = (\n data_docs_exception_message\n + f'{type(e).__name__}: \"{str(e)}\". Traceback: \"{exception_traceback}\".'\n )\n logger.error(exception_message)\n try:\n unexpected_table_renderer = get_renderer_impl(\n object_name=expectation_type,\n renderer_type=\"renderer.diagnostic.unexpected_table\",\n )\n unexpected_table = (\n unexpected_table_renderer[1](result=result)\n if unexpected_table_renderer\n else None\n )\n except Exception as e:\n exception_traceback = traceback.format_exc()\n exception_message = (\n data_docs_exception_message\n + f'{type(e).__name__}: \"{str(e)}\". Traceback: \"{exception_traceback}\".'\n )\n logger.error(exception_message)\n try:\n observed_value_renderer = get_renderer_impl(\n object_name=expectation_type,\n renderer_type=\"renderer.diagnostic.observed_value\",\n )\n observed_value = [\n observed_value_renderer[1](result=result)\n if observed_value_renderer\n else \"--\"\n ]\n except Exception as e:\n exception_traceback = traceback.format_exc()\n exception_message = (\n data_docs_exception_message\n + f'{type(e).__name__}: \"{str(e)}\". 
Traceback: \"{exception_traceback}\".'\n )\n logger.error(exception_message)\n\n # If the expectation has some unexpected values...:\n if unexpected_statement:\n expectation_string_cell += unexpected_statement\n if unexpected_table:\n expectation_string_cell.append(unexpected_table)\n if len(expectation_string_cell) > 1:\n return [status_cell + [expectation_string_cell] + observed_value]\n else:\n return [status_cell + expectation_string_cell + observed_value]\n\n return row_generator_fn\n", "path": "great_expectations/render/renderer/content_block/validation_results_table_content_block.py"}], "after_files": [{"content": "import logging\nimport traceback\nimport warnings\nfrom copy import deepcopy\n\nfrom great_expectations.core.expectation_configuration import ExpectationConfiguration\nfrom great_expectations.expectations.core.expect_column_kl_divergence_to_be_less_than import (\n ExpectColumnKlDivergenceToBeLessThan,\n)\nfrom great_expectations.expectations.registry import get_renderer_impl\nfrom great_expectations.render.renderer.content_block.expectation_string import (\n ExpectationStringRenderer,\n)\nfrom great_expectations.render.types import (\n CollapseContent,\n RenderedContentBlockContainer,\n RenderedStringTemplateContent,\n RenderedTableContent,\n)\nfrom great_expectations.render.util import num_to_str\n\nlogger = logging.getLogger(__name__)\n\n\nclass ValidationResultsTableContentBlockRenderer(ExpectationStringRenderer):\n _content_block_type = \"table\"\n _rendered_component_type = RenderedTableContent\n _rendered_component_default_init_kwargs = {\n \"table_options\": {\"search\": True, \"icon-size\": \"sm\"}\n }\n\n _default_element_styling = {\n \"default\": {\"classes\": [\"badge\", \"badge-secondary\"]},\n \"params\": {\"column\": {\"classes\": [\"badge\", \"badge-primary\"]}},\n }\n\n _default_content_block_styling = {\n \"body\": {\n \"classes\": [\"table\"],\n },\n \"classes\": [\"ml-2\", \"mr-2\", \"mt-0\", \"mb-0\", \"table-responsive\"],\n }\n\n @classmethod\n def _process_content_block(cls, content_block, has_failed_evr):\n super()._process_content_block(content_block, has_failed_evr)\n content_block.header_row = [\"Status\", \"Expectation\", \"Observed Value\"]\n content_block.header_row_options = {\"Status\": {\"sortable\": True}}\n\n if has_failed_evr is False:\n styling = deepcopy(content_block.styling) if content_block.styling else {}\n if styling.get(\"classes\"):\n styling[\"classes\"].append(\n \"hide-succeeded-validations-column-section-target-child\"\n )\n else:\n styling[\"classes\"] = [\n \"hide-succeeded-validations-column-section-target-child\"\n ]\n\n content_block.styling = styling\n\n @classmethod\n def _get_content_block_fn(cls, expectation_type):\n expectation_string_fn = get_renderer_impl(\n object_name=expectation_type, renderer_type=\"renderer.prescriptive\"\n )\n expectation_string_fn = (\n expectation_string_fn[1] if expectation_string_fn else None\n )\n if expectation_string_fn is None:\n expectation_string_fn = cls._get_legacy_v2_api_style_expectation_string_fn(\n expectation_type\n )\n if expectation_string_fn is None:\n expectation_string_fn = getattr(cls, \"_missing_content_block_fn\")\n\n # This function wraps expect_* methods from ExpectationStringRenderer to generate table classes\n def row_generator_fn(\n configuration=None,\n result=None,\n language=None,\n runtime_configuration=None,\n **kwargs,\n ):\n eval_param_value_dict = kwargs.get(\"evaluation_parameters\", None)\n # loading into evaluation parameters to be passed onto 
prescriptive renderer\n if eval_param_value_dict is not None:\n runtime_configuration[\"evaluation_parameters\"] = eval_param_value_dict\n\n expectation = result.expectation_config\n expectation_string_cell = expectation_string_fn(\n configuration=expectation, runtime_configuration=runtime_configuration\n )\n\n status_icon_renderer = get_renderer_impl(\n object_name=expectation_type,\n renderer_type=\"renderer.diagnostic.status_icon\",\n )\n status_cell = (\n [status_icon_renderer[1](result=result)]\n if status_icon_renderer\n else [getattr(cls, \"_diagnostic_status_icon_renderer\")(result=result)]\n )\n unexpected_statement = []\n unexpected_table = None\n observed_value = [\"--\"]\n\n data_docs_exception_message = f\"\"\"\\\nAn unexpected Exception occurred during data docs rendering. Because of this error, certain parts of data docs will \\\nnot be rendered properly and/or may not appear altogether. Please use the trace, included in this message, to \\\ndiagnose and repair the underlying issue. Detailed information follows:\n \"\"\"\n try:\n unexpected_statement_renderer = get_renderer_impl(\n object_name=expectation_type,\n renderer_type=\"renderer.diagnostic.unexpected_statement\",\n )\n unexpected_statement = (\n unexpected_statement_renderer[1](result=result)\n if unexpected_statement_renderer\n else []\n )\n except Exception as e:\n exception_traceback = traceback.format_exc()\n exception_message = (\n data_docs_exception_message\n + f'{type(e).__name__}: \"{str(e)}\". Traceback: \"{exception_traceback}\".'\n )\n logger.error(exception_message)\n try:\n unexpected_table_renderer = get_renderer_impl(\n object_name=expectation_type,\n renderer_type=\"renderer.diagnostic.unexpected_table\",\n )\n unexpected_table = (\n unexpected_table_renderer[1](result=result)\n if unexpected_table_renderer\n else None\n )\n except Exception as e:\n exception_traceback = traceback.format_exc()\n exception_message = (\n data_docs_exception_message\n + f'{type(e).__name__}: \"{str(e)}\". Traceback: \"{exception_traceback}\".'\n )\n logger.error(exception_message)\n try:\n observed_value_renderer = get_renderer_impl(\n object_name=expectation_type,\n renderer_type=\"renderer.diagnostic.observed_value\",\n )\n observed_value = [\n observed_value_renderer[1](result=result)\n if observed_value_renderer\n else (\n cls._get_legacy_v2_api_observed_value(\n expectation_string_fn, result\n )\n or \"--\"\n )\n ]\n except Exception as e:\n exception_traceback = traceback.format_exc()\n exception_message = (\n data_docs_exception_message\n + f'{type(e).__name__}: \"{str(e)}\". 
Traceback: \"{exception_traceback}\".'\n )\n logger.error(exception_message)\n\n # If the expectation has some unexpected values...:\n if unexpected_statement:\n expectation_string_cell += unexpected_statement\n if unexpected_table:\n expectation_string_cell.append(unexpected_table)\n if len(expectation_string_cell) > 1:\n return [status_cell + [expectation_string_cell] + observed_value]\n else:\n return [status_cell + expectation_string_cell + observed_value]\n\n return row_generator_fn\n\n @classmethod\n def _get_legacy_v2_api_style_expectation_string_fn(cls, expectation_type):\n legacy_expectation_string_fn = getattr(cls, expectation_type, None)\n if legacy_expectation_string_fn is None:\n # With the V2 API, expectation rendering was implemented by defining a method with the same name as the expectation.\n # If no legacy rendering is present, return None.\n return None\n\n warnings.warn(\n \"V2 API style custom rendering is deprecated and is not fully supported anymore; please switch to V3 API and associated rendering style\",\n DeprecationWarning,\n )\n\n def expectation_string_fn_with_legacy_translation(\n configuration: ExpectationConfiguration, runtime_configuration: dict\n ):\n if runtime_configuration is None:\n runtime_configuration = {}\n\n # With the V2 API, the expectation string function had a different signature; the below translates from the new signature to the legacy signature.\n return legacy_expectation_string_fn(\n expectation=configuration,\n styling=runtime_configuration.get(\"styling\", None),\n include_column_name=runtime_configuration.get(\n \"include_column_name\", True\n ),\n )\n\n return expectation_string_fn_with_legacy_translation\n\n @staticmethod\n def _get_legacy_v2_api_observed_value(expectation_string_fn, result):\n if (\n expectation_string_fn.__name__\n != \"expectation_string_fn_with_legacy_translation\"\n ):\n # If legacy V2 API style rendering is used, \"expectation_string_fn\" will be the method defined in the above \"_get_legacy_v2_api_style_expectation_string_fn\".\n # If this isn't the case, return None, so we don't do any legacy logic.\n return None\n\n # With V2 API style rendering, the result had an \"observed_value\" entry that could be rendered.\n return result[\"result\"].get(\"observed_value\")\n", "path": "great_expectations/render/renderer/content_block/validation_results_table_content_block.py"}]}
| 2,885 | 797 |
gh_patches_debug_35090
|
rasdani/github-patches
|
git_diff
|
chainer__chainer-1191
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Inconsistency of type checking between SoftmaxCrossEntropy and Accuracy
We often evaluate a training procedure with both `softmax_cross_entropy` and `accuracy`, as the default `chainer.links.Classifier` does. But there are cases where `softmax_cross_entropy(x, t)` is accepted but `accuracy(x, t)` raises an error because of its type checking (and vice versa). Replicated softmax is one such example (e.g. `x.shape = (B, C, N)` and `t.shape = (B, N)`). We need to reshape them manually in such a case.
It would be convenient for users if the shape checking of the two were more consistent.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/functions/evaluation/accuracy.py`
Content:
```
1 import numpy
2
3 from chainer import cuda
4 from chainer import function
5 from chainer.utils import type_check
6
7
8 class Accuracy(function.Function):
9
10 def __init__(self, ignore_label=None):
11 self.ignore_label = ignore_label
12
13 def check_type_forward(self, in_types):
14 type_check.expect(in_types.size() == 2)
15 x_type, t_type = in_types
16
17 type_check.expect(
18 x_type.dtype.kind == 'f',
19 x_type.ndim >= 2,
20 t_type.dtype == numpy.int32,
21 t_type.ndim == 1,
22 t_type.shape[0] == x_type.shape[0],
23 )
24 for i in range(2, x_type.ndim.eval()):
25 type_check.expect(x_type.shape[i] == 1)
26
27 def forward(self, inputs):
28 xp = cuda.get_array_module(*inputs)
29 y, t = inputs
30 y = y.reshape(len(y), -1) # flatten
31
32 if self.ignore_label is not None:
33 mask = (t == self.ignore_label)
34 ignore_cnt = mask.sum()
35
36 # will always be true when the true label is ignore_label
37 # TODO(henry0312)
38 # If cupy.where returns indexes, we could make the code better.
39 # Also, we would need Advanced Indexing.
40 pred = xp.where(mask, self.ignore_label, y.argmax(axis=1))
41 count = (pred == t).sum() - ignore_cnt
42 total = len(t) - ignore_cnt
43
44 if total == 0:
45 return xp.asarray(0.0, dtype=y.dtype),
46 else:
47 return xp.asarray(float(count) / total, dtype=y.dtype),
48 else:
49 pred = y.argmax(axis=1)
50 return xp.asarray((pred == t).mean(dtype=y.dtype)),
51
52
53 def accuracy(y, t, ignore_label=None):
54 """Computes muticlass classification accuracy of the minibatch.
55
56 Args:
57 y (Variable): Variable holding a matrix whose (i, j)-th element
58 indicates the score of the class j at the i-th example.
59 t (Variable): Variable holding an int32 vector of ground truth labels.
60 ignore_label (int or None): Skip calculating accuracy
61 if the ture label is ``ignore_label``.
62
63 Returns:
64 Variable: A variable holding a scalar array of the accuracy.
65
66 .. note:: This function is non-differentiable.
67
68 """
69 return Accuracy(ignore_label=ignore_label)(y, t)
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/chainer/functions/evaluation/accuracy.py b/chainer/functions/evaluation/accuracy.py
--- a/chainer/functions/evaluation/accuracy.py
+++ b/chainer/functions/evaluation/accuracy.py
@@ -1,4 +1,5 @@
import numpy
+import six
from chainer import cuda
from chainer import function
@@ -16,18 +17,21 @@
type_check.expect(
x_type.dtype.kind == 'f',
- x_type.ndim >= 2,
- t_type.dtype == numpy.int32,
- t_type.ndim == 1,
- t_type.shape[0] == x_type.shape[0],
+ t_type.dtype == numpy.int32
)
- for i in range(2, x_type.ndim.eval()):
+
+ t_ndim = t_type.ndim.eval()
+ type_check.expect(
+ x_type.ndim >= t_type.ndim,
+ x_type.shape[0] == t_type.shape[0],
+ x_type.shape[2: t_ndim + 1] == t_type.shape[1:]
+ )
+ for i in six.moves.range(t_ndim + 1, x_type.ndim.eval()):
type_check.expect(x_type.shape[i] == 1)
def forward(self, inputs):
xp = cuda.get_array_module(*inputs)
y, t = inputs
- y = y.reshape(len(y), -1) # flatten
if self.ignore_label is not None:
mask = (t == self.ignore_label)
@@ -37,16 +41,17 @@
# TODO(henry0312)
# If cupy.where returns indexes, we could make the code better.
# Also, we would need Advanced Indexing.
- pred = xp.where(mask, self.ignore_label, y.argmax(axis=1))
+ pred = xp.where(mask, self.ignore_label,
+ y.argmax(axis=1).reshape(t.shape))
count = (pred == t).sum() - ignore_cnt
- total = len(t) - ignore_cnt
+ total = t.size - ignore_cnt
if total == 0:
return xp.asarray(0.0, dtype=y.dtype),
else:
return xp.asarray(float(count) / total, dtype=y.dtype),
else:
- pred = y.argmax(axis=1)
+ pred = y.argmax(axis=1).reshape(t.shape)
return xp.asarray((pred == t).mean(dtype=y.dtype)),
|
{"golden_diff": "diff --git a/chainer/functions/evaluation/accuracy.py b/chainer/functions/evaluation/accuracy.py\n--- a/chainer/functions/evaluation/accuracy.py\n+++ b/chainer/functions/evaluation/accuracy.py\n@@ -1,4 +1,5 @@\n import numpy\n+import six\n \n from chainer import cuda\n from chainer import function\n@@ -16,18 +17,21 @@\n \n type_check.expect(\n x_type.dtype.kind == 'f',\n- x_type.ndim >= 2,\n- t_type.dtype == numpy.int32,\n- t_type.ndim == 1,\n- t_type.shape[0] == x_type.shape[0],\n+ t_type.dtype == numpy.int32\n )\n- for i in range(2, x_type.ndim.eval()):\n+\n+ t_ndim = t_type.ndim.eval()\n+ type_check.expect(\n+ x_type.ndim >= t_type.ndim,\n+ x_type.shape[0] == t_type.shape[0],\n+ x_type.shape[2: t_ndim + 1] == t_type.shape[1:]\n+ )\n+ for i in six.moves.range(t_ndim + 1, x_type.ndim.eval()):\n type_check.expect(x_type.shape[i] == 1)\n \n def forward(self, inputs):\n xp = cuda.get_array_module(*inputs)\n y, t = inputs\n- y = y.reshape(len(y), -1) # flatten\n \n if self.ignore_label is not None:\n mask = (t == self.ignore_label)\n@@ -37,16 +41,17 @@\n # TODO(henry0312)\n # If cupy.where returns indexes, we could make the code better.\n # Also, we would need Advanced Indexing.\n- pred = xp.where(mask, self.ignore_label, y.argmax(axis=1))\n+ pred = xp.where(mask, self.ignore_label,\n+ y.argmax(axis=1).reshape(t.shape))\n count = (pred == t).sum() - ignore_cnt\n- total = len(t) - ignore_cnt\n+ total = t.size - ignore_cnt\n \n if total == 0:\n return xp.asarray(0.0, dtype=y.dtype),\n else:\n return xp.asarray(float(count) / total, dtype=y.dtype),\n else:\n- pred = y.argmax(axis=1)\n+ pred = y.argmax(axis=1).reshape(t.shape)\n return xp.asarray((pred == t).mean(dtype=y.dtype)),\n", "issue": "Inconsistency of type checking between SoftmaxCrossEntropy and Accuracy\nWe often evaluate training procedure with both `softmax_cross_entropy` and `accuracy`, as default `chainer.links.Classifier` does. But there are cases `softmax_cross_entorpy(x, t)` is accepted but `accuracy(x, t)` raises error because of its type checking (and vise versa). Replicated softmax is one of a such examples (e.g. `x.shape = (B, C, N)` and `t.shape = (B, N)`). 
We need to reshape them manually in such a case.\n\nIt is convenient for users that shape checking of the two be more consistent.\n\n", "before_files": [{"content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\nclass Accuracy(function.Function):\n\n def __init__(self, ignore_label=None):\n self.ignore_label = ignore_label\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 2)\n x_type, t_type = in_types\n\n type_check.expect(\n x_type.dtype.kind == 'f',\n x_type.ndim >= 2,\n t_type.dtype == numpy.int32,\n t_type.ndim == 1,\n t_type.shape[0] == x_type.shape[0],\n )\n for i in range(2, x_type.ndim.eval()):\n type_check.expect(x_type.shape[i] == 1)\n\n def forward(self, inputs):\n xp = cuda.get_array_module(*inputs)\n y, t = inputs\n y = y.reshape(len(y), -1) # flatten\n\n if self.ignore_label is not None:\n mask = (t == self.ignore_label)\n ignore_cnt = mask.sum()\n\n # will always be true when the true label is ignore_label\n # TODO(henry0312)\n # If cupy.where returns indexes, we could make the code better.\n # Also, we would need Advanced Indexing.\n pred = xp.where(mask, self.ignore_label, y.argmax(axis=1))\n count = (pred == t).sum() - ignore_cnt\n total = len(t) - ignore_cnt\n\n if total == 0:\n return xp.asarray(0.0, dtype=y.dtype),\n else:\n return xp.asarray(float(count) / total, dtype=y.dtype),\n else:\n pred = y.argmax(axis=1)\n return xp.asarray((pred == t).mean(dtype=y.dtype)),\n\n\ndef accuracy(y, t, ignore_label=None):\n \"\"\"Computes muticlass classification accuracy of the minibatch.\n\n Args:\n y (Variable): Variable holding a matrix whose (i, j)-th element\n indicates the score of the class j at the i-th example.\n t (Variable): Variable holding an int32 vector of ground truth labels.\n ignore_label (int or None): Skip calculating accuracy\n if the ture label is ``ignore_label``.\n\n Returns:\n Variable: A variable holding a scalar array of the accuracy.\n\n .. 
note:: This function is non-differentiable.\n\n \"\"\"\n return Accuracy(ignore_label=ignore_label)(y, t)\n", "path": "chainer/functions/evaluation/accuracy.py"}], "after_files": [{"content": "import numpy\nimport six\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\nclass Accuracy(function.Function):\n\n def __init__(self, ignore_label=None):\n self.ignore_label = ignore_label\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 2)\n x_type, t_type = in_types\n\n type_check.expect(\n x_type.dtype.kind == 'f',\n t_type.dtype == numpy.int32\n )\n\n t_ndim = t_type.ndim.eval()\n type_check.expect(\n x_type.ndim >= t_type.ndim,\n x_type.shape[0] == t_type.shape[0],\n x_type.shape[2: t_ndim + 1] == t_type.shape[1:]\n )\n for i in six.moves.range(t_ndim + 1, x_type.ndim.eval()):\n type_check.expect(x_type.shape[i] == 1)\n\n def forward(self, inputs):\n xp = cuda.get_array_module(*inputs)\n y, t = inputs\n\n if self.ignore_label is not None:\n mask = (t == self.ignore_label)\n ignore_cnt = mask.sum()\n\n # will always be true when the true label is ignore_label\n # TODO(henry0312)\n # If cupy.where returns indexes, we could make the code better.\n # Also, we would need Advanced Indexing.\n pred = xp.where(mask, self.ignore_label,\n y.argmax(axis=1).reshape(t.shape))\n count = (pred == t).sum() - ignore_cnt\n total = t.size - ignore_cnt\n\n if total == 0:\n return xp.asarray(0.0, dtype=y.dtype),\n else:\n return xp.asarray(float(count) / total, dtype=y.dtype),\n else:\n pred = y.argmax(axis=1).reshape(t.shape)\n return xp.asarray((pred == t).mean(dtype=y.dtype)),\n\n\ndef accuracy(y, t, ignore_label=None):\n \"\"\"Computes muticlass classification accuracy of the minibatch.\n\n Args:\n y (Variable): Variable holding a matrix whose (i, j)-th element\n indicates the score of the class j at the i-th example.\n t (Variable): Variable holding an int32 vector of ground truth labels.\n ignore_label (int or None): Skip calculating accuracy\n if the ture label is ``ignore_label``.\n\n Returns:\n Variable: A variable holding a scalar array of the accuracy.\n\n .. note:: This function is non-differentiable.\n\n \"\"\"\n return Accuracy(ignore_label=ignore_label)(y, t)\n", "path": "chainer/functions/evaluation/accuracy.py"}]}
| 1,073 | 543 |
gh_patches_debug_15087
|
rasdani/github-patches
|
git_diff
|
kedro-org__kedro-20
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
User home directory is not expanded for TextLocalDataSet
## Description
The user home directory `~` is not automatically expanded for `TextLocalDataSet`, but it is automatically expanded for `ParquetLocalDataSet` and `CSVLocalDataSet`.
## Context
Trying to specify file paths relative to the user home directory to simplify interoperability and handoff of Kedro pipelines between teammates - instead of manually replacing hardcoded absolute paths for each machine or user, each user will automatically recreate the same directory structure relative to their home directory.
## Steps to Reproduce
```
from kedro.io import TextLocalDataSet
import os
string_to_write = "This will go in a file."
data_set = TextLocalDataSet(filepath="~/code/tmp/new_documentation.md")
data_set.save(string_to_write)
os.path.abspath(data_set._filepath)
```
## Expected Result
`~` should be replaced by the user home directory, and the file should be saved relative to it in the subdirectory `code/tmp/`.
## Actual Result
The directory `~/code/tmp/` is created in the current working directory instead.
## Your Environment
Include as many relevant details about the environment in which you experienced the bug:
* Kedro version used (`pip show kedro` or `kedro -V`): v0.14.2
* Python version used (`python -V`): Python 3.6.8 :: Anaconda, Inc.
* Operating system and version: MacOS Mojave 10.14.3
## Checklist
Include labels so that we can categorise your issue:
- [ ] Add a "Component" label to the issue
- [ ] Add a "Priority" label to the issue
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kedro/io/text_local.py`
Content:
```
1 # Copyright 2018-2019 QuantumBlack Visual Analytics Limited
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
11 # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
12 # NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
13 # BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
14 # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
15 # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 #
17 # The QuantumBlack Visual Analytics Limited (“QuantumBlack”) name and logo
18 # (either separately or in combination, “QuantumBlack Trademarks”) are
19 # trademarks of QuantumBlack. The License does not grant you any right or
20 # license to the QuantumBlack Trademarks. You may not use the QuantumBlack
21 # Trademarks or any confusingly similar mark as a trademark for your product,
22 # or use the QuantumBlack Trademarks in any other manner that might cause
23 # confusion in the marketplace, including but not limited to in advertising,
24 # on websites, or on software.
25 #
26 # See the License for the specific language governing permissions and
27 # limitations under the License.
28 """``TextLocalDataSet`` loads and saves data to a local text file. The data is
29 accessed text data using the python open function.
30 """
31 from pathlib import Path
32 from typing import Any, Dict
33
34 from kedro.io import AbstractDataSet
35 from kedro.io.core import DataSetError, FilepathVersionMixIn, Version
36
37
38 class TextLocalDataSet(AbstractDataSet, FilepathVersionMixIn):
39 """``TextLocalDataSet`` loads and saves unstructured text files.
40
41 Example:
42 ::
43
44 >>> from kedro.io import TextLocalDataSet
45 >>>
46 >>> string_to_write = "This will go in a file."
47 >>>
48 >>> data_set = TextLocalDataSet(filepath="docs/new_documentation.md")
49 >>> data_set.save(string_to_write)
50 >>> reloaded = data_set.load()
51 """
52
53 def _describe(self) -> Dict[str, Any]:
54 return dict(
55 filepath=self._filepath,
56 load_args=self._load_args,
57 save_args=self._save_args,
58 version=self._version,
59 )
60
61 def __init__(
62 self,
63 filepath: str,
64 load_args: Dict[str, Any] = None,
65 save_args: Dict[str, Any] = None,
66 version: Version = None,
67 ) -> None:
68 """Creates a new instance of ``TextFile``.
69
70 Args:
71 filepath: path to a text file.
72 load_args: Load arguments should be specified in accordance with
73 the built in open function. This can be found at
74 https://docs.python.org/3/library/functions.html#open
75 save_args: Save arguments should be specified in accordance with
76 the built in open function. This can be found at
77 https://docs.python.org/3/library/functions.html#open
78 version: If specified, should be an instance of
79 ``kedro.io.core.Version``. If its ``load`` attribute is
80 None, the latest version will be loaded. If its ``save``
81 attribute is None, save version will be autogenerated.
82 """
83 default_save_args = {"mode": "w"}
84 default_load_args = {"mode": "r"}
85
86 self._filepath = filepath
87 self._load_args = (
88 {**default_load_args, **load_args}
89 if load_args is not None
90 else default_load_args
91 )
92 self._save_args = (
93 {**default_save_args, **save_args}
94 if save_args is not None
95 else default_save_args
96 )
97 self._version = version
98
99 def _load(self) -> str:
100 load_path = self._get_load_path(self._filepath, self._version)
101 with open(load_path, **self._load_args) as _file:
102 return _file.read()
103
104 def _save(self, data: str) -> None:
105 save_path = Path(self._get_save_path(self._filepath, self._version))
106 save_path.parent.mkdir(parents=True, exist_ok=True)
107 with open(str(save_path), **self._save_args) as _file:
108 _file.write(data)
109
110 load_path = Path(self._get_load_path(self._filepath, self._version))
111 self._check_paths_consistency(
112 str(load_path.absolute()), str(save_path.absolute())
113 )
114
115 def _exists(self) -> bool:
116 try:
117 path = self._get_load_path(self._filepath, self._version)
118 except DataSetError:
119 return False
120 return Path(path).is_file()
121
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kedro/io/text_local.py b/kedro/io/text_local.py
--- a/kedro/io/text_local.py
+++ b/kedro/io/text_local.py
@@ -28,6 +28,7 @@
"""``TextLocalDataSet`` loads and saves data to a local text file. The data is
accessed text data using the python open function.
"""
+import os
from pathlib import Path
from typing import Any, Dict
@@ -83,7 +84,7 @@
default_save_args = {"mode": "w"}
default_load_args = {"mode": "r"}
- self._filepath = filepath
+ self._filepath = os.path.expanduser(filepath)
self._load_args = (
{**default_load_args, **load_args}
if load_args is not None
|
{"golden_diff": "diff --git a/kedro/io/text_local.py b/kedro/io/text_local.py\n--- a/kedro/io/text_local.py\n+++ b/kedro/io/text_local.py\n@@ -28,6 +28,7 @@\n \"\"\"``TextLocalDataSet`` loads and saves data to a local text file. The data is\n accessed text data using the python open function.\n \"\"\"\n+import os\n from pathlib import Path\n from typing import Any, Dict\n \n@@ -83,7 +84,7 @@\n default_save_args = {\"mode\": \"w\"}\n default_load_args = {\"mode\": \"r\"}\n \n- self._filepath = filepath\n+ self._filepath = os.path.expanduser(filepath)\n self._load_args = (\n {**default_load_args, **load_args}\n if load_args is not None\n", "issue": "User home directory is not expanded for TextLocalDataSet\n## Description\r\nUser home directory `~` is not automatically expanded for `TextLocalDataSet`, but it is automatically expanded for `ParquetLocalDataSet` and `CSVLocalDataSet `.\r\n\r\n## Context\r\nTrying to specify file paths relative to user home directory to simplify interoperability and handoff of Kedro pipelines between teammates - instead of manually replacing hardcoded absolute paths on each machine or user, each user will automatically recreate same directory structure relative to his/her home directory. \r\n\r\n## Steps to Reproduce\r\n```\r\nfrom kedro.io import TextLocalDataSet\r\nimport os\r\n\r\n\r\nstring_to_write = \"This will go in a file.\"\r\n\r\ndata_set = TextLocalDataSet(filepath=\"~/code/tmp/new_documentation.md\")\r\ndata_set.save(string_to_write)\r\n\r\nos.path.abspath(data_set._filepath)\r\n```\r\n\r\n## Expected Result\r\n`~` should be replaced by user home directory and file should be saved relative to it in subdirectory `code/tmp/`.\r\n\r\n## Actual Result\r\nDirectory `~/code/tmp/` is created in current working directory.\r\n\r\n\r\n## Your Environment\r\nInclude as many relevant details about the environment in which you experienced the bug:\r\n\r\n* Kedro version used (`pip show kedro` or `kedro -V`): v0.14.2\r\n* Python version used (`python -V`): Python 3.6.8 :: Anaconda, Inc.\r\n* Operating system and version: MacOS Mojave 10.14.3\r\n\r\n## Checklist\r\nInclude labels so that we can categorise your issue:\r\n\r\n- [ ] Add a \"Component\" label to the issue\r\n- [ ] Add a \"Priority\" label to the issue\r\n\n", "before_files": [{"content": "# Copyright 2018-2019 QuantumBlack Visual Analytics Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND\n# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS\n# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN\n# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#\n# The QuantumBlack Visual Analytics Limited (\u201cQuantumBlack\u201d) name and logo\n# (either separately or in combination, \u201cQuantumBlack Trademarks\u201d) are\n# trademarks of QuantumBlack. The License does not grant you any right or\n# license to the QuantumBlack Trademarks. 
You may not use the QuantumBlack\n# Trademarks or any confusingly similar mark as a trademark for your product,\n# or use the QuantumBlack Trademarks in any other manner that might cause\n# confusion in the marketplace, including but not limited to in advertising,\n# on websites, or on software.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"``TextLocalDataSet`` loads and saves data to a local text file. The data is\naccessed text data using the python open function.\n\"\"\"\nfrom pathlib import Path\nfrom typing import Any, Dict\n\nfrom kedro.io import AbstractDataSet\nfrom kedro.io.core import DataSetError, FilepathVersionMixIn, Version\n\n\nclass TextLocalDataSet(AbstractDataSet, FilepathVersionMixIn):\n \"\"\"``TextLocalDataSet`` loads and saves unstructured text files.\n\n Example:\n ::\n\n >>> from kedro.io import TextLocalDataSet\n >>>\n >>> string_to_write = \"This will go in a file.\"\n >>>\n >>> data_set = TextLocalDataSet(filepath=\"docs/new_documentation.md\")\n >>> data_set.save(string_to_write)\n >>> reloaded = data_set.load()\n \"\"\"\n\n def _describe(self) -> Dict[str, Any]:\n return dict(\n filepath=self._filepath,\n load_args=self._load_args,\n save_args=self._save_args,\n version=self._version,\n )\n\n def __init__(\n self,\n filepath: str,\n load_args: Dict[str, Any] = None,\n save_args: Dict[str, Any] = None,\n version: Version = None,\n ) -> None:\n \"\"\"Creates a new instance of ``TextFile``.\n\n Args:\n filepath: path to a text file.\n load_args: Load arguments should be specified in accordance with\n the built in open function. This can be found at\n https://docs.python.org/3/library/functions.html#open\n save_args: Save arguments should be specified in accordance with\n the built in open function. This can be found at\n https://docs.python.org/3/library/functions.html#open\n version: If specified, should be an instance of\n ``kedro.io.core.Version``. If its ``load`` attribute is\n None, the latest version will be loaded. 
If its ``save``\n attribute is None, save version will be autogenerated.\n \"\"\"\n default_save_args = {\"mode\": \"w\"}\n default_load_args = {\"mode\": \"r\"}\n\n self._filepath = filepath\n self._load_args = (\n {**default_load_args, **load_args}\n if load_args is not None\n else default_load_args\n )\n self._save_args = (\n {**default_save_args, **save_args}\n if save_args is not None\n else default_save_args\n )\n self._version = version\n\n def _load(self) -> str:\n load_path = self._get_load_path(self._filepath, self._version)\n with open(load_path, **self._load_args) as _file:\n return _file.read()\n\n def _save(self, data: str) -> None:\n save_path = Path(self._get_save_path(self._filepath, self._version))\n save_path.parent.mkdir(parents=True, exist_ok=True)\n with open(str(save_path), **self._save_args) as _file:\n _file.write(data)\n\n load_path = Path(self._get_load_path(self._filepath, self._version))\n self._check_paths_consistency(\n str(load_path.absolute()), str(save_path.absolute())\n )\n\n def _exists(self) -> bool:\n try:\n path = self._get_load_path(self._filepath, self._version)\n except DataSetError:\n return False\n return Path(path).is_file()\n", "path": "kedro/io/text_local.py"}], "after_files": [{"content": "# Copyright 2018-2019 QuantumBlack Visual Analytics Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND\n# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS\n# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN\n# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#\n# The QuantumBlack Visual Analytics Limited (\u201cQuantumBlack\u201d) name and logo\n# (either separately or in combination, \u201cQuantumBlack Trademarks\u201d) are\n# trademarks of QuantumBlack. The License does not grant you any right or\n# license to the QuantumBlack Trademarks. You may not use the QuantumBlack\n# Trademarks or any confusingly similar mark as a trademark for your product,\n# or use the QuantumBlack Trademarks in any other manner that might cause\n# confusion in the marketplace, including but not limited to in advertising,\n# on websites, or on software.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"``TextLocalDataSet`` loads and saves data to a local text file. 
The data is\naccessed text data using the python open function.\n\"\"\"\nimport os\nfrom pathlib import Path\nfrom typing import Any, Dict\n\nfrom kedro.io import AbstractDataSet\nfrom kedro.io.core import DataSetError, FilepathVersionMixIn, Version\n\n\nclass TextLocalDataSet(AbstractDataSet, FilepathVersionMixIn):\n \"\"\"``TextLocalDataSet`` loads and saves unstructured text files.\n\n Example:\n ::\n\n >>> from kedro.io import TextLocalDataSet\n >>>\n >>> string_to_write = \"This will go in a file.\"\n >>>\n >>> data_set = TextLocalDataSet(filepath=\"docs/new_documentation.md\")\n >>> data_set.save(string_to_write)\n >>> reloaded = data_set.load()\n \"\"\"\n\n def _describe(self) -> Dict[str, Any]:\n return dict(\n filepath=self._filepath,\n load_args=self._load_args,\n save_args=self._save_args,\n version=self._version,\n )\n\n def __init__(\n self,\n filepath: str,\n load_args: Dict[str, Any] = None,\n save_args: Dict[str, Any] = None,\n version: Version = None,\n ) -> None:\n \"\"\"Creates a new instance of ``TextFile``.\n\n Args:\n filepath: path to a text file.\n load_args: Load arguments should be specified in accordance with\n the built in open function. This can be found at\n https://docs.python.org/3/library/functions.html#open\n save_args: Save arguments should be specified in accordance with\n the built in open function. This can be found at\n https://docs.python.org/3/library/functions.html#open\n version: If specified, should be an instance of\n ``kedro.io.core.Version``. If its ``load`` attribute is\n None, the latest version will be loaded. If its ``save``\n attribute is None, save version will be autogenerated.\n \"\"\"\n default_save_args = {\"mode\": \"w\"}\n default_load_args = {\"mode\": \"r\"}\n\n self._filepath = os.path.expanduser(filepath)\n self._load_args = (\n {**default_load_args, **load_args}\n if load_args is not None\n else default_load_args\n )\n self._save_args = (\n {**default_save_args, **save_args}\n if save_args is not None\n else default_save_args\n )\n self._version = version\n\n def _load(self) -> str:\n load_path = self._get_load_path(self._filepath, self._version)\n with open(load_path, **self._load_args) as _file:\n return _file.read()\n\n def _save(self, data: str) -> None:\n save_path = Path(self._get_save_path(self._filepath, self._version))\n save_path.parent.mkdir(parents=True, exist_ok=True)\n with open(str(save_path), **self._save_args) as _file:\n _file.write(data)\n\n load_path = Path(self._get_load_path(self._filepath, self._version))\n self._check_paths_consistency(\n str(load_path.absolute()), str(save_path.absolute())\n )\n\n def _exists(self) -> bool:\n try:\n path = self._get_load_path(self._filepath, self._version)\n except DataSetError:\n return False\n return Path(path).is_file()\n", "path": "kedro/io/text_local.py"}]}
| 1,927 | 179 |
gh_patches_debug_37533
|
rasdani/github-patches
|
git_diff
|
alltheplaces__alltheplaces-3303
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider wyndham is broken
During the global build at 2021-10-20-14-42-48, spider **wyndham** failed with **116 features** and **1 error**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/logs/wyndham.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/wyndham.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/wyndham.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/wyndham.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import json
3 import re
4 import scrapy
5 from locations.items import GeojsonPointItem
6
7 BRAND_MAP = {
8 "hj": "hojo",
9 "HJ": "hojo",
10 "hojo": "HJ",
11 "lq": "laquinta",
12 "LQ": "laquinta",
13 "laquinta": "LQ",
14 "di": "days-inn",
15 "DI": "days-inn",
16 "days-inn": "DI",
17 "bh": "hawthorn-extended-stay",
18 "BH": "hawthorn-extended-stay",
19 "hawthorn-extended-stay": "BH",
20 "hr": "wyndham",
21 "HR": "wyndham",
22 "wyndham": "HR",
23 "wg": "wingate",
24 "WG": "wingate",
25 "wingate": "WG",
26 "se": "super-8",
27 "SE": "super-8",
28 "super-8": "SE",
29 "bu": "baymont",
30 "BU": "baymont",
31 "baymont": "BU",
32 "dx": "dolce",
33 "DX": "dolce",
34 "dolce": "DX",
35 "dz": "dazzler",
36 "DZ": "dazzler",
37 "dazzler": "DZ",
38 "wr": "wyndham-rewards",
39 "WR": "wyndham-rewards",
40 "wyndham-rewards": "WR",
41 "kg": "knights-inn",
42 "KG": "knights-inn",
43 "knights-inn": "KG",
44 "wt": "tryp",
45 "WT": "tryp",
46 "tryp": "WT",
47 "aa": "americinn",
48 "AA": "americinn",
49 "americinn": "AA",
50 "all": "wyndham-hotel-group",
51 "ALL": "wyndham-hotel-group",
52 "wyndham-hotel-group": "ALL",
53 "ce": "caesars-entertainment",
54 "CE": "caesars-entertainment",
55 "caesars-entertainment": "CE",
56 "mt": "microtel",
57 "MT": "microtel",
58 "microtel": "MT",
59 "gn": "wyndham-garden",
60 "GN": "wyndham-garden",
61 "wyndham-garden": "GN",
62 "gr": "wyndham-grand",
63 "GR": "wyndham-grand",
64 "wyndham-grand": "GR",
65 "es": "esplendor",
66 "ES": "esplendor",
67 "esplendor": "ES",
68 "ra": "ramada",
69 "RA": "ramada",
70 "ramada": "RA",
71 "re": "registry-collection",
72 "RE": "registry-collection",
73 "registry-collection": "RE",
74 "tl": "travelodge",
75 "TL": "travelodge",
76 "travelodge": "TL",
77 "vo": "wyndham-vacations",
78 "VO": "wyndham-vacations",
79 "wyndham-vacations": "VO",
80 "tq": "trademark",
81 "TQ": "trademark",
82 "trademark": "TQ",
83 }
84 BRAND_TIER_MAP = {"hr": "wy", "gr": "wy", "gn": "wy", "dz": "fe", "es": "fe"}
85 COUNTRIES = {
86 "Canada": "CA",
87 "Turkey": "TR",
88 "United States": "US",
89 "Mexico": "MX",
90 "Honduras": "HN",
91 "Chile": "CL",
92 "Colombia": "CO",
93 }
94 HEADERS = {
95 "Host": "www.wyndhamhotels.com",
96 "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:81.0) Gecko/20100101 Firefox/81.0",
97 "Accept": "*/*",
98 "Accept-Language": "en-US,en;q=0.5",
99 "Accept-Encoding": "gzip, deflate, br",
100 "X-Requested-With": "XMLHttpRequest",
101 "DNT": "1",
102 "Connection": "keep-alive",
103 "Referer": "https://www.wyndhamhotels.com/locations",
104 "TE": "Trailers",
105 }
106
107
108
109 def create_url(brand, city, state, unique_url, tier_id):
110 # In the html, there is a script which this recreates
111 brand_name = BRAND_MAP.get(brand)
112 if not brand_name:
113 brand_name = BRAND_MAP.get(tier_id.lower())
114 url = brand_name + "/"
115 url += city.replace(" ", "-").replace(".","").lower()
116 state_name = (
117 f"-{state.replace(' ','-').lower()}"
118 if state.lower() != "other than us/canada"
119 else ""
120 )
121 url += state_name + "/"
122 url += unique_url.lower() + "/"
123 url += "overview"
124 return url
125
126
127 class WyndhamSpider(scrapy.Spider):
128 name = "wyndham"
129 allowed_domains = ["www.wyndhamhotels.com"]
130 headers = HEADERS
131
132 def start_requests(self):
133 start_url = "https://www.wyndhamhotels.com/BWSServices/services/search/properties?recordsPerPage=&pageNumber=1&brandId=ALL&countryCode=&noPropertyData=false"
134 yield scrapy.Request(start_url, callback=self.parse, headers=self.headers)
135
136 def parse(self, response):
137 data = json.loads(response.text)
138 for country in data["countries"]:
139 country_code = country["countryCode"]
140 for state in country["states"]:
141 state_name = state["stateName"]
142 for city in state["cities"]:
143 city_name = city["cityName"]
144 for property_ in city["propertyList"]:
145 property_id = property_["propertyId"]
146 brand_id = property_["brandId"]
147 brand_name = property_["brand"]
148 brand_tier = property_["tierId"]
149 unique_url = property_["uniqueUrl"]
150 url = create_url(
151 brand_id, city_name, state_name, unique_url, brand_tier
152 )
153 yield scrapy.Request(
154 f"https://{self.allowed_domains[0]}/{url}",
155 self.parse_property,
156 meta={
157 "id": property_id,
158 "country_code": country_code,
159 "brand_name": brand_name,
160 },
161 headers=HEADERS,
162 )
163
164 def parse_property(self, response):
165 raw_json = re.search(
166 r'<script type="application\/ld\+json"\>(.+?)\<',
167 response.text,
168 flags=re.DOTALL,
169 )
170 if not raw_json:
171 return None
172 data = json.loads(raw_json.group(1).replace("\t"," "))
173 properties = {
174 "ref": response.meta["id"],
175 "lat": data["geo"]["latitude"],
176 "lon": data["geo"]["longitude"],
177 "name": data["name"],
178 "addr_full": data["address"]["streetAddress"],
179 "city": data["address"]["addressLocality"],
180 "state": data["address"].get("addressRegion"),
181 "postcode": data["address"].get("postalCode"),
182 "country": response.meta["country_code"],
183 "phone": data["telephone"],
184 "website": response.url,
185 "brand": response.meta["brand_name"],
186 }
187 yield GeojsonPointItem(**properties)
188
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/locations/spiders/wyndham.py b/locations/spiders/wyndham.py
--- a/locations/spiders/wyndham.py
+++ b/locations/spiders/wyndham.py
@@ -3,6 +3,7 @@
import re
import scrapy
from locations.items import GeojsonPointItem
+from urllib.parse import urlparse, parse_qs
BRAND_MAP = {
"hj": "hojo",
@@ -91,19 +92,6 @@
"Chile": "CL",
"Colombia": "CO",
}
-HEADERS = {
- "Host": "www.wyndhamhotels.com",
- "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:81.0) Gecko/20100101 Firefox/81.0",
- "Accept": "*/*",
- "Accept-Language": "en-US,en;q=0.5",
- "Accept-Encoding": "gzip, deflate, br",
- "X-Requested-With": "XMLHttpRequest",
- "DNT": "1",
- "Connection": "keep-alive",
- "Referer": "https://www.wyndhamhotels.com/locations",
- "TE": "Trailers",
-}
-
def create_url(brand, city, state, unique_url, tier_id):
@@ -126,15 +114,25 @@
class WyndhamSpider(scrapy.Spider):
name = "wyndham"
+ download_speed = 0.7
allowed_domains = ["www.wyndhamhotels.com"]
- headers = HEADERS
-
- def start_requests(self):
- start_url = "https://www.wyndhamhotels.com/BWSServices/services/search/properties?recordsPerPage=&pageNumber=1&brandId=ALL&countryCode=&noPropertyData=false"
- yield scrapy.Request(start_url, callback=self.parse, headers=self.headers)
+ start_urls = (
+ 'https://www.wyndhamhotels.com/BWSServices/services/search/properties?recordsPerPage=50&pageNumber=1&brandId=ALL&countryCode=US%2CCA%2CMX',
+ )
def parse(self, response):
data = json.loads(response.text)
+
+ page_count = data.get('pageCount')
+ parsed_url = urlparse(response.request.url)
+ parsed_args = parse_qs(parsed_url.query)
+ page_number = int(parsed_args['pageNumber'][0])
+ if page_number <= page_count:
+ next_page_number = page_number + 1
+ yield scrapy.Request(
+ f'https://www.wyndhamhotels.com/BWSServices/services/search/properties?recordsPerPage=50&pageNumber={next_page_number}&brandId=ALL&countryCode=US%2CCA%2CMX',
+ )
+
for country in data["countries"]:
country_code = country["countryCode"]
for state in country["states"]:
@@ -158,7 +156,6 @@
"country_code": country_code,
"brand_name": brand_name,
},
- headers=HEADERS,
)
def parse_property(self, response):
|
{"golden_diff": "diff --git a/locations/spiders/wyndham.py b/locations/spiders/wyndham.py\n--- a/locations/spiders/wyndham.py\n+++ b/locations/spiders/wyndham.py\n@@ -3,6 +3,7 @@\n import re\n import scrapy\n from locations.items import GeojsonPointItem\n+from urllib.parse import urlparse, parse_qs\n \n BRAND_MAP = {\n \"hj\": \"hojo\",\n@@ -91,19 +92,6 @@\n \"Chile\": \"CL\",\n \"Colombia\": \"CO\",\n }\n-HEADERS = {\n- \"Host\": \"www.wyndhamhotels.com\",\n- \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:81.0) Gecko/20100101 Firefox/81.0\",\n- \"Accept\": \"*/*\",\n- \"Accept-Language\": \"en-US,en;q=0.5\",\n- \"Accept-Encoding\": \"gzip, deflate, br\",\n- \"X-Requested-With\": \"XMLHttpRequest\",\n- \"DNT\": \"1\",\n- \"Connection\": \"keep-alive\",\n- \"Referer\": \"https://www.wyndhamhotels.com/locations\",\n- \"TE\": \"Trailers\",\n-}\n-\n \n \n def create_url(brand, city, state, unique_url, tier_id):\n@@ -126,15 +114,25 @@\n \n class WyndhamSpider(scrapy.Spider):\n name = \"wyndham\"\n+ download_speed = 0.7\n allowed_domains = [\"www.wyndhamhotels.com\"]\n- headers = HEADERS\n-\n- def start_requests(self):\n- start_url = \"https://www.wyndhamhotels.com/BWSServices/services/search/properties?recordsPerPage=&pageNumber=1&brandId=ALL&countryCode=&noPropertyData=false\"\n- yield scrapy.Request(start_url, callback=self.parse, headers=self.headers)\n+ start_urls = (\n+ 'https://www.wyndhamhotels.com/BWSServices/services/search/properties?recordsPerPage=50&pageNumber=1&brandId=ALL&countryCode=US%2CCA%2CMX',\n+ )\n \n def parse(self, response):\n data = json.loads(response.text)\n+\n+ page_count = data.get('pageCount')\n+ parsed_url = urlparse(response.request.url)\n+ parsed_args = parse_qs(parsed_url.query)\n+ page_number = int(parsed_args['pageNumber'][0])\n+ if page_number <= page_count:\n+ next_page_number = page_number + 1\n+ yield scrapy.Request(\n+ f'https://www.wyndhamhotels.com/BWSServices/services/search/properties?recordsPerPage=50&pageNumber={next_page_number}&brandId=ALL&countryCode=US%2CCA%2CMX',\n+ )\n+\n for country in data[\"countries\"]:\n country_code = country[\"countryCode\"]\n for state in country[\"states\"]:\n@@ -158,7 +156,6 @@\n \"country_code\": country_code,\n \"brand_name\": brand_name,\n },\n- headers=HEADERS,\n )\n \n def parse_property(self, response):\n", "issue": "Spider wyndham is broken\nDuring the global build at 2021-10-20-14-42-48, spider **wyndham** failed with **116 features** and **1 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/logs/wyndham.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/wyndham.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/wyndham.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nimport re\nimport scrapy\nfrom locations.items import GeojsonPointItem\n\nBRAND_MAP = {\n \"hj\": \"hojo\",\n \"HJ\": \"hojo\",\n \"hojo\": \"HJ\",\n \"lq\": \"laquinta\",\n \"LQ\": \"laquinta\",\n \"laquinta\": \"LQ\",\n \"di\": \"days-inn\",\n \"DI\": \"days-inn\",\n \"days-inn\": \"DI\",\n \"bh\": \"hawthorn-extended-stay\",\n \"BH\": \"hawthorn-extended-stay\",\n \"hawthorn-extended-stay\": \"BH\",\n \"hr\": \"wyndham\",\n \"HR\": \"wyndham\",\n \"wyndham\": \"HR\",\n \"wg\": \"wingate\",\n \"WG\": \"wingate\",\n \"wingate\": \"WG\",\n \"se\": \"super-8\",\n \"SE\": \"super-8\",\n \"super-8\": \"SE\",\n \"bu\": \"baymont\",\n \"BU\": \"baymont\",\n 
\"baymont\": \"BU\",\n \"dx\": \"dolce\",\n \"DX\": \"dolce\",\n \"dolce\": \"DX\",\n \"dz\": \"dazzler\",\n \"DZ\": \"dazzler\",\n \"dazzler\": \"DZ\",\n \"wr\": \"wyndham-rewards\",\n \"WR\": \"wyndham-rewards\",\n \"wyndham-rewards\": \"WR\",\n \"kg\": \"knights-inn\",\n \"KG\": \"knights-inn\",\n \"knights-inn\": \"KG\",\n \"wt\": \"tryp\",\n \"WT\": \"tryp\",\n \"tryp\": \"WT\",\n \"aa\": \"americinn\",\n \"AA\": \"americinn\",\n \"americinn\": \"AA\",\n \"all\": \"wyndham-hotel-group\",\n \"ALL\": \"wyndham-hotel-group\",\n \"wyndham-hotel-group\": \"ALL\",\n \"ce\": \"caesars-entertainment\",\n \"CE\": \"caesars-entertainment\",\n \"caesars-entertainment\": \"CE\",\n \"mt\": \"microtel\",\n \"MT\": \"microtel\",\n \"microtel\": \"MT\",\n \"gn\": \"wyndham-garden\",\n \"GN\": \"wyndham-garden\",\n \"wyndham-garden\": \"GN\",\n \"gr\": \"wyndham-grand\",\n \"GR\": \"wyndham-grand\",\n \"wyndham-grand\": \"GR\",\n \"es\": \"esplendor\",\n \"ES\": \"esplendor\",\n \"esplendor\": \"ES\",\n \"ra\": \"ramada\",\n \"RA\": \"ramada\",\n \"ramada\": \"RA\",\n \"re\": \"registry-collection\",\n \"RE\": \"registry-collection\",\n \"registry-collection\": \"RE\",\n \"tl\": \"travelodge\",\n \"TL\": \"travelodge\",\n \"travelodge\": \"TL\",\n \"vo\": \"wyndham-vacations\",\n \"VO\": \"wyndham-vacations\",\n \"wyndham-vacations\": \"VO\",\n \"tq\": \"trademark\",\n \"TQ\": \"trademark\",\n \"trademark\": \"TQ\",\n}\nBRAND_TIER_MAP = {\"hr\": \"wy\", \"gr\": \"wy\", \"gn\": \"wy\", \"dz\": \"fe\", \"es\": \"fe\"}\nCOUNTRIES = {\n \"Canada\": \"CA\",\n \"Turkey\": \"TR\",\n \"United States\": \"US\",\n \"Mexico\": \"MX\",\n \"Honduras\": \"HN\",\n \"Chile\": \"CL\",\n \"Colombia\": \"CO\",\n}\nHEADERS = {\n \"Host\": \"www.wyndhamhotels.com\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:81.0) Gecko/20100101 Firefox/81.0\",\n \"Accept\": \"*/*\",\n \"Accept-Language\": \"en-US,en;q=0.5\",\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n \"DNT\": \"1\",\n \"Connection\": \"keep-alive\",\n \"Referer\": \"https://www.wyndhamhotels.com/locations\",\n \"TE\": \"Trailers\",\n}\n\n\n\ndef create_url(brand, city, state, unique_url, tier_id):\n # In the html, there is a script which this recreates\n brand_name = BRAND_MAP.get(brand)\n if not brand_name:\n brand_name = BRAND_MAP.get(tier_id.lower())\n url = brand_name + \"/\"\n url += city.replace(\" \", \"-\").replace(\".\",\"\").lower()\n state_name = (\n f\"-{state.replace(' ','-').lower()}\"\n if state.lower() != \"other than us/canada\"\n else \"\"\n )\n url += state_name + \"/\"\n url += unique_url.lower() + \"/\"\n url += \"overview\"\n return url\n\n\nclass WyndhamSpider(scrapy.Spider):\n name = \"wyndham\"\n allowed_domains = [\"www.wyndhamhotels.com\"]\n headers = HEADERS\n\n def start_requests(self):\n start_url = \"https://www.wyndhamhotels.com/BWSServices/services/search/properties?recordsPerPage=&pageNumber=1&brandId=ALL&countryCode=&noPropertyData=false\"\n yield scrapy.Request(start_url, callback=self.parse, headers=self.headers)\n\n def parse(self, response):\n data = json.loads(response.text)\n for country in data[\"countries\"]:\n country_code = country[\"countryCode\"]\n for state in country[\"states\"]:\n state_name = state[\"stateName\"]\n for city in state[\"cities\"]:\n city_name = city[\"cityName\"]\n for property_ in city[\"propertyList\"]:\n property_id = property_[\"propertyId\"]\n brand_id = property_[\"brandId\"]\n brand_name = property_[\"brand\"]\n brand_tier = 
property_[\"tierId\"]\n unique_url = property_[\"uniqueUrl\"]\n url = create_url(\n brand_id, city_name, state_name, unique_url, brand_tier\n )\n yield scrapy.Request(\n f\"https://{self.allowed_domains[0]}/{url}\",\n self.parse_property,\n meta={\n \"id\": property_id,\n \"country_code\": country_code,\n \"brand_name\": brand_name,\n },\n headers=HEADERS,\n )\n\n def parse_property(self, response):\n raw_json = re.search(\n r'<script type=\"application\\/ld\\+json\"\\>(.+?)\\<',\n response.text,\n flags=re.DOTALL,\n )\n if not raw_json:\n return None\n data = json.loads(raw_json.group(1).replace(\"\\t\",\" \"))\n properties = {\n \"ref\": response.meta[\"id\"],\n \"lat\": data[\"geo\"][\"latitude\"],\n \"lon\": data[\"geo\"][\"longitude\"],\n \"name\": data[\"name\"],\n \"addr_full\": data[\"address\"][\"streetAddress\"],\n \"city\": data[\"address\"][\"addressLocality\"],\n \"state\": data[\"address\"].get(\"addressRegion\"),\n \"postcode\": data[\"address\"].get(\"postalCode\"),\n \"country\": response.meta[\"country_code\"],\n \"phone\": data[\"telephone\"],\n \"website\": response.url,\n \"brand\": response.meta[\"brand_name\"],\n }\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/wyndham.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nimport re\nimport scrapy\nfrom locations.items import GeojsonPointItem\nfrom urllib.parse import urlparse, parse_qs\n\nBRAND_MAP = {\n \"hj\": \"hojo\",\n \"HJ\": \"hojo\",\n \"hojo\": \"HJ\",\n \"lq\": \"laquinta\",\n \"LQ\": \"laquinta\",\n \"laquinta\": \"LQ\",\n \"di\": \"days-inn\",\n \"DI\": \"days-inn\",\n \"days-inn\": \"DI\",\n \"bh\": \"hawthorn-extended-stay\",\n \"BH\": \"hawthorn-extended-stay\",\n \"hawthorn-extended-stay\": \"BH\",\n \"hr\": \"wyndham\",\n \"HR\": \"wyndham\",\n \"wyndham\": \"HR\",\n \"wg\": \"wingate\",\n \"WG\": \"wingate\",\n \"wingate\": \"WG\",\n \"se\": \"super-8\",\n \"SE\": \"super-8\",\n \"super-8\": \"SE\",\n \"bu\": \"baymont\",\n \"BU\": \"baymont\",\n \"baymont\": \"BU\",\n \"dx\": \"dolce\",\n \"DX\": \"dolce\",\n \"dolce\": \"DX\",\n \"dz\": \"dazzler\",\n \"DZ\": \"dazzler\",\n \"dazzler\": \"DZ\",\n \"wr\": \"wyndham-rewards\",\n \"WR\": \"wyndham-rewards\",\n \"wyndham-rewards\": \"WR\",\n \"kg\": \"knights-inn\",\n \"KG\": \"knights-inn\",\n \"knights-inn\": \"KG\",\n \"wt\": \"tryp\",\n \"WT\": \"tryp\",\n \"tryp\": \"WT\",\n \"aa\": \"americinn\",\n \"AA\": \"americinn\",\n \"americinn\": \"AA\",\n \"all\": \"wyndham-hotel-group\",\n \"ALL\": \"wyndham-hotel-group\",\n \"wyndham-hotel-group\": \"ALL\",\n \"ce\": \"caesars-entertainment\",\n \"CE\": \"caesars-entertainment\",\n \"caesars-entertainment\": \"CE\",\n \"mt\": \"microtel\",\n \"MT\": \"microtel\",\n \"microtel\": \"MT\",\n \"gn\": \"wyndham-garden\",\n \"GN\": \"wyndham-garden\",\n \"wyndham-garden\": \"GN\",\n \"gr\": \"wyndham-grand\",\n \"GR\": \"wyndham-grand\",\n \"wyndham-grand\": \"GR\",\n \"es\": \"esplendor\",\n \"ES\": \"esplendor\",\n \"esplendor\": \"ES\",\n \"ra\": \"ramada\",\n \"RA\": \"ramada\",\n \"ramada\": \"RA\",\n \"re\": \"registry-collection\",\n \"RE\": \"registry-collection\",\n \"registry-collection\": \"RE\",\n \"tl\": \"travelodge\",\n \"TL\": \"travelodge\",\n \"travelodge\": \"TL\",\n \"vo\": \"wyndham-vacations\",\n \"VO\": \"wyndham-vacations\",\n \"wyndham-vacations\": \"VO\",\n \"tq\": \"trademark\",\n \"TQ\": \"trademark\",\n \"trademark\": \"TQ\",\n}\nBRAND_TIER_MAP = {\"hr\": \"wy\", \"gr\": \"wy\", \"gn\": \"wy\", \"dz\": \"fe\", \"es\": \"fe\"}\nCOUNTRIES 
= {\n \"Canada\": \"CA\",\n \"Turkey\": \"TR\",\n \"United States\": \"US\",\n \"Mexico\": \"MX\",\n \"Honduras\": \"HN\",\n \"Chile\": \"CL\",\n \"Colombia\": \"CO\",\n}\n\n\ndef create_url(brand, city, state, unique_url, tier_id):\n # In the html, there is a script which this recreates\n brand_name = BRAND_MAP.get(brand)\n if not brand_name:\n brand_name = BRAND_MAP.get(tier_id.lower())\n url = brand_name + \"/\"\n url += city.replace(\" \", \"-\").replace(\".\",\"\").lower()\n state_name = (\n f\"-{state.replace(' ','-').lower()}\"\n if state.lower() != \"other than us/canada\"\n else \"\"\n )\n url += state_name + \"/\"\n url += unique_url.lower() + \"/\"\n url += \"overview\"\n return url\n\n\nclass WyndhamSpider(scrapy.Spider):\n name = \"wyndham\"\n download_speed = 0.7\n allowed_domains = [\"www.wyndhamhotels.com\"]\n start_urls = (\n 'https://www.wyndhamhotels.com/BWSServices/services/search/properties?recordsPerPage=50&pageNumber=1&brandId=ALL&countryCode=US%2CCA%2CMX',\n )\n\n def parse(self, response):\n data = json.loads(response.text)\n\n page_count = data.get('pageCount')\n parsed_url = urlparse(response.request.url)\n parsed_args = parse_qs(parsed_url.query)\n page_number = int(parsed_args['pageNumber'][0])\n if page_number <= page_count:\n next_page_number = page_number + 1\n yield scrapy.Request(\n f'https://www.wyndhamhotels.com/BWSServices/services/search/properties?recordsPerPage=50&pageNumber={next_page_number}&brandId=ALL&countryCode=US%2CCA%2CMX',\n )\n\n for country in data[\"countries\"]:\n country_code = country[\"countryCode\"]\n for state in country[\"states\"]:\n state_name = state[\"stateName\"]\n for city in state[\"cities\"]:\n city_name = city[\"cityName\"]\n for property_ in city[\"propertyList\"]:\n property_id = property_[\"propertyId\"]\n brand_id = property_[\"brandId\"]\n brand_name = property_[\"brand\"]\n brand_tier = property_[\"tierId\"]\n unique_url = property_[\"uniqueUrl\"]\n url = create_url(\n brand_id, city_name, state_name, unique_url, brand_tier\n )\n yield scrapy.Request(\n f\"https://{self.allowed_domains[0]}/{url}\",\n self.parse_property,\n meta={\n \"id\": property_id,\n \"country_code\": country_code,\n \"brand_name\": brand_name,\n },\n )\n\n def parse_property(self, response):\n raw_json = re.search(\n r'<script type=\"application\\/ld\\+json\"\\>(.+?)\\<',\n response.text,\n flags=re.DOTALL,\n )\n if not raw_json:\n return None\n data = json.loads(raw_json.group(1).replace(\"\\t\",\" \"))\n properties = {\n \"ref\": response.meta[\"id\"],\n \"lat\": data[\"geo\"][\"latitude\"],\n \"lon\": data[\"geo\"][\"longitude\"],\n \"name\": data[\"name\"],\n \"addr_full\": data[\"address\"][\"streetAddress\"],\n \"city\": data[\"address\"][\"addressLocality\"],\n \"state\": data[\"address\"].get(\"addressRegion\"),\n \"postcode\": data[\"address\"].get(\"postalCode\"),\n \"country\": response.meta[\"country_code\"],\n \"phone\": data[\"telephone\"],\n \"website\": response.url,\n \"brand\": response.meta[\"brand_name\"],\n }\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/wyndham.py"}]}
| 2,588 | 728 |
gh_patches_debug_20777
|
rasdani/github-patches
|
git_diff
|
kserve__kserve-3089
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Kserve ModelServer().start(list_of_models) fails on Windows
/kind bug
**What steps did you take and what happened:**
When ModelServer().start(list_of_models) is called, an exception is raised at this location: https://github.com/kserve/kserve/blob/0ba642f9b6fa40b38fe272d04c8dfde3598bfb05/python/kserve/kserve/model_server.py#L154
**What did you expect to happen:**
This simple workaround makes it possible to run the code on Windows:
```
# SIGQUIT is not available on Windows
try:
sig_list = [signal.SIGINT, signal.SIGTERM, signal.SIGQUIT]
except:
sig_list = [signal.SIGINT, signal.SIGTERM]
# add_signal_handler may not have been implemented
try:
for sig in sig_list:
loop.add_signal_handler(
sig, lambda s=sig: asyncio.create_task(self.stop(sig=s))
)
except:
pass
```
Even though Windows-based containers are very unlikely to be used for deploying KServe inference services, the ability to run kserve-based code locally on Windows-based machines for development/debugging purposes often comes in very handy. I suggest fixing the Windows incompatibility issue in the next release of the kserve Python package.
**Environment:**
- OS (e.g. from `/etc/os-release`): Windows 10
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/kserve/kserve/model_server.py`
Content:
```
1 # Copyright 2022 The KServe Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import argparse
16 import asyncio
17 import concurrent.futures
18 import multiprocessing
19 import signal
20 import socket
21 from multiprocessing import Process
22 from typing import Dict, List, Optional, Union
23
24 from ray import serve as rayserve
25 from ray.serve.api import Deployment
26 from ray.serve.handle import RayServeHandle
27
28 from .logging import KSERVE_LOG_CONFIG, logger
29 from .model import Model
30 from .model_repository import ModelRepository
31 from .protocol.dataplane import DataPlane
32 from .protocol.grpc.server import GRPCServer
33 from .protocol.model_repository_extension import ModelRepositoryExtension
34 from .protocol.rest.server import UvicornServer
35 from .utils import utils
36
37 DEFAULT_HTTP_PORT = 8080
38 DEFAULT_GRPC_PORT = 8081
39
40 parser = argparse.ArgumentParser(add_help=False)
41 parser.add_argument("--http_port", default=DEFAULT_HTTP_PORT, type=int,
42 help="The HTTP Port listened to by the model server.")
43 parser.add_argument("--grpc_port", default=DEFAULT_GRPC_PORT, type=int,
44 help="The GRPC Port listened to by the model server.")
45 parser.add_argument("--workers", default=1, type=int,
46 help="The number of workers for multi-processing.")
47 parser.add_argument("--max_threads", default=4, type=int,
48 help="The number of max processing threads in each worker.")
49 parser.add_argument('--max_asyncio_workers', default=None, type=int,
50 help='Max number of asyncio workers to spawn')
51 parser.add_argument("--enable_grpc", default=True, type=lambda x: utils.strtobool(x),
52 help="Enable gRPC for the model server")
53 parser.add_argument("--enable_docs_url", default=False, type=lambda x: utils.strtobool(x),
54 help="Enable docs url '/docs' to display Swagger UI.")
55 parser.add_argument("--enable_latency_logging", default=True, type=lambda x: utils.strtobool(x),
56 help="Output a log per request with latency metrics.")
57 parser.add_argument("--configure_logging", default=True, type=lambda x: utils.strtobool(x),
58 help="Whether to configure KServe and Uvicorn logging")
59 parser.add_argument("--log_config_file", default=None, type=str,
60 help="File path containing UvicornServer's log config. Needs to be a yaml or json file.")
61 parser.add_argument("--access_log_format", default=None, type=str,
62 help="Format to set for the access log (provided by asgi-logger).")
63
64 args, _ = parser.parse_known_args()
65
66
67 class ModelServer:
68 """KServe ModelServer
69
70 Args:
71 http_port (int): HTTP port. Default: ``8080``.
72 grpc_port (int): GRPC port. Default: ``8081``.
73 workers (int): Number of workers for uvicorn. Default: ``1``.
74 max_threads (int): Max number of processing threads. Default: ``4``
75 max_asyncio_workers (int): Max number of AsyncIO threads. Default: ``None``
76 registered_models (ModelRepository): Model repository with registered models.
77 enable_grpc (bool): Whether to turn on grpc server. Default: ``True``
78 enable_docs_url (bool): Whether to turn on ``/docs`` Swagger UI. Default: ``False``.
79 enable_latency_logging (bool): Whether to log latency metric. Default: ``True``.
80 configure_logging (bool): Whether to configure KServe and Uvicorn logging. Default: ``True``.
81 log_config (dict or str): File path or dict containing log config. Default: ``None``.
82 access_log_format (string): Format to set for the access log (provided by asgi-logger). Default: ``None``
83 """
84
85 def __init__(self, http_port: int = args.http_port,
86 grpc_port: int = args.grpc_port,
87 workers: int = args.workers,
88 max_threads: int = args.max_threads,
89 max_asyncio_workers: int = args.max_asyncio_workers,
90 registered_models: ModelRepository = ModelRepository(),
91 enable_grpc: bool = args.enable_grpc,
92 enable_docs_url: bool = args.enable_docs_url,
93 enable_latency_logging: bool = args.enable_latency_logging,
94 configure_logging: bool = args.configure_logging,
95 log_config: Optional[Union[Dict, str]] = args.log_config_file,
96 access_log_format: str = args.access_log_format):
97 self.registered_models = registered_models
98 self.http_port = http_port
99 self.grpc_port = grpc_port
100 self.workers = workers
101 self.max_threads = max_threads
102 self.max_asyncio_workers = max_asyncio_workers
103 self.enable_grpc = enable_grpc
104 self.enable_docs_url = enable_docs_url
105 self.enable_latency_logging = enable_latency_logging
106 self.dataplane = DataPlane(model_registry=registered_models)
107 self.model_repository_extension = ModelRepositoryExtension(
108 model_registry=self.registered_models)
109 self._grpc_server = None
110 if self.enable_grpc:
111 self._grpc_server = GRPCServer(grpc_port, self.dataplane,
112 self.model_repository_extension)
113
114 # Logs can be passed as a path to a file or a dictConfig.
115 # We rely on Uvicorn to configure the loggers for us.
116 if configure_logging:
117 self.log_config = log_config if log_config is not None else KSERVE_LOG_CONFIG
118 else:
119 # By setting log_config to None we tell Uvicorn not to configure logging
120 self.log_config = None
121
122 self.access_log_format = access_log_format
123
124 def start(self, models: Union[List[Model], Dict[str, Deployment]]) -> None:
125 if isinstance(models, list):
126 for model in models:
127 if isinstance(model, Model):
128 self.register_model(model)
129 # pass whether to log request latency into the model
130 model.enable_latency_logging = self.enable_latency_logging
131 else:
132 raise RuntimeError("Model type should be 'Model'")
133 elif isinstance(models, dict):
134 if all([isinstance(v, Deployment) for v in models.values()]):
135 # TODO: make this port number a variable
136 rayserve.start(detached=True, http_options={"host": "0.0.0.0", "port": 9071})
137 for key in models:
138 models[key].deploy()
139 handle = models[key].get_handle()
140 self.register_model_handle(key, handle)
141 else:
142 raise RuntimeError("Model type should be RayServe Deployment")
143 else:
144 raise RuntimeError("Unknown model collection types")
145
146 if self.max_asyncio_workers is None:
147 # formula as suggest in https://bugs.python.org/issue35279
148 self.max_asyncio_workers = min(32, utils.cpu_count() + 4)
149 logger.info(f"Setting max asyncio worker threads as {self.max_asyncio_workers}")
150 asyncio.get_event_loop().set_default_executor(
151 concurrent.futures.ThreadPoolExecutor(max_workers=self.max_asyncio_workers))
152
153 async def serve():
154 logger.info(f"Starting uvicorn with {self.workers} workers")
155 loop = asyncio.get_event_loop()
156 for sig in [signal.SIGINT, signal.SIGTERM, signal.SIGQUIT]:
157 loop.add_signal_handler(
158 sig, lambda s=sig: asyncio.create_task(self.stop(sig=s))
159 )
160 if self.workers == 1:
161 self._rest_server = UvicornServer(self.http_port, [],
162 self.dataplane, self.model_repository_extension,
163 self.enable_docs_url,
164 log_config=self.log_config,
165 access_log_format=self.access_log_format)
166 await self._rest_server.run()
167 else:
168 # Since py38 MacOS/Windows defaults to use spawn for starting multiprocessing.
169 # https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
170 # Spawn does not work with FastAPI/uvicorn in multiprocessing mode, use fork for multiprocessing
171 # https://github.com/tiangolo/fastapi/issues/1586
172 serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
173 serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
174 serversocket.bind(('0.0.0.0', self.http_port))
175 serversocket.listen(5)
176 multiprocessing.set_start_method('fork')
177 server = UvicornServer(self.http_port, [serversocket],
178 self.dataplane, self.model_repository_extension,
179 self.enable_docs_url, log_config=self.log_config,
180 access_log_format=self.access_log_format)
181 for _ in range(self.workers):
182 p = Process(target=server.run_sync)
183 p.start()
184
185 async def servers_task():
186 servers = [serve()]
187 if self.enable_grpc:
188 servers.append(self._grpc_server.start(self.max_threads))
189 await asyncio.gather(*servers)
190
191 asyncio.run(servers_task())
192
193 async def stop(self, sig: Optional[int] = None):
194 logger.info("Stopping the model server")
195 if self._rest_server:
196 logger.info("Stopping the rest server")
197 await self._rest_server.stop()
198 if self._grpc_server:
199 logger.info("Stopping the grpc server")
200 await self._grpc_server.stop(sig)
201
202 def register_model_handle(self, name: str, model_handle: RayServeHandle):
203 self.registered_models.update_handle(name, model_handle)
204 logger.info("Registering model handle: %s", name)
205
206 def register_model(self, model: Model):
207 if not model.name:
208 raise Exception(
209 "Failed to register model, model.name must be provided.")
210 self.registered_models.update(model)
211 logger.info("Registering model: %s", model.name)
212
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/python/kserve/kserve/model_server.py b/python/kserve/kserve/model_server.py
--- a/python/kserve/kserve/model_server.py
+++ b/python/kserve/kserve/model_server.py
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import sys
import argparse
import asyncio
import concurrent.futures
@@ -153,7 +154,12 @@
async def serve():
logger.info(f"Starting uvicorn with {self.workers} workers")
loop = asyncio.get_event_loop()
- for sig in [signal.SIGINT, signal.SIGTERM, signal.SIGQUIT]:
+ if sys.platform not in ['win32', 'win64']:
+ sig_list = [signal.SIGINT, signal.SIGTERM, signal.SIGQUIT]
+ else:
+ sig_list = [signal.SIGINT, signal.SIGTERM]
+
+ for sig in sig_list:
loop.add_signal_handler(
sig, lambda s=sig: asyncio.create_task(self.stop(sig=s))
)
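
For reference, the same registration can also be written defensively without checking platform strings at all — a hypothetical sketch (the helper name and its arguments are illustrative, not part of the patch) that tolerates both a missing `SIGQUIT` attribute and an event loop that does not implement `add_signal_handler`:

```python
import asyncio
import signal


def install_stop_handlers(loop: asyncio.AbstractEventLoop, stop) -> None:
    """Register shutdown handlers only for signals available on this platform."""
    # SIGQUIT does not exist on Windows, so resolve signal names defensively.
    for name in ("SIGINT", "SIGTERM", "SIGQUIT"):
        sig = getattr(signal, name, None)
        if sig is None:
            continue
        try:
            loop.add_signal_handler(
                sig, lambda s=sig: asyncio.create_task(stop(sig=s))
            )
        except NotImplementedError:
            # Windows event loops do not implement add_signal_handler.
            pass
```

Either approach keeps the server start-up path runnable on Windows while preserving the existing shutdown behaviour on POSIX systems.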
|
{"golden_diff": "diff --git a/python/kserve/kserve/model_server.py b/python/kserve/kserve/model_server.py\n--- a/python/kserve/kserve/model_server.py\n+++ b/python/kserve/kserve/model_server.py\n@@ -12,6 +12,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+import sys\n import argparse\n import asyncio\n import concurrent.futures\n@@ -153,7 +154,12 @@\n async def serve():\n logger.info(f\"Starting uvicorn with {self.workers} workers\")\n loop = asyncio.get_event_loop()\n- for sig in [signal.SIGINT, signal.SIGTERM, signal.SIGQUIT]:\n+ if sys.platform not in ['win32', 'win64']:\n+ sig_list = [signal.SIGINT, signal.SIGTERM, signal.SIGQUIT]\n+ else:\n+ sig_list = [signal.SIGINT, signal.SIGTERM]\n+\n+ for sig in sig_list:\n loop.add_signal_handler(\n sig, lambda s=sig: asyncio.create_task(self.stop(sig=s))\n )\n", "issue": "Kserve ModelServer().start(list_of_models) fails on Windows\n/kind bug\r\n\r\n**What steps did you take and what happened:**\r\nWhen ModelServer().start(list_of_models) is called, exception is raised at this location: https://github.com/kserve/kserve/blob/0ba642f9b6fa40b38fe272d04c8dfde3598bfb05/python/kserve/kserve/model_server.py#L154\r\n\r\n**What did you expect to happen:**\r\n\r\nThis simple workaround makes it possible to run the code on Windows:\r\n```\r\n # SIGQUIT is not available on Windows\r\n try:\r\n sig_list = [signal.SIGINT, signal.SIGTERM, signal.SIGQUIT]\r\n except:\r\n sig_list = [signal.SIGINT, signal.SIGTERM]\r\n\r\n # add_signal_handler may not have been implemented\r\n try:\r\n for sig in sig_list:\r\n loop.add_signal_handler(\r\n sig, lambda s=sig: asyncio.create_task(self.stop(sig=s))\r\n )\r\n except:\r\n pass\r\n```\r\n\r\nEven though Windows-based containers are very unlikely to be used for deploying KServe inference services, the ability to run kserve-based code locally on Windows-based machines for development/debugging purposes often comes very handy. I suggest fixing the Windows incompatibility issue in the next release of the kserve Python package.\r\n\r\n**Environment:**\r\n\r\n- OS (e.g. 
from `/etc/os-release`): Windows 10\r\n\n", "before_files": [{"content": "# Copyright 2022 The KServe Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport asyncio\nimport concurrent.futures\nimport multiprocessing\nimport signal\nimport socket\nfrom multiprocessing import Process\nfrom typing import Dict, List, Optional, Union\n\nfrom ray import serve as rayserve\nfrom ray.serve.api import Deployment\nfrom ray.serve.handle import RayServeHandle\n\nfrom .logging import KSERVE_LOG_CONFIG, logger\nfrom .model import Model\nfrom .model_repository import ModelRepository\nfrom .protocol.dataplane import DataPlane\nfrom .protocol.grpc.server import GRPCServer\nfrom .protocol.model_repository_extension import ModelRepositoryExtension\nfrom .protocol.rest.server import UvicornServer\nfrom .utils import utils\n\nDEFAULT_HTTP_PORT = 8080\nDEFAULT_GRPC_PORT = 8081\n\nparser = argparse.ArgumentParser(add_help=False)\nparser.add_argument(\"--http_port\", default=DEFAULT_HTTP_PORT, type=int,\n help=\"The HTTP Port listened to by the model server.\")\nparser.add_argument(\"--grpc_port\", default=DEFAULT_GRPC_PORT, type=int,\n help=\"The GRPC Port listened to by the model server.\")\nparser.add_argument(\"--workers\", default=1, type=int,\n help=\"The number of workers for multi-processing.\")\nparser.add_argument(\"--max_threads\", default=4, type=int,\n help=\"The number of max processing threads in each worker.\")\nparser.add_argument('--max_asyncio_workers', default=None, type=int,\n help='Max number of asyncio workers to spawn')\nparser.add_argument(\"--enable_grpc\", default=True, type=lambda x: utils.strtobool(x),\n help=\"Enable gRPC for the model server\")\nparser.add_argument(\"--enable_docs_url\", default=False, type=lambda x: utils.strtobool(x),\n help=\"Enable docs url '/docs' to display Swagger UI.\")\nparser.add_argument(\"--enable_latency_logging\", default=True, type=lambda x: utils.strtobool(x),\n help=\"Output a log per request with latency metrics.\")\nparser.add_argument(\"--configure_logging\", default=True, type=lambda x: utils.strtobool(x),\n help=\"Whether to configure KServe and Uvicorn logging\")\nparser.add_argument(\"--log_config_file\", default=None, type=str,\n help=\"File path containing UvicornServer's log config. Needs to be a yaml or json file.\")\nparser.add_argument(\"--access_log_format\", default=None, type=str,\n help=\"Format to set for the access log (provided by asgi-logger).\")\n\nargs, _ = parser.parse_known_args()\n\n\nclass ModelServer:\n \"\"\"KServe ModelServer\n\n Args:\n http_port (int): HTTP port. Default: ``8080``.\n grpc_port (int): GRPC port. Default: ``8081``.\n workers (int): Number of workers for uvicorn. Default: ``1``.\n max_threads (int): Max number of processing threads. Default: ``4``\n max_asyncio_workers (int): Max number of AsyncIO threads. Default: ``None``\n registered_models (ModelRepository): Model repository with registered models.\n enable_grpc (bool): Whether to turn on grpc server. 
Default: ``True``\n enable_docs_url (bool): Whether to turn on ``/docs`` Swagger UI. Default: ``False``.\n enable_latency_logging (bool): Whether to log latency metric. Default: ``True``.\n configure_logging (bool): Whether to configure KServe and Uvicorn logging. Default: ``True``.\n log_config (dict or str): File path or dict containing log config. Default: ``None``.\n access_log_format (string): Format to set for the access log (provided by asgi-logger). Default: ``None``\n \"\"\"\n\n def __init__(self, http_port: int = args.http_port,\n grpc_port: int = args.grpc_port,\n workers: int = args.workers,\n max_threads: int = args.max_threads,\n max_asyncio_workers: int = args.max_asyncio_workers,\n registered_models: ModelRepository = ModelRepository(),\n enable_grpc: bool = args.enable_grpc,\n enable_docs_url: bool = args.enable_docs_url,\n enable_latency_logging: bool = args.enable_latency_logging,\n configure_logging: bool = args.configure_logging,\n log_config: Optional[Union[Dict, str]] = args.log_config_file,\n access_log_format: str = args.access_log_format):\n self.registered_models = registered_models\n self.http_port = http_port\n self.grpc_port = grpc_port\n self.workers = workers\n self.max_threads = max_threads\n self.max_asyncio_workers = max_asyncio_workers\n self.enable_grpc = enable_grpc\n self.enable_docs_url = enable_docs_url\n self.enable_latency_logging = enable_latency_logging\n self.dataplane = DataPlane(model_registry=registered_models)\n self.model_repository_extension = ModelRepositoryExtension(\n model_registry=self.registered_models)\n self._grpc_server = None\n if self.enable_grpc:\n self._grpc_server = GRPCServer(grpc_port, self.dataplane,\n self.model_repository_extension)\n\n # Logs can be passed as a path to a file or a dictConfig.\n # We rely on Uvicorn to configure the loggers for us.\n if configure_logging:\n self.log_config = log_config if log_config is not None else KSERVE_LOG_CONFIG\n else:\n # By setting log_config to None we tell Uvicorn not to configure logging\n self.log_config = None\n\n self.access_log_format = access_log_format\n\n def start(self, models: Union[List[Model], Dict[str, Deployment]]) -> None:\n if isinstance(models, list):\n for model in models:\n if isinstance(model, Model):\n self.register_model(model)\n # pass whether to log request latency into the model\n model.enable_latency_logging = self.enable_latency_logging\n else:\n raise RuntimeError(\"Model type should be 'Model'\")\n elif isinstance(models, dict):\n if all([isinstance(v, Deployment) for v in models.values()]):\n # TODO: make this port number a variable\n rayserve.start(detached=True, http_options={\"host\": \"0.0.0.0\", \"port\": 9071})\n for key in models:\n models[key].deploy()\n handle = models[key].get_handle()\n self.register_model_handle(key, handle)\n else:\n raise RuntimeError(\"Model type should be RayServe Deployment\")\n else:\n raise RuntimeError(\"Unknown model collection types\")\n\n if self.max_asyncio_workers is None:\n # formula as suggest in https://bugs.python.org/issue35279\n self.max_asyncio_workers = min(32, utils.cpu_count() + 4)\n logger.info(f\"Setting max asyncio worker threads as {self.max_asyncio_workers}\")\n asyncio.get_event_loop().set_default_executor(\n concurrent.futures.ThreadPoolExecutor(max_workers=self.max_asyncio_workers))\n\n async def serve():\n logger.info(f\"Starting uvicorn with {self.workers} workers\")\n loop = asyncio.get_event_loop()\n for sig in [signal.SIGINT, signal.SIGTERM, signal.SIGQUIT]:\n 
loop.add_signal_handler(\n sig, lambda s=sig: asyncio.create_task(self.stop(sig=s))\n )\n if self.workers == 1:\n self._rest_server = UvicornServer(self.http_port, [],\n self.dataplane, self.model_repository_extension,\n self.enable_docs_url,\n log_config=self.log_config,\n access_log_format=self.access_log_format)\n await self._rest_server.run()\n else:\n # Since py38 MacOS/Windows defaults to use spawn for starting multiprocessing.\n # https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods\n # Spawn does not work with FastAPI/uvicorn in multiprocessing mode, use fork for multiprocessing\n # https://github.com/tiangolo/fastapi/issues/1586\n serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n serversocket.bind(('0.0.0.0', self.http_port))\n serversocket.listen(5)\n multiprocessing.set_start_method('fork')\n server = UvicornServer(self.http_port, [serversocket],\n self.dataplane, self.model_repository_extension,\n self.enable_docs_url, log_config=self.log_config,\n access_log_format=self.access_log_format)\n for _ in range(self.workers):\n p = Process(target=server.run_sync)\n p.start()\n\n async def servers_task():\n servers = [serve()]\n if self.enable_grpc:\n servers.append(self._grpc_server.start(self.max_threads))\n await asyncio.gather(*servers)\n\n asyncio.run(servers_task())\n\n async def stop(self, sig: Optional[int] = None):\n logger.info(\"Stopping the model server\")\n if self._rest_server:\n logger.info(\"Stopping the rest server\")\n await self._rest_server.stop()\n if self._grpc_server:\n logger.info(\"Stopping the grpc server\")\n await self._grpc_server.stop(sig)\n\n def register_model_handle(self, name: str, model_handle: RayServeHandle):\n self.registered_models.update_handle(name, model_handle)\n logger.info(\"Registering model handle: %s\", name)\n\n def register_model(self, model: Model):\n if not model.name:\n raise Exception(\n \"Failed to register model, model.name must be provided.\")\n self.registered_models.update(model)\n logger.info(\"Registering model: %s\", model.name)\n", "path": "python/kserve/kserve/model_server.py"}], "after_files": [{"content": "# Copyright 2022 The KServe Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nimport argparse\nimport asyncio\nimport concurrent.futures\nimport multiprocessing\nimport signal\nimport socket\nfrom multiprocessing import Process\nfrom typing import Dict, List, Optional, Union\n\nfrom ray import serve as rayserve\nfrom ray.serve.api import Deployment\nfrom ray.serve.handle import RayServeHandle\n\nfrom .logging import KSERVE_LOG_CONFIG, logger\nfrom .model import Model\nfrom .model_repository import ModelRepository\nfrom .protocol.dataplane import DataPlane\nfrom .protocol.grpc.server import GRPCServer\nfrom .protocol.model_repository_extension import ModelRepositoryExtension\nfrom .protocol.rest.server import UvicornServer\nfrom .utils import utils\n\nDEFAULT_HTTP_PORT = 
8080\nDEFAULT_GRPC_PORT = 8081\n\nparser = argparse.ArgumentParser(add_help=False)\nparser.add_argument(\"--http_port\", default=DEFAULT_HTTP_PORT, type=int,\n help=\"The HTTP Port listened to by the model server.\")\nparser.add_argument(\"--grpc_port\", default=DEFAULT_GRPC_PORT, type=int,\n help=\"The GRPC Port listened to by the model server.\")\nparser.add_argument(\"--workers\", default=1, type=int,\n help=\"The number of workers for multi-processing.\")\nparser.add_argument(\"--max_threads\", default=4, type=int,\n help=\"The number of max processing threads in each worker.\")\nparser.add_argument('--max_asyncio_workers', default=None, type=int,\n help='Max number of asyncio workers to spawn')\nparser.add_argument(\"--enable_grpc\", default=True, type=lambda x: utils.strtobool(x),\n help=\"Enable gRPC for the model server\")\nparser.add_argument(\"--enable_docs_url\", default=False, type=lambda x: utils.strtobool(x),\n help=\"Enable docs url '/docs' to display Swagger UI.\")\nparser.add_argument(\"--enable_latency_logging\", default=True, type=lambda x: utils.strtobool(x),\n help=\"Output a log per request with latency metrics.\")\nparser.add_argument(\"--configure_logging\", default=True, type=lambda x: utils.strtobool(x),\n help=\"Whether to configure KServe and Uvicorn logging\")\nparser.add_argument(\"--log_config_file\", default=None, type=str,\n help=\"File path containing UvicornServer's log config. Needs to be a yaml or json file.\")\nparser.add_argument(\"--access_log_format\", default=None, type=str,\n help=\"Format to set for the access log (provided by asgi-logger).\")\n\nargs, _ = parser.parse_known_args()\n\n\nclass ModelServer:\n \"\"\"KServe ModelServer\n\n Args:\n http_port (int): HTTP port. Default: ``8080``.\n grpc_port (int): GRPC port. Default: ``8081``.\n workers (int): Number of workers for uvicorn. Default: ``1``.\n max_threads (int): Max number of processing threads. Default: ``4``\n max_asyncio_workers (int): Max number of AsyncIO threads. Default: ``None``\n registered_models (ModelRepository): Model repository with registered models.\n enable_grpc (bool): Whether to turn on grpc server. Default: ``True``\n enable_docs_url (bool): Whether to turn on ``/docs`` Swagger UI. Default: ``False``.\n enable_latency_logging (bool): Whether to log latency metric. Default: ``True``.\n configure_logging (bool): Whether to configure KServe and Uvicorn logging. Default: ``True``.\n log_config (dict or str): File path or dict containing log config. Default: ``None``.\n access_log_format (string): Format to set for the access log (provided by asgi-logger). 
Default: ``None``\n \"\"\"\n\n def __init__(self, http_port: int = args.http_port,\n grpc_port: int = args.grpc_port,\n workers: int = args.workers,\n max_threads: int = args.max_threads,\n max_asyncio_workers: int = args.max_asyncio_workers,\n registered_models: ModelRepository = ModelRepository(),\n enable_grpc: bool = args.enable_grpc,\n enable_docs_url: bool = args.enable_docs_url,\n enable_latency_logging: bool = args.enable_latency_logging,\n configure_logging: bool = args.configure_logging,\n log_config: Optional[Union[Dict, str]] = args.log_config_file,\n access_log_format: str = args.access_log_format):\n self.registered_models = registered_models\n self.http_port = http_port\n self.grpc_port = grpc_port\n self.workers = workers\n self.max_threads = max_threads\n self.max_asyncio_workers = max_asyncio_workers\n self.enable_grpc = enable_grpc\n self.enable_docs_url = enable_docs_url\n self.enable_latency_logging = enable_latency_logging\n self.dataplane = DataPlane(model_registry=registered_models)\n self.model_repository_extension = ModelRepositoryExtension(\n model_registry=self.registered_models)\n self._grpc_server = None\n if self.enable_grpc:\n self._grpc_server = GRPCServer(grpc_port, self.dataplane,\n self.model_repository_extension)\n\n # Logs can be passed as a path to a file or a dictConfig.\n # We rely on Uvicorn to configure the loggers for us.\n if configure_logging:\n self.log_config = log_config if log_config is not None else KSERVE_LOG_CONFIG\n else:\n # By setting log_config to None we tell Uvicorn not to configure logging\n self.log_config = None\n\n self.access_log_format = access_log_format\n\n def start(self, models: Union[List[Model], Dict[str, Deployment]]) -> None:\n if isinstance(models, list):\n for model in models:\n if isinstance(model, Model):\n self.register_model(model)\n # pass whether to log request latency into the model\n model.enable_latency_logging = self.enable_latency_logging\n else:\n raise RuntimeError(\"Model type should be 'Model'\")\n elif isinstance(models, dict):\n if all([isinstance(v, Deployment) for v in models.values()]):\n # TODO: make this port number a variable\n rayserve.start(detached=True, http_options={\"host\": \"0.0.0.0\", \"port\": 9071})\n for key in models:\n models[key].deploy()\n handle = models[key].get_handle()\n self.register_model_handle(key, handle)\n else:\n raise RuntimeError(\"Model type should be RayServe Deployment\")\n else:\n raise RuntimeError(\"Unknown model collection types\")\n\n if self.max_asyncio_workers is None:\n # formula as suggest in https://bugs.python.org/issue35279\n self.max_asyncio_workers = min(32, utils.cpu_count() + 4)\n logger.info(f\"Setting max asyncio worker threads as {self.max_asyncio_workers}\")\n asyncio.get_event_loop().set_default_executor(\n concurrent.futures.ThreadPoolExecutor(max_workers=self.max_asyncio_workers))\n\n async def serve():\n logger.info(f\"Starting uvicorn with {self.workers} workers\")\n loop = asyncio.get_event_loop()\n if sys.platform not in ['win32', 'win64']:\n sig_list = [signal.SIGINT, signal.SIGTERM, signal.SIGQUIT]\n else:\n sig_list = [signal.SIGINT, signal.SIGTERM]\n\n for sig in sig_list:\n loop.add_signal_handler(\n sig, lambda s=sig: asyncio.create_task(self.stop(sig=s))\n )\n if self.workers == 1:\n self._rest_server = UvicornServer(self.http_port, [],\n self.dataplane, self.model_repository_extension,\n self.enable_docs_url,\n log_config=self.log_config,\n access_log_format=self.access_log_format)\n await self._rest_server.run()\n else:\n # 
Since py38 MacOS/Windows defaults to use spawn for starting multiprocessing.\n # https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods\n # Spawn does not work with FastAPI/uvicorn in multiprocessing mode, use fork for multiprocessing\n # https://github.com/tiangolo/fastapi/issues/1586\n serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n serversocket.bind(('0.0.0.0', self.http_port))\n serversocket.listen(5)\n multiprocessing.set_start_method('fork')\n server = UvicornServer(self.http_port, [serversocket],\n self.dataplane, self.model_repository_extension,\n self.enable_docs_url, log_config=self.log_config,\n access_log_format=self.access_log_format)\n for _ in range(self.workers):\n p = Process(target=server.run_sync)\n p.start()\n\n async def servers_task():\n servers = [serve()]\n if self.enable_grpc:\n servers.append(self._grpc_server.start(self.max_threads))\n await asyncio.gather(*servers)\n\n asyncio.run(servers_task())\n\n async def stop(self, sig: Optional[int] = None):\n logger.info(\"Stopping the model server\")\n if self._rest_server:\n logger.info(\"Stopping the rest server\")\n await self._rest_server.stop()\n if self._grpc_server:\n logger.info(\"Stopping the grpc server\")\n await self._grpc_server.stop(sig)\n\n def register_model_handle(self, name: str, model_handle: RayServeHandle):\n self.registered_models.update_handle(name, model_handle)\n logger.info(\"Registering model handle: %s\", name)\n\n def register_model(self, model: Model):\n if not model.name:\n raise Exception(\n \"Failed to register model, model.name must be provided.\")\n self.registered_models.update(model)\n logger.info(\"Registering model: %s\", model.name)\n", "path": "python/kserve/kserve/model_server.py"}]}
| 3,287 | 238 |
gh_patches_debug_12920
|
rasdani/github-patches
|
git_diff
|
Mailu__Mailu-1067
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update user password in commandline
A feature request: the ability to set/reset a user's password from the Mailu command line, similar to the [user](https://mailu.io/master/cli.html#user) command:
```
docker-compose exec admin flask mailu password myuser example.net 'password123'
```
My particular interest is the Demo server: some people find it necessary to change the admin password from time to time. Executing a full reset takes over a minute, during which the Demo is not available, and I don't want to run it more frequently than daily. It would be nice to be able to just call the above command every few minutes and always override the admin's password with the correct one. See also #987.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/admin/mailu/manage.py`
Content:
```
1 from mailu import models
2
3 from flask import current_app as app
4 from flask import cli as flask_cli
5
6 import flask
7 import os
8 import socket
9 import uuid
10 import click
11
12
13 db = models.db
14
15
16 @click.group()
17 def mailu(cls=flask_cli.FlaskGroup):
18 """ Mailu command line
19 """
20
21
22 @mailu.command()
23 @flask_cli.with_appcontext
24 def advertise():
25 """ Advertise this server against statistic services.
26 """
27 if os.path.isfile(app.config["INSTANCE_ID_PATH"]):
28 with open(app.config["INSTANCE_ID_PATH"], "r") as handle:
29 instance_id = handle.read()
30 else:
31 instance_id = str(uuid.uuid4())
32 with open(app.config["INSTANCE_ID_PATH"], "w") as handle:
33 handle.write(instance_id)
34 if not app.config["DISABLE_STATISTICS"]:
35 try:
36 socket.gethostbyname(app.config["STATS_ENDPOINT"].format(instance_id))
37 except:
38 pass
39
40
41 @mailu.command()
42 @click.argument('localpart')
43 @click.argument('domain_name')
44 @click.argument('password')
45 @flask_cli.with_appcontext
46 def admin(localpart, domain_name, password):
47 """ Create an admin user
48 """
49 domain = models.Domain.query.get(domain_name)
50 if not domain:
51 domain = models.Domain(name=domain_name)
52 db.session.add(domain)
53 user = models.User(
54 localpart=localpart,
55 domain=domain,
56 global_admin=True
57 )
58 user.set_password(password)
59 db.session.add(user)
60 db.session.commit()
61
62
63 @mailu.command()
64 @click.argument('localpart')
65 @click.argument('domain_name')
66 @click.argument('password')
67 @click.argument('hash_scheme', required=False)
68 @flask_cli.with_appcontext
69 def user(localpart, domain_name, password, hash_scheme=None):
70 """ Create a user
71 """
72 if hash_scheme is None:
73 hash_scheme = app.config['PASSWORD_SCHEME']
74 domain = models.Domain.query.get(domain_name)
75 if not domain:
76 domain = models.Domain(name=domain_name)
77 db.session.add(domain)
78 user = models.User(
79 localpart=localpart,
80 domain=domain,
81 global_admin=False
82 )
83 user.set_password(password, hash_scheme=hash_scheme)
84 db.session.add(user)
85 db.session.commit()
86
87
88 @mailu.command()
89 @click.argument('domain_name')
90 @click.option('-u', '--max-users')
91 @click.option('-a', '--max-aliases')
92 @click.option('-q', '--max-quota-bytes')
93 @flask_cli.with_appcontext
94 def domain(domain_name, max_users=-1, max_aliases=-1, max_quota_bytes=0):
95 """ Create a domain
96 """
97 domain = models.Domain.query.get(domain_name)
98 if not domain:
99 domain = models.Domain(name=domain_name, max_users=max_users,
100 max_aliases=max_aliases, max_quota_bytes=max_quota_bytes)
101 db.session.add(domain)
102 db.session.commit()
103
104
105 @mailu.command()
106 @click.argument('localpart')
107 @click.argument('domain_name')
108 @click.argument('password_hash')
109 @click.argument('hash_scheme')
110 @flask_cli.with_appcontext
111 def user_import(localpart, domain_name, password_hash, hash_scheme = None):
112 """ Import a user along with password hash.
113 """
114 if hash_scheme is None:
115 hash_scheme = app.config['PASSWORD_SCHEME']
116 domain = models.Domain.query.get(domain_name)
117 if not domain:
118 domain = models.Domain(name=domain_name)
119 db.session.add(domain)
120 user = models.User(
121 localpart=localpart,
122 domain=domain,
123 global_admin=False
124 )
125 user.set_password(password_hash, hash_scheme=hash_scheme, raw=True)
126 db.session.add(user)
127 db.session.commit()
128
129
130 @mailu.command()
131 @click.option('-v', '--verbose')
132 @click.option('-d', '--delete-objects')
133 @flask_cli.with_appcontext
134 def config_update(verbose=False, delete_objects=False):
135 """sync configuration with data from YAML-formatted stdin"""
136 import yaml
137 import sys
138 new_config = yaml.load(sys.stdin)
139 # print new_config
140 domains = new_config.get('domains', [])
141 tracked_domains = set()
142 for domain_config in domains:
143 if verbose:
144 print(str(domain_config))
145 domain_name = domain_config['name']
146 max_users = domain_config.get('max_users', -1)
147 max_aliases = domain_config.get('max_aliases', -1)
148 max_quota_bytes = domain_config.get('max_quota_bytes', 0)
149 tracked_domains.add(domain_name)
150 domain = models.Domain.query.get(domain_name)
151 if not domain:
152 domain = models.Domain(name=domain_name,
153 max_users=max_users,
154 max_aliases=max_aliases,
155 max_quota_bytes=max_quota_bytes)
156 db.session.add(domain)
157 print("Added " + str(domain_config))
158 else:
159 domain.max_users = max_users
160 domain.max_aliases = max_aliases
161 domain.max_quota_bytes = max_quota_bytes
162 db.session.add(domain)
163 print("Updated " + str(domain_config))
164
165 users = new_config.get('users', [])
166 tracked_users = set()
167 user_optional_params = ('comment', 'quota_bytes', 'global_admin',
168 'enable_imap', 'enable_pop', 'forward_enabled',
169 'forward_destination', 'reply_enabled',
170 'reply_subject', 'reply_body', 'displayed_name',
171 'spam_enabled', 'email', 'spam_threshold')
172 for user_config in users:
173 if verbose:
174 print(str(user_config))
175 localpart = user_config['localpart']
176 domain_name = user_config['domain']
177 password_hash = user_config.get('password_hash', None)
178 hash_scheme = user_config.get('hash_scheme', None)
179 domain = models.Domain.query.get(domain_name)
180 email = '{0}@{1}'.format(localpart, domain_name)
181 optional_params = {}
182 for k in user_optional_params:
183 if k in user_config:
184 optional_params[k] = user_config[k]
185 if not domain:
186 domain = models.Domain(name=domain_name)
187 db.session.add(domain)
188 user = models.User.query.get(email)
189 tracked_users.add(email)
190 tracked_domains.add(domain_name)
191 if not user:
192 user = models.User(
193 localpart=localpart,
194 domain=domain,
195 **optional_params
196 )
197 else:
198 for k in optional_params:
199 setattr(user, k, optional_params[k])
200 user.set_password(password_hash, hash_scheme=hash_scheme, raw=True)
201 db.session.add(user)
202
203 aliases = new_config.get('aliases', [])
204 tracked_aliases = set()
205 for alias_config in aliases:
206 if verbose:
207 print(str(alias_config))
208 localpart = alias_config['localpart']
209 domain_name = alias_config['domain']
210 if type(alias_config['destination']) is str:
211 destination = alias_config['destination'].split(',')
212 else:
213 destination = alias_config['destination']
214 wildcard = alias_config.get('wildcard', False)
215 domain = models.Domain.query.get(domain_name)
216 email = '{0}@{1}'.format(localpart, domain_name)
217 if not domain:
218 domain = models.Domain(name=domain_name)
219 db.session.add(domain)
220 alias = models.Alias.query.get(email)
221 tracked_aliases.add(email)
222 tracked_domains.add(domain_name)
223 if not alias:
224 alias = models.Alias(
225 localpart=localpart,
226 domain=domain,
227 wildcard=wildcard,
228 destination=destination,
229 email=email
230 )
231 else:
232 alias.destination = destination
233 alias.wildcard = wildcard
234 db.session.add(alias)
235
236 db.session.commit()
237
238 managers = new_config.get('managers', [])
239 # tracked_managers=set()
240 for manager_config in managers:
241 if verbose:
242 print(str(manager_config))
243 domain_name = manager_config['domain']
244 user_name = manager_config['user']
245 domain = models.Domain.query.get(domain_name)
246 manageruser = models.User.query.get(user_name + '@' + domain_name)
247 if manageruser not in domain.managers:
248 domain.managers.append(manageruser)
249 db.session.add(domain)
250
251 db.session.commit()
252
253 if delete_objects:
254 for user in db.session.query(models.User).all():
255 if not (user.email in tracked_users):
256 if verbose:
257 print("Deleting user: " + str(user.email))
258 db.session.delete(user)
259 for alias in db.session.query(models.Alias).all():
260 if not (alias.email in tracked_aliases):
261 if verbose:
262 print("Deleting alias: " + str(alias.email))
263 db.session.delete(alias)
264 for domain in db.session.query(models.Domain).all():
265 if not (domain.name in tracked_domains):
266 if verbose:
267 print("Deleting domain: " + str(domain.name))
268 db.session.delete(domain)
269 db.session.commit()
270
271
272 @mailu.command()
273 @click.argument('email')
274 @flask_cli.with_appcontext
275 def user_delete(email):
276 """delete user"""
277 user = models.User.query.get(email)
278 if user:
279 db.session.delete(user)
280 db.session.commit()
281
282
283 @mailu.command()
284 @click.argument('email')
285 @flask_cli.with_appcontext
286 def alias_delete(email):
287 """delete alias"""
288 alias = models.Alias.query.get(email)
289 if alias:
290 db.session.delete(alias)
291 db.session.commit()
292
293
294 @mailu.command()
295 @click.argument('localpart')
296 @click.argument('domain_name')
297 @click.argument('destination')
298 @click.option('-w', '--wildcard', is_flag=True)
299 @flask_cli.with_appcontext
300 def alias(localpart, domain_name, destination, wildcard=False):
301 """ Create an alias
302 """
303 domain = models.Domain.query.get(domain_name)
304 if not domain:
305 domain = models.Domain(name=domain_name)
306 db.session.add(domain)
307 alias = models.Alias(
308 localpart=localpart,
309 domain=domain,
310 wildcard=wildcard,
311 destination=destination.split(','),
312 email="%s@%s" % (localpart, domain_name)
313 )
314 db.session.add(alias)
315 db.session.commit()
316
317
318 @mailu.command()
319 @click.argument('domain_name')
320 @click.argument('max_users')
321 @click.argument('max_aliases')
322 @click.argument('max_quota_bytes')
323 @flask_cli.with_appcontext
324 def setlimits(domain_name, max_users, max_aliases, max_quota_bytes):
325 """ Set domain limits
326 """
327 domain = models.Domain.query.get(domain_name)
328 domain.max_users = max_users
329 domain.max_aliases = max_aliases
330 domain.max_quota_bytes = max_quota_bytes
331 db.session.add(domain)
332 db.session.commit()
333
334
335 @mailu.command()
336 @click.argument('domain_name')
337 @click.argument('user_name')
338 @flask_cli.with_appcontext
339 def setmanager(domain_name, user_name='manager'):
340 """ Make a user manager of a domain
341 """
342 domain = models.Domain.query.get(domain_name)
343 manageruser = models.User.query.get(user_name + '@' + domain_name)
344 domain.managers.append(manageruser)
345 db.session.add(domain)
346 db.session.commit()
347
348
349 if __name__ == '__main__':
350 cli()
351
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/core/admin/mailu/manage.py b/core/admin/mailu/manage.py
--- a/core/admin/mailu/manage.py
+++ b/core/admin/mailu/manage.py
@@ -85,6 +85,26 @@
db.session.commit()
+@mailu.command()
+@click.argument('localpart')
+@click.argument('domain_name')
+@click.argument('password')
+@click.argument('hash_scheme', required=False)
+@flask_cli.with_appcontext
+def password(localpart, domain_name, password, hash_scheme=None):
+ """ Change the password of an user
+ """
+ email = '{0}@{1}'.format(localpart, domain_name)
+ user = models.User.query.get(email)
+ if hash_scheme is None:
+ hash_scheme = app.config['PASSWORD_SCHEME']
+ if user:
+ user.set_password(password, hash_scheme=hash_scheme)
+ else:
+ print("User " + email + " not found.")
+ db.session.commit()
+
+
@mailu.command()
@click.argument('domain_name')
@click.option('-u', '--max-users')
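
With this change in place, the invocation requested in the issue works as-is; a usage sketch (the user, domain, password, and compose-project path below are placeholders, and the optional trailing `hash_scheme` argument falls back to the configured `PASSWORD_SCHEME` when omitted):

```
# set or reset an existing user's password
docker-compose exec admin flask mailu password myuser example.net 'password123'

# from the Docker host, e.g. a crontab entry for the periodic demo reset (-T disables TTY allocation)
*/5 * * * * cd /opt/mailu && docker-compose exec -T admin flask mailu password admin example.net 'password123'
```

When the target user does not exist, the command prints a message and leaves the database untouched.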
|
{"golden_diff": "diff --git a/core/admin/mailu/manage.py b/core/admin/mailu/manage.py\n--- a/core/admin/mailu/manage.py\n+++ b/core/admin/mailu/manage.py\n@@ -85,6 +85,26 @@\n db.session.commit()\n \n \[email protected]()\[email protected]('localpart')\[email protected]('domain_name')\[email protected]('password')\[email protected]('hash_scheme', required=False)\n+@flask_cli.with_appcontext\n+def password(localpart, domain_name, password, hash_scheme=None):\n+ \"\"\" Change the password of an user\n+ \"\"\"\n+ email = '{0}@{1}'.format(localpart, domain_name)\n+ user = models.User.query.get(email)\n+ if hash_scheme is None:\n+ hash_scheme = app.config['PASSWORD_SCHEME']\n+ if user:\n+ user.set_password(password, hash_scheme=hash_scheme)\n+ else:\n+ print(\"User \" + email + \" not found.\")\n+ db.session.commit()\n+\n+\n @mailu.command()\n @click.argument('domain_name')\n @click.option('-u', '--max-users')\n", "issue": "Update user password in commandline\nA feature request, the ability to set/reset a user's password from the Mailu command line. Similar to the [user](https://mailu.io/master/cli.html#user) command:\r\n\r\n```\r\ndocker-compose exec admin flask mailu password myuser example.net 'password123'\r\n```\r\n\r\nMy particular interest is for the Demo server, some people find it necessary to change the admin password from time to time. Executing a full reset takes over a minute, in which period the Demo is not available. I don't want to execute it more frequent than daily. It would be nice to can just call above command every few minutes and always override the admin's password with the correct one. See also #987.\r\n\n", "before_files": [{"content": "from mailu import models\n\nfrom flask import current_app as app\nfrom flask import cli as flask_cli\n\nimport flask\nimport os\nimport socket\nimport uuid\nimport click\n\n\ndb = models.db\n\n\[email protected]()\ndef mailu(cls=flask_cli.FlaskGroup):\n \"\"\" Mailu command line\n \"\"\"\n\n\[email protected]()\n@flask_cli.with_appcontext\ndef advertise():\n \"\"\" Advertise this server against statistic services.\n \"\"\"\n if os.path.isfile(app.config[\"INSTANCE_ID_PATH\"]):\n with open(app.config[\"INSTANCE_ID_PATH\"], \"r\") as handle:\n instance_id = handle.read()\n else:\n instance_id = str(uuid.uuid4())\n with open(app.config[\"INSTANCE_ID_PATH\"], \"w\") as handle:\n handle.write(instance_id)\n if not app.config[\"DISABLE_STATISTICS\"]:\n try:\n socket.gethostbyname(app.config[\"STATS_ENDPOINT\"].format(instance_id))\n except:\n pass\n\n\[email protected]()\[email protected]('localpart')\[email protected]('domain_name')\[email protected]('password')\n@flask_cli.with_appcontext\ndef admin(localpart, domain_name, password):\n \"\"\" Create an admin user\n \"\"\"\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n user = models.User(\n localpart=localpart,\n domain=domain,\n global_admin=True\n )\n user.set_password(password)\n db.session.add(user)\n db.session.commit()\n\n\[email protected]()\[email protected]('localpart')\[email protected]('domain_name')\[email protected]('password')\[email protected]('hash_scheme', required=False)\n@flask_cli.with_appcontext\ndef user(localpart, domain_name, password, hash_scheme=None):\n \"\"\" Create a user\n \"\"\"\n if hash_scheme is None:\n hash_scheme = app.config['PASSWORD_SCHEME']\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name)\n 
db.session.add(domain)\n user = models.User(\n localpart=localpart,\n domain=domain,\n global_admin=False\n )\n user.set_password(password, hash_scheme=hash_scheme)\n db.session.add(user)\n db.session.commit()\n\n\[email protected]()\[email protected]('domain_name')\[email protected]('-u', '--max-users')\[email protected]('-a', '--max-aliases')\[email protected]('-q', '--max-quota-bytes')\n@flask_cli.with_appcontext\ndef domain(domain_name, max_users=-1, max_aliases=-1, max_quota_bytes=0):\n \"\"\" Create a domain\n \"\"\"\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name, max_users=max_users,\n max_aliases=max_aliases, max_quota_bytes=max_quota_bytes)\n db.session.add(domain)\n db.session.commit()\n\n\[email protected]()\[email protected]('localpart')\[email protected]('domain_name')\[email protected]('password_hash')\[email protected]('hash_scheme')\n@flask_cli.with_appcontext\ndef user_import(localpart, domain_name, password_hash, hash_scheme = None):\n \"\"\" Import a user along with password hash.\n \"\"\"\n if hash_scheme is None:\n hash_scheme = app.config['PASSWORD_SCHEME']\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n user = models.User(\n localpart=localpart,\n domain=domain,\n global_admin=False\n )\n user.set_password(password_hash, hash_scheme=hash_scheme, raw=True)\n db.session.add(user)\n db.session.commit()\n\n\[email protected]()\[email protected]('-v', '--verbose')\[email protected]('-d', '--delete-objects')\n@flask_cli.with_appcontext\ndef config_update(verbose=False, delete_objects=False):\n \"\"\"sync configuration with data from YAML-formatted stdin\"\"\"\n import yaml\n import sys\n new_config = yaml.load(sys.stdin)\n # print new_config\n domains = new_config.get('domains', [])\n tracked_domains = set()\n for domain_config in domains:\n if verbose:\n print(str(domain_config))\n domain_name = domain_config['name']\n max_users = domain_config.get('max_users', -1)\n max_aliases = domain_config.get('max_aliases', -1)\n max_quota_bytes = domain_config.get('max_quota_bytes', 0)\n tracked_domains.add(domain_name)\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name,\n max_users=max_users,\n max_aliases=max_aliases,\n max_quota_bytes=max_quota_bytes)\n db.session.add(domain)\n print(\"Added \" + str(domain_config))\n else:\n domain.max_users = max_users\n domain.max_aliases = max_aliases\n domain.max_quota_bytes = max_quota_bytes\n db.session.add(domain)\n print(\"Updated \" + str(domain_config))\n\n users = new_config.get('users', [])\n tracked_users = set()\n user_optional_params = ('comment', 'quota_bytes', 'global_admin',\n 'enable_imap', 'enable_pop', 'forward_enabled',\n 'forward_destination', 'reply_enabled',\n 'reply_subject', 'reply_body', 'displayed_name',\n 'spam_enabled', 'email', 'spam_threshold')\n for user_config in users:\n if verbose:\n print(str(user_config))\n localpart = user_config['localpart']\n domain_name = user_config['domain']\n password_hash = user_config.get('password_hash', None)\n hash_scheme = user_config.get('hash_scheme', None)\n domain = models.Domain.query.get(domain_name)\n email = '{0}@{1}'.format(localpart, domain_name)\n optional_params = {}\n for k in user_optional_params:\n if k in user_config:\n optional_params[k] = user_config[k]\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n user = 
models.User.query.get(email)\n tracked_users.add(email)\n tracked_domains.add(domain_name)\n if not user:\n user = models.User(\n localpart=localpart,\n domain=domain,\n **optional_params\n )\n else:\n for k in optional_params:\n setattr(user, k, optional_params[k])\n user.set_password(password_hash, hash_scheme=hash_scheme, raw=True)\n db.session.add(user)\n\n aliases = new_config.get('aliases', [])\n tracked_aliases = set()\n for alias_config in aliases:\n if verbose:\n print(str(alias_config))\n localpart = alias_config['localpart']\n domain_name = alias_config['domain']\n if type(alias_config['destination']) is str:\n destination = alias_config['destination'].split(',')\n else:\n destination = alias_config['destination']\n wildcard = alias_config.get('wildcard', False)\n domain = models.Domain.query.get(domain_name)\n email = '{0}@{1}'.format(localpart, domain_name)\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n alias = models.Alias.query.get(email)\n tracked_aliases.add(email)\n tracked_domains.add(domain_name)\n if not alias:\n alias = models.Alias(\n localpart=localpart,\n domain=domain,\n wildcard=wildcard,\n destination=destination,\n email=email\n )\n else:\n alias.destination = destination\n alias.wildcard = wildcard\n db.session.add(alias)\n\n db.session.commit()\n\n managers = new_config.get('managers', [])\n # tracked_managers=set()\n for manager_config in managers:\n if verbose:\n print(str(manager_config))\n domain_name = manager_config['domain']\n user_name = manager_config['user']\n domain = models.Domain.query.get(domain_name)\n manageruser = models.User.query.get(user_name + '@' + domain_name)\n if manageruser not in domain.managers:\n domain.managers.append(manageruser)\n db.session.add(domain)\n\n db.session.commit()\n\n if delete_objects:\n for user in db.session.query(models.User).all():\n if not (user.email in tracked_users):\n if verbose:\n print(\"Deleting user: \" + str(user.email))\n db.session.delete(user)\n for alias in db.session.query(models.Alias).all():\n if not (alias.email in tracked_aliases):\n if verbose:\n print(\"Deleting alias: \" + str(alias.email))\n db.session.delete(alias)\n for domain in db.session.query(models.Domain).all():\n if not (domain.name in tracked_domains):\n if verbose:\n print(\"Deleting domain: \" + str(domain.name))\n db.session.delete(domain)\n db.session.commit()\n\n\[email protected]()\[email protected]('email')\n@flask_cli.with_appcontext\ndef user_delete(email):\n \"\"\"delete user\"\"\"\n user = models.User.query.get(email)\n if user:\n db.session.delete(user)\n db.session.commit()\n\n\[email protected]()\[email protected]('email')\n@flask_cli.with_appcontext\ndef alias_delete(email):\n \"\"\"delete alias\"\"\"\n alias = models.Alias.query.get(email)\n if alias:\n db.session.delete(alias)\n db.session.commit()\n\n\[email protected]()\[email protected]('localpart')\[email protected]('domain_name')\[email protected]('destination')\[email protected]('-w', '--wildcard', is_flag=True)\n@flask_cli.with_appcontext\ndef alias(localpart, domain_name, destination, wildcard=False):\n \"\"\" Create an alias\n \"\"\"\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n alias = models.Alias(\n localpart=localpart,\n domain=domain,\n wildcard=wildcard,\n destination=destination.split(','),\n email=\"%s@%s\" % (localpart, domain_name)\n )\n db.session.add(alias)\n db.session.commit()\n\n\[email protected]()\[email 
protected]('domain_name')\[email protected]('max_users')\[email protected]('max_aliases')\[email protected]('max_quota_bytes')\n@flask_cli.with_appcontext\ndef setlimits(domain_name, max_users, max_aliases, max_quota_bytes):\n \"\"\" Set domain limits\n \"\"\"\n domain = models.Domain.query.get(domain_name)\n domain.max_users = max_users\n domain.max_aliases = max_aliases\n domain.max_quota_bytes = max_quota_bytes\n db.session.add(domain)\n db.session.commit()\n\n\[email protected]()\[email protected]('domain_name')\[email protected]('user_name')\n@flask_cli.with_appcontext\ndef setmanager(domain_name, user_name='manager'):\n \"\"\" Make a user manager of a domain\n \"\"\"\n domain = models.Domain.query.get(domain_name)\n manageruser = models.User.query.get(user_name + '@' + domain_name)\n domain.managers.append(manageruser)\n db.session.add(domain)\n db.session.commit()\n\n\nif __name__ == '__main__':\n cli()\n", "path": "core/admin/mailu/manage.py"}], "after_files": [{"content": "from mailu import models\n\nfrom flask import current_app as app\nfrom flask import cli as flask_cli\n\nimport flask\nimport os\nimport socket\nimport uuid\nimport click\n\n\ndb = models.db\n\n\[email protected]()\ndef mailu(cls=flask_cli.FlaskGroup):\n \"\"\" Mailu command line\n \"\"\"\n\n\[email protected]()\n@flask_cli.with_appcontext\ndef advertise():\n \"\"\" Advertise this server against statistic services.\n \"\"\"\n if os.path.isfile(app.config[\"INSTANCE_ID_PATH\"]):\n with open(app.config[\"INSTANCE_ID_PATH\"], \"r\") as handle:\n instance_id = handle.read()\n else:\n instance_id = str(uuid.uuid4())\n with open(app.config[\"INSTANCE_ID_PATH\"], \"w\") as handle:\n handle.write(instance_id)\n if not app.config[\"DISABLE_STATISTICS\"]:\n try:\n socket.gethostbyname(app.config[\"STATS_ENDPOINT\"].format(instance_id))\n except:\n pass\n\n\[email protected]()\[email protected]('localpart')\[email protected]('domain_name')\[email protected]('password')\n@flask_cli.with_appcontext\ndef admin(localpart, domain_name, password):\n \"\"\" Create an admin user\n \"\"\"\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n user = models.User(\n localpart=localpart,\n domain=domain,\n global_admin=True\n )\n user.set_password(password)\n db.session.add(user)\n db.session.commit()\n\n\[email protected]()\[email protected]('localpart')\[email protected]('domain_name')\[email protected]('password')\[email protected]('hash_scheme', required=False)\n@flask_cli.with_appcontext\ndef user(localpart, domain_name, password, hash_scheme=None):\n \"\"\" Create a user\n \"\"\"\n if hash_scheme is None:\n hash_scheme = app.config['PASSWORD_SCHEME']\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n user = models.User(\n localpart=localpart,\n domain=domain,\n global_admin=False\n )\n user.set_password(password, hash_scheme=hash_scheme)\n db.session.add(user)\n db.session.commit()\n\n\[email protected]()\[email protected]('localpart')\[email protected]('domain_name')\[email protected]('password')\[email protected]('hash_scheme', required=False)\n@flask_cli.with_appcontext\ndef password(localpart, domain_name, password, hash_scheme=None):\n \"\"\" Change the password of an user\n \"\"\"\n email = '{0}@{1}'.format(localpart, domain_name)\n user = models.User.query.get(email)\n if hash_scheme is None:\n hash_scheme = app.config['PASSWORD_SCHEME']\n if user:\n 
user.set_password(password, hash_scheme=hash_scheme)\n else:\n print(\"User \" + email + \" not found.\")\n db.session.commit()\n\n\[email protected]()\[email protected]('domain_name')\[email protected]('-u', '--max-users')\[email protected]('-a', '--max-aliases')\[email protected]('-q', '--max-quota-bytes')\n@flask_cli.with_appcontext\ndef domain(domain_name, max_users=-1, max_aliases=-1, max_quota_bytes=0):\n \"\"\" Create a domain\n \"\"\"\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name, max_users=max_users,\n max_aliases=max_aliases, max_quota_bytes=max_quota_bytes)\n db.session.add(domain)\n db.session.commit()\n\n\[email protected]()\[email protected]('localpart')\[email protected]('domain_name')\[email protected]('password_hash')\[email protected]('hash_scheme')\n@flask_cli.with_appcontext\ndef user_import(localpart, domain_name, password_hash, hash_scheme = None):\n \"\"\" Import a user along with password hash.\n \"\"\"\n if hash_scheme is None:\n hash_scheme = app.config['PASSWORD_SCHEME']\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n user = models.User(\n localpart=localpart,\n domain=domain,\n global_admin=False\n )\n user.set_password(password_hash, hash_scheme=hash_scheme, raw=True)\n db.session.add(user)\n db.session.commit()\n\n\[email protected]()\[email protected]('-v', '--verbose')\[email protected]('-d', '--delete-objects')\n@flask_cli.with_appcontext\ndef config_update(verbose=False, delete_objects=False):\n \"\"\"sync configuration with data from YAML-formatted stdin\"\"\"\n import yaml\n import sys\n new_config = yaml.load(sys.stdin)\n # print new_config\n domains = new_config.get('domains', [])\n tracked_domains = set()\n for domain_config in domains:\n if verbose:\n print(str(domain_config))\n domain_name = domain_config['name']\n max_users = domain_config.get('max_users', -1)\n max_aliases = domain_config.get('max_aliases', -1)\n max_quota_bytes = domain_config.get('max_quota_bytes', 0)\n tracked_domains.add(domain_name)\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name,\n max_users=max_users,\n max_aliases=max_aliases,\n max_quota_bytes=max_quota_bytes)\n db.session.add(domain)\n print(\"Added \" + str(domain_config))\n else:\n domain.max_users = max_users\n domain.max_aliases = max_aliases\n domain.max_quota_bytes = max_quota_bytes\n db.session.add(domain)\n print(\"Updated \" + str(domain_config))\n\n users = new_config.get('users', [])\n tracked_users = set()\n user_optional_params = ('comment', 'quota_bytes', 'global_admin',\n 'enable_imap', 'enable_pop', 'forward_enabled',\n 'forward_destination', 'reply_enabled',\n 'reply_subject', 'reply_body', 'displayed_name',\n 'spam_enabled', 'email', 'spam_threshold')\n for user_config in users:\n if verbose:\n print(str(user_config))\n localpart = user_config['localpart']\n domain_name = user_config['domain']\n password_hash = user_config.get('password_hash', None)\n hash_scheme = user_config.get('hash_scheme', None)\n domain = models.Domain.query.get(domain_name)\n email = '{0}@{1}'.format(localpart, domain_name)\n optional_params = {}\n for k in user_optional_params:\n if k in user_config:\n optional_params[k] = user_config[k]\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n user = models.User.query.get(email)\n tracked_users.add(email)\n tracked_domains.add(domain_name)\n if 
not user:\n user = models.User(\n localpart=localpart,\n domain=domain,\n **optional_params\n )\n else:\n for k in optional_params:\n setattr(user, k, optional_params[k])\n user.set_password(password_hash, hash_scheme=hash_scheme, raw=True)\n db.session.add(user)\n\n aliases = new_config.get('aliases', [])\n tracked_aliases = set()\n for alias_config in aliases:\n if verbose:\n print(str(alias_config))\n localpart = alias_config['localpart']\n domain_name = alias_config['domain']\n if type(alias_config['destination']) is str:\n destination = alias_config['destination'].split(',')\n else:\n destination = alias_config['destination']\n wildcard = alias_config.get('wildcard', False)\n domain = models.Domain.query.get(domain_name)\n email = '{0}@{1}'.format(localpart, domain_name)\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n alias = models.Alias.query.get(email)\n tracked_aliases.add(email)\n tracked_domains.add(domain_name)\n if not alias:\n alias = models.Alias(\n localpart=localpart,\n domain=domain,\n wildcard=wildcard,\n destination=destination,\n email=email\n )\n else:\n alias.destination = destination\n alias.wildcard = wildcard\n db.session.add(alias)\n\n db.session.commit()\n\n managers = new_config.get('managers', [])\n # tracked_managers=set()\n for manager_config in managers:\n if verbose:\n print(str(manager_config))\n domain_name = manager_config['domain']\n user_name = manager_config['user']\n domain = models.Domain.query.get(domain_name)\n manageruser = models.User.query.get(user_name + '@' + domain_name)\n if manageruser not in domain.managers:\n domain.managers.append(manageruser)\n db.session.add(domain)\n\n db.session.commit()\n\n if delete_objects:\n for user in db.session.query(models.User).all():\n if not (user.email in tracked_users):\n if verbose:\n print(\"Deleting user: \" + str(user.email))\n db.session.delete(user)\n for alias in db.session.query(models.Alias).all():\n if not (alias.email in tracked_aliases):\n if verbose:\n print(\"Deleting alias: \" + str(alias.email))\n db.session.delete(alias)\n for domain in db.session.query(models.Domain).all():\n if not (domain.name in tracked_domains):\n if verbose:\n print(\"Deleting domain: \" + str(domain.name))\n db.session.delete(domain)\n db.session.commit()\n\n\[email protected]()\[email protected]('email')\n@flask_cli.with_appcontext\ndef user_delete(email):\n \"\"\"delete user\"\"\"\n user = models.User.query.get(email)\n if user:\n db.session.delete(user)\n db.session.commit()\n\n\[email protected]()\[email protected]('email')\n@flask_cli.with_appcontext\ndef alias_delete(email):\n \"\"\"delete alias\"\"\"\n alias = models.Alias.query.get(email)\n if alias:\n db.session.delete(alias)\n db.session.commit()\n\n\[email protected]()\[email protected]('localpart')\[email protected]('domain_name')\[email protected]('destination')\[email protected]('-w', '--wildcard', is_flag=True)\n@flask_cli.with_appcontext\ndef alias(localpart, domain_name, destination, wildcard=False):\n \"\"\" Create an alias\n \"\"\"\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n alias = models.Alias(\n localpart=localpart,\n domain=domain,\n wildcard=wildcard,\n destination=destination.split(','),\n email=\"%s@%s\" % (localpart, domain_name)\n )\n db.session.add(alias)\n db.session.commit()\n\n\[email protected]()\[email protected]('domain_name')\[email protected]('max_users')\[email protected]('max_aliases')\[email 
protected]('max_quota_bytes')\n@flask_cli.with_appcontext\ndef setlimits(domain_name, max_users, max_aliases, max_quota_bytes):\n \"\"\" Set domain limits\n \"\"\"\n domain = models.Domain.query.get(domain_name)\n domain.max_users = max_users\n domain.max_aliases = max_aliases\n domain.max_quota_bytes = max_quota_bytes\n db.session.add(domain)\n db.session.commit()\n\n\[email protected]()\[email protected]('domain_name')\[email protected]('user_name')\n@flask_cli.with_appcontext\ndef setmanager(domain_name, user_name='manager'):\n \"\"\" Make a user manager of a domain\n \"\"\"\n domain = models.Domain.query.get(domain_name)\n manageruser = models.User.query.get(user_name + '@' + domain_name)\n domain.managers.append(manageruser)\n db.session.add(domain)\n db.session.commit()\n\n\nif __name__ == '__main__':\n cli()\n", "path": "core/admin/mailu/manage.py"}]}
| 3,758 | 245 |