problem_id stringlengths 18-22 | source stringclasses 1 value | task_type stringclasses 1 value | in_source_id stringlengths 13-58 | prompt stringlengths 1.1k-25.4k | golden_diff stringlengths 145-5.13k | verification_info stringlengths 582-39.1k | num_tokens int64 271-4.1k | num_tokens_diff int64 47-1.02k |
---|---|---|---|---|---|---|---|---|
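The schema above uses standard Hugging Face `datasets` feature types (string columns plus `int64` token counts). As a rough, non-authoritative sketch of how a row with this schema could be loaded and inspected, the snippet below uses the `datasets` library; the dataset ID and split name are assumptions for illustration and are not taken from this preview.

```python
# Minimal sketch for inspecting one row of a dataset with the schema above.
# NOTE: the dataset ID and split name are assumptions for illustration only.
from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")  # assumed ID and split

row = ds[0]
print(row["problem_id"], row["in_source_id"], row["num_tokens"], row["num_tokens_diff"])
print(row["prompt"][:300])       # issue text plus the relevant source files
print(row["golden_diff"][:300])  # reference patch in `git diff` format
```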
gh_patches_debug_30321 | rasdani/github-patches | git_diff | pwndbg__pwndbg-433 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bug of function get_load_segment_info in readelf.py
when I got readelf result like below
```
readelf --program-headers /bin/ls | grep "LOAD" -A 10
LOAD 0x0000000000000000 0x0000000000400000 0x0000000000400000
0x000000000001da64 0x000000000001da64 R E 200000
```
the function crashed at line 65
```
65 fsize, msize, read, write, execute, align = re_secnd.match(line).groups()
```
the reason is in the regex format
```python
re_secnd = re.compile(r"\s+(0x[0-9A-Fa-f]+) (0x[0-9A-Fa-f]+) (.)(.)(.)\s+(**0x**[0-9A-Fa-f]+)"
```
I mean, "0x" should not a absolute prefix of align number
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwndbg/wrappers/readelf.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 from __future__ import absolute_import
4 from __future__ import division
5 from __future__ import print_function
6 from __future__ import unicode_literals
7
8 import re
9
10 import pwndbg.wrappers
11
12 cmd_name = "readelf"
13
14 @pwndbg.wrappers.OnlyWithCommand(cmd_name)
15 def get_jmpslots():
16 local_path = pwndbg.file.get_file(pwndbg.proc.exe)
17 cmd = [get_jmpslots.cmd_path, "--relocs", local_path]
18 readelf_out = pwndbg.wrappers.call_cmd(cmd)
19
20 return filter(_extract_jumps, readelf_out.splitlines())
21
22 def _extract_jumps(line):
23 '''
24 Checks for records in `readelf --relocs <binary>` which has type e.g. `R_X86_64_JUMP_SLO`
25 NOTE: Because of that we DO NOT display entries that are not writeable (due to FULL RELRO)
26 as they have `R_X86_64_GLOB_DAT` type.
27
28 It might be good to display them seperately in the future.
29 '''
30 try:
31 if "JUMP" in line.split()[2]:
32 return line
33 else:
34 return False
35 except IndexError:
36 return False
37
38 @pwndbg.wrappers.OnlyWithCommand(cmd_name)
39 def get_load_segment_info():
40 '''
41 Looks for LOAD sections by parsing the output of `readelf --program-headers <binary>`
42 '''
43 local_path = pwndbg.file.get_file(pwndbg.proc.exe)
44 cmd = [get_jmpslots.cmd_path, "--program-headers", local_path]
45 readelf_out = pwndbg.wrappers.call_cmd(cmd)
46
47 segments = []
48 load_found = False
49
50 # Output from readelf is
51 # Type Offset VirtAddr PhysAddr
52 # FileSiz MemSiz Flags Align
53 # LOAD 0x0000000000000000 0x0000000000000000 0x0000000000000000
54 # 0x0000000000000830 0x0000000000000830 R E 0x200000
55 # Account for this using two regular expressions
56 re_first = re.compile(r"\s+LOAD\s+(0x[0-9A-Fa-f]+) (0x[0-9A-Fa-f]+) (0x[0-9A-Fa-f]+)")
57 re_secnd = re.compile(r"\s+(0x[0-9A-Fa-f]+) (0x[0-9A-Fa-f]+) (.)(.)(.)\s+(0x[0-9A-Fa-f]+)")
58 hex2int = lambda x: int(x, 16)
59
60 for line in readelf_out.splitlines():
61 if "LOAD" in line:
62 load_found = True
63 offset, vaddr, paddr = map(hex2int, re_first.match(line).groups())
64 elif load_found:
65 fsize, msize, read, write, execute, align = re_secnd.match(line).groups()
66 fsize, msize, align = map(hex2int, (fsize, msize, align))
67 read = read == "R"
68 write = write == "W"
69 execute = execute == "E"
70
71 segments.append({"Offset": offset,
72 "VirtAddr": vaddr,
73 "PhysAddr": paddr,
74 "FileSiz": fsize,
75 "MemSiz": msize,
76 "FlagsRead": read,
77 "FlagsWrite": write,
78 "FlagsExecute": execute})
79
80 load_found = False
81
82 return segments
83
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pwndbg/wrappers/readelf.py b/pwndbg/wrappers/readelf.py
--- a/pwndbg/wrappers/readelf.py
+++ b/pwndbg/wrappers/readelf.py
@@ -52,9 +52,15 @@
# FileSiz MemSiz Flags Align
# LOAD 0x0000000000000000 0x0000000000000000 0x0000000000000000
# 0x0000000000000830 0x0000000000000830 R E 0x200000
+ #
+ ############################################################################
+ #
+ # NOTE: On some readelf versions the Align column might not be prefixed with 0x
+ # See https://github.com/pwndbg/pwndbg/issues/427
+ #
# Account for this using two regular expressions
re_first = re.compile(r"\s+LOAD\s+(0x[0-9A-Fa-f]+) (0x[0-9A-Fa-f]+) (0x[0-9A-Fa-f]+)")
- re_secnd = re.compile(r"\s+(0x[0-9A-Fa-f]+) (0x[0-9A-Fa-f]+) (.)(.)(.)\s+(0x[0-9A-Fa-f]+)")
+ re_secnd = re.compile(r"\s+(0x[0-9A-Fa-f]+) (0x[0-9A-Fa-f]+) (.)(.)(.)\s+(0x)?([0-9A-Fa-f]+)")
hex2int = lambda x: int(x, 16)
for line in readelf_out.splitlines():
@@ -62,8 +68,8 @@
load_found = True
offset, vaddr, paddr = map(hex2int, re_first.match(line).groups())
elif load_found:
- fsize, msize, read, write, execute, align = re_secnd.match(line).groups()
- fsize, msize, align = map(hex2int, (fsize, msize, align))
+ fsize, msize, read, write, execute, _optional_prefix, align = re_secnd.match(line).groups()
+ fsize, msize, align = map(hex2int, (fsize, msize, '0x' + align))
read = read == "R"
write = write == "W"
execute = execute == "E"
| {"golden_diff": "diff --git a/pwndbg/wrappers/readelf.py b/pwndbg/wrappers/readelf.py\n--- a/pwndbg/wrappers/readelf.py\n+++ b/pwndbg/wrappers/readelf.py\n@@ -52,9 +52,15 @@\n # FileSiz MemSiz Flags Align\n # LOAD 0x0000000000000000 0x0000000000000000 0x0000000000000000\n # 0x0000000000000830 0x0000000000000830 R E 0x200000\n+ #\n+ ############################################################################\n+ #\n+ # NOTE: On some readelf versions the Align column might not be prefixed with 0x\n+ # See https://github.com/pwndbg/pwndbg/issues/427\n+ #\n # Account for this using two regular expressions\n re_first = re.compile(r\"\\s+LOAD\\s+(0x[0-9A-Fa-f]+) (0x[0-9A-Fa-f]+) (0x[0-9A-Fa-f]+)\")\n- re_secnd = re.compile(r\"\\s+(0x[0-9A-Fa-f]+) (0x[0-9A-Fa-f]+) (.)(.)(.)\\s+(0x[0-9A-Fa-f]+)\")\n+ re_secnd = re.compile(r\"\\s+(0x[0-9A-Fa-f]+) (0x[0-9A-Fa-f]+) (.)(.)(.)\\s+(0x)?([0-9A-Fa-f]+)\")\n hex2int = lambda x: int(x, 16)\n \n for line in readelf_out.splitlines():\n@@ -62,8 +68,8 @@\n load_found = True\n offset, vaddr, paddr = map(hex2int, re_first.match(line).groups())\n elif load_found:\n- fsize, msize, read, write, execute, align = re_secnd.match(line).groups()\n- fsize, msize, align = map(hex2int, (fsize, msize, align))\n+ fsize, msize, read, write, execute, _optional_prefix, align = re_secnd.match(line).groups()\n+ fsize, msize, align = map(hex2int, (fsize, msize, '0x' + align))\n read = read == \"R\"\n write = write == \"W\"\n execute = execute == \"E\"\n", "issue": "bug of function get_load_segment_info in readelf.py\nwhen I got readelf result like below\r\n```\r\nreadelf --program-headers /bin/ls | grep \"LOAD\" -A 10\r\n LOAD 0x0000000000000000 0x0000000000400000 0x0000000000400000\r\n 0x000000000001da64 0x000000000001da64 R E 200000\r\n```\r\nthe function crashed at line 65\r\n```\r\n65 fsize, msize, read, write, execute, align = re_secnd.match(line).groups()\r\n```\r\nthe reason is in the regex format\r\n```python\r\nre_secnd = re.compile(r\"\\s+(0x[0-9A-Fa-f]+) (0x[0-9A-Fa-f]+) (.)(.)(.)\\s+(**0x**[0-9A-Fa-f]+)\"\r\n```\r\nI mean, \"0x\" should not a absolute prefix of align number\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport re\n\nimport pwndbg.wrappers\n\ncmd_name = \"readelf\"\n\[email protected](cmd_name)\ndef get_jmpslots():\n local_path = pwndbg.file.get_file(pwndbg.proc.exe)\n cmd = [get_jmpslots.cmd_path, \"--relocs\", local_path]\n readelf_out = pwndbg.wrappers.call_cmd(cmd)\n\n return filter(_extract_jumps, readelf_out.splitlines())\n\ndef _extract_jumps(line):\n '''\n Checks for records in `readelf --relocs <binary>` which has type e.g. 
`R_X86_64_JUMP_SLO`\n NOTE: Because of that we DO NOT display entries that are not writeable (due to FULL RELRO)\n as they have `R_X86_64_GLOB_DAT` type.\n\n It might be good to display them seperately in the future.\n '''\n try:\n if \"JUMP\" in line.split()[2]:\n return line\n else:\n return False\n except IndexError:\n return False\n\[email protected](cmd_name)\ndef get_load_segment_info():\n '''\n Looks for LOAD sections by parsing the output of `readelf --program-headers <binary>`\n '''\n local_path = pwndbg.file.get_file(pwndbg.proc.exe)\n cmd = [get_jmpslots.cmd_path, \"--program-headers\", local_path]\n readelf_out = pwndbg.wrappers.call_cmd(cmd)\n\n segments = []\n load_found = False\n\n # Output from readelf is \n # Type Offset VirtAddr PhysAddr\n # FileSiz MemSiz Flags Align\n # LOAD 0x0000000000000000 0x0000000000000000 0x0000000000000000\n # 0x0000000000000830 0x0000000000000830 R E 0x200000\n # Account for this using two regular expressions\n re_first = re.compile(r\"\\s+LOAD\\s+(0x[0-9A-Fa-f]+) (0x[0-9A-Fa-f]+) (0x[0-9A-Fa-f]+)\")\n re_secnd = re.compile(r\"\\s+(0x[0-9A-Fa-f]+) (0x[0-9A-Fa-f]+) (.)(.)(.)\\s+(0x[0-9A-Fa-f]+)\")\n hex2int = lambda x: int(x, 16)\n\n for line in readelf_out.splitlines():\n if \"LOAD\" in line:\n load_found = True\n offset, vaddr, paddr = map(hex2int, re_first.match(line).groups())\n elif load_found:\n fsize, msize, read, write, execute, align = re_secnd.match(line).groups()\n fsize, msize, align = map(hex2int, (fsize, msize, align))\n read = read == \"R\"\n write = write == \"W\"\n execute = execute == \"E\"\n\n segments.append({\"Offset\": offset,\n \"VirtAddr\": vaddr,\n \"PhysAddr\": paddr,\n \"FileSiz\": fsize,\n \"MemSiz\": msize,\n \"FlagsRead\": read,\n \"FlagsWrite\": write,\n \"FlagsExecute\": execute})\n\n load_found = False\n\n return segments\n", "path": "pwndbg/wrappers/readelf.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport re\n\nimport pwndbg.wrappers\n\ncmd_name = \"readelf\"\n\[email protected](cmd_name)\ndef get_jmpslots():\n local_path = pwndbg.file.get_file(pwndbg.proc.exe)\n cmd = [get_jmpslots.cmd_path, \"--relocs\", local_path]\n readelf_out = pwndbg.wrappers.call_cmd(cmd)\n\n return filter(_extract_jumps, readelf_out.splitlines())\n\ndef _extract_jumps(line):\n '''\n Checks for records in `readelf --relocs <binary>` which has type e.g. 
`R_X86_64_JUMP_SLO`\n NOTE: Because of that we DO NOT display entries that are not writeable (due to FULL RELRO)\n as they have `R_X86_64_GLOB_DAT` type.\n\n It might be good to display them seperately in the future.\n '''\n try:\n if \"JUMP\" in line.split()[2]:\n return line\n else:\n return False\n except IndexError:\n return False\n\[email protected](cmd_name)\ndef get_load_segment_info():\n '''\n Looks for LOAD sections by parsing the output of `readelf --program-headers <binary>`\n '''\n local_path = pwndbg.file.get_file(pwndbg.proc.exe)\n cmd = [get_jmpslots.cmd_path, \"--program-headers\", local_path]\n readelf_out = pwndbg.wrappers.call_cmd(cmd)\n\n segments = []\n load_found = False\n\n # Output from readelf is \n # Type Offset VirtAddr PhysAddr\n # FileSiz MemSiz Flags Align\n # LOAD 0x0000000000000000 0x0000000000000000 0x0000000000000000\n # 0x0000000000000830 0x0000000000000830 R E 0x200000\n #\n ############################################################################\n #\n # NOTE: On some readelf versions the Align column might not be prefixed with 0x\n # See https://github.com/pwndbg/pwndbg/issues/427\n #\n # Account for this using two regular expressions\n re_first = re.compile(r\"\\s+LOAD\\s+(0x[0-9A-Fa-f]+) (0x[0-9A-Fa-f]+) (0x[0-9A-Fa-f]+)\")\n re_secnd = re.compile(r\"\\s+(0x[0-9A-Fa-f]+) (0x[0-9A-Fa-f]+) (.)(.)(.)\\s+(0x)?([0-9A-Fa-f]+)\")\n hex2int = lambda x: int(x, 16)\n\n for line in readelf_out.splitlines():\n if \"LOAD\" in line:\n load_found = True\n offset, vaddr, paddr = map(hex2int, re_first.match(line).groups())\n elif load_found:\n fsize, msize, read, write, execute, _optional_prefix, align = re_secnd.match(line).groups()\n fsize, msize, align = map(hex2int, (fsize, msize, '0x' + align))\n read = read == \"R\"\n write = write == \"W\"\n execute = execute == \"E\"\n\n segments.append({\"Offset\": offset,\n \"VirtAddr\": vaddr,\n \"PhysAddr\": paddr,\n \"FileSiz\": fsize,\n \"MemSiz\": msize,\n \"FlagsRead\": read,\n \"FlagsWrite\": write,\n \"FlagsExecute\": execute})\n\n load_found = False\n\n return segments\n", "path": "pwndbg/wrappers/readelf.py"}]} | 1,574 | 625 |
gh_patches_debug_17862 | rasdani/github-patches | git_diff | kivy__kivy-2700 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SDL2 - crash on loading asyncimage loading gif?
relevant log:
Traceback (most recent call last):
File "/home/chozabu/git/KivEntEd/main.py", line 1289, in <module>
KivEntEd().run()
File "/usr/local/lib/python2.7/dist-packages/kivy/app.py", line 825, in run
runTouchApp()
File "/usr/local/lib/python2.7/dist-packages/kivy/base.py", line 484, in runTouchApp
EventLoop.window.mainloop()
File "/usr/local/lib/python2.7/dist-packages/kivy/core/window/window_pygame.py", line 364, in mainloop
self._mainloop()
File "/usr/local/lib/python2.7/dist-packages/kivy/core/window/window_pygame.py", line 268, in _mainloop
EventLoop.idle()
File "/usr/local/lib/python2.7/dist-packages/kivy/base.py", line 324, in idle
Clock.tick()
File "/usr/local/lib/python2.7/dist-packages/kivy/clock.py", line 482, in tick
self._process_events()
File "/usr/local/lib/python2.7/dist-packages/kivy/clock.py", line 614, in _process_events
event.tick(self._last_tick, remove)
File "/usr/local/lib/python2.7/dist-packages/kivy/clock.py", line 373, in tick
ret = callback(self._dt)
File "/home/chozabu/git/KivEntEd/ui_elements.py", line 121, in initUI
self.screenShot.source = serverURL+"/downloadSS?fullname="+self.info['filename']+".png"
File "kivy/properties.pyx", line 377, in kivy.properties.Property.__set__ (kivy/properties.c:4346)
File "kivy/properties.pyx", line 409, in kivy.properties.Property.set (kivy/properties.c:4861)
File "kivy/properties.pyx", line 460, in kivy.properties.Property.dispatch (kivy/properties.c:5437)
File "kivy/_event.pyx", line 1046, in kivy._event.EventObservers.dispatch (kivy/_event.c:10980)
File "/usr/local/lib/python2.7/dist-packages/kivy/uix/image.py", line 327, in _load_source
anim_delay=self.anim_delay)
File "/usr/local/lib/python2.7/dist-packages/kivy/loader.py", line 432, in image
client = ProxyImage(self.loading_image,
File "/usr/local/lib/python2.7/dist-packages/kivy/loader.py", line 163, in _get_loading_image
self._loading_image = ImageLoader.load(filename=loading_png_fn)
File "/usr/local/lib/python2.7/dist-packages/kivy/core/image/__init__.py", line 385, in load
im = loader(filename, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/kivy/core/image/**init**.py", line 164, in **init**
self._data = self.load(filename)
File "/usr/local/lib/python2.7/dist-packages/kivy/core/image/img_sdl2.py", line 34, in load
raise Exception('SDL2: Unable to load image')
Exception: SDL2: Unable to load image
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kivy/core/image/img_sdl2.py`
Content:
```
1 '''
2 SDL2 image loader
3 =================
4 '''
5
6 __all__ = ('ImageLoaderSDL2', )
7
8 from kivy.compat import PY2
9 from kivy.logger import Logger
10 from kivy.core.image import ImageLoaderBase, ImageData, ImageLoader
11 from kivy.core.image import _img_sdl2
12
13
14 class ImageLoaderSDL2(ImageLoaderBase):
15 '''Image loader based on the PIL library'''
16
17 def _ensure_ext(self):
18 _img_sdl2.init()
19
20 @staticmethod
21 def extensions():
22 '''Return accepted extensions for this loader'''
23 return ('bmp', 'gif', 'jpg', 'jpeg', 'lbm', 'pcx', 'png', 'pnm', 'tga', 'tiff',
24 'webp', 'xcf', 'xpm', 'xv')
25
26 @staticmethod
27 def can_save():
28 return True
29
30 def load(self, filename):
31 info = _img_sdl2.load(filename)
32 if not info:
33 Logger.warning('Image: Unable to load image <%s>' % filename)
34 raise Exception('SDL2: Unable to load image')
35
36 w, h, fmt, pixels, rowlength = info
37
38 # update internals
39 self.filename = filename
40 return [ImageData(
41 w, h, fmt, pixels, source=filename,
42 rowlength=rowlength)]
43
44 @staticmethod
45 def save(filename, width, height, fmt, pixels, flipped):
46 # TODO implement the save for sdl2
47 #surface = SDL2.image.fromstring(
48 # pixels, (width, height), fmt.upper(), False)
49 #SDL2.image.save(surface, filename)
50 _img_sdl2.save(filename, width, height, fmt, pixels, flipped)
51 return True
52
53
54 # register
55 ImageLoader.register(ImageLoaderSDL2)
56
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kivy/core/image/img_sdl2.py b/kivy/core/image/img_sdl2.py
--- a/kivy/core/image/img_sdl2.py
+++ b/kivy/core/image/img_sdl2.py
@@ -20,7 +20,7 @@
@staticmethod
def extensions():
'''Return accepted extensions for this loader'''
- return ('bmp', 'gif', 'jpg', 'jpeg', 'lbm', 'pcx', 'png', 'pnm', 'tga', 'tiff',
+ return ('bmp', 'jpg', 'jpeg', 'lbm', 'pcx', 'png', 'pnm', 'tga', 'tiff',
'webp', 'xcf', 'xpm', 'xv')
@staticmethod
@@ -43,10 +43,6 @@
@staticmethod
def save(filename, width, height, fmt, pixels, flipped):
- # TODO implement the save for sdl2
- #surface = SDL2.image.fromstring(
- # pixels, (width, height), fmt.upper(), False)
- #SDL2.image.save(surface, filename)
_img_sdl2.save(filename, width, height, fmt, pixels, flipped)
return True
| {"golden_diff": "diff --git a/kivy/core/image/img_sdl2.py b/kivy/core/image/img_sdl2.py\n--- a/kivy/core/image/img_sdl2.py\n+++ b/kivy/core/image/img_sdl2.py\n@@ -20,7 +20,7 @@\n @staticmethod\n def extensions():\n '''Return accepted extensions for this loader'''\n- return ('bmp', 'gif', 'jpg', 'jpeg', 'lbm', 'pcx', 'png', 'pnm', 'tga', 'tiff',\n+ return ('bmp', 'jpg', 'jpeg', 'lbm', 'pcx', 'png', 'pnm', 'tga', 'tiff',\n 'webp', 'xcf', 'xpm', 'xv')\n \n @staticmethod\n@@ -43,10 +43,6 @@\n \n @staticmethod\n def save(filename, width, height, fmt, pixels, flipped):\n- # TODO implement the save for sdl2\n- #surface = SDL2.image.fromstring(\n- # pixels, (width, height), fmt.upper(), False)\n- #SDL2.image.save(surface, filename)\n _img_sdl2.save(filename, width, height, fmt, pixels, flipped)\n return True\n", "issue": "SDL2 - crash on loading asyncimage loading gif?\nrelevant log:\n\nTraceback (most recent call last):\n File \"/home/chozabu/git/KivEntEd/main.py\", line 1289, in <module>\n KivEntEd().run()\n File \"/usr/local/lib/python2.7/dist-packages/kivy/app.py\", line 825, in run\n runTouchApp()\n File \"/usr/local/lib/python2.7/dist-packages/kivy/base.py\", line 484, in runTouchApp\n EventLoop.window.mainloop()\n File \"/usr/local/lib/python2.7/dist-packages/kivy/core/window/window_pygame.py\", line 364, in mainloop\n self._mainloop()\n File \"/usr/local/lib/python2.7/dist-packages/kivy/core/window/window_pygame.py\", line 268, in _mainloop\n EventLoop.idle()\n File \"/usr/local/lib/python2.7/dist-packages/kivy/base.py\", line 324, in idle\n Clock.tick()\n File \"/usr/local/lib/python2.7/dist-packages/kivy/clock.py\", line 482, in tick\n self._process_events()\n File \"/usr/local/lib/python2.7/dist-packages/kivy/clock.py\", line 614, in _process_events\n event.tick(self._last_tick, remove)\n File \"/usr/local/lib/python2.7/dist-packages/kivy/clock.py\", line 373, in tick\n ret = callback(self._dt)\n File \"/home/chozabu/git/KivEntEd/ui_elements.py\", line 121, in initUI\n self.screenShot.source = serverURL+\"/downloadSS?fullname=\"+self.info['filename']+\".png\"\n File \"kivy/properties.pyx\", line 377, in kivy.properties.Property.__set__ (kivy/properties.c:4346)\n File \"kivy/properties.pyx\", line 409, in kivy.properties.Property.set (kivy/properties.c:4861)\n File \"kivy/properties.pyx\", line 460, in kivy.properties.Property.dispatch (kivy/properties.c:5437)\n File \"kivy/_event.pyx\", line 1046, in kivy._event.EventObservers.dispatch (kivy/_event.c:10980)\n File \"/usr/local/lib/python2.7/dist-packages/kivy/uix/image.py\", line 327, in _load_source\n anim_delay=self.anim_delay)\n File \"/usr/local/lib/python2.7/dist-packages/kivy/loader.py\", line 432, in image\n client = ProxyImage(self.loading_image,\n File \"/usr/local/lib/python2.7/dist-packages/kivy/loader.py\", line 163, in _get_loading_image\n self._loading_image = ImageLoader.load(filename=loading_png_fn)\n File \"/usr/local/lib/python2.7/dist-packages/kivy/core/image/__init__.py\", line 385, in load\n im = loader(filename, **kwargs)\n File \"/usr/local/lib/python2.7/dist-packages/kivy/core/image/**init**.py\", line 164, in **init**\n self._data = self.load(filename)\n File \"/usr/local/lib/python2.7/dist-packages/kivy/core/image/img_sdl2.py\", line 34, in load\n raise Exception('SDL2: Unable to load image')\nException: SDL2: Unable to load image\n\n", "before_files": [{"content": "'''\nSDL2 image loader\n=================\n'''\n\n__all__ = ('ImageLoaderSDL2', )\n\nfrom kivy.compat import PY2\nfrom kivy.logger import 
Logger\nfrom kivy.core.image import ImageLoaderBase, ImageData, ImageLoader\nfrom kivy.core.image import _img_sdl2\n\n\nclass ImageLoaderSDL2(ImageLoaderBase):\n '''Image loader based on the PIL library'''\n\n def _ensure_ext(self):\n _img_sdl2.init()\n\n @staticmethod\n def extensions():\n '''Return accepted extensions for this loader'''\n return ('bmp', 'gif', 'jpg', 'jpeg', 'lbm', 'pcx', 'png', 'pnm', 'tga', 'tiff',\n 'webp', 'xcf', 'xpm', 'xv')\n\n @staticmethod\n def can_save():\n return True\n\n def load(self, filename):\n info = _img_sdl2.load(filename)\n if not info:\n Logger.warning('Image: Unable to load image <%s>' % filename)\n raise Exception('SDL2: Unable to load image')\n\n w, h, fmt, pixels, rowlength = info\n\n # update internals\n self.filename = filename\n return [ImageData(\n w, h, fmt, pixels, source=filename,\n rowlength=rowlength)]\n\n @staticmethod\n def save(filename, width, height, fmt, pixels, flipped):\n # TODO implement the save for sdl2\n #surface = SDL2.image.fromstring(\n # pixels, (width, height), fmt.upper(), False)\n #SDL2.image.save(surface, filename)\n _img_sdl2.save(filename, width, height, fmt, pixels, flipped)\n return True\n\n\n# register\nImageLoader.register(ImageLoaderSDL2)\n", "path": "kivy/core/image/img_sdl2.py"}], "after_files": [{"content": "'''\nSDL2 image loader\n=================\n'''\n\n__all__ = ('ImageLoaderSDL2', )\n\nfrom kivy.compat import PY2\nfrom kivy.logger import Logger\nfrom kivy.core.image import ImageLoaderBase, ImageData, ImageLoader\nfrom kivy.core.image import _img_sdl2\n\n\nclass ImageLoaderSDL2(ImageLoaderBase):\n '''Image loader based on the PIL library'''\n\n def _ensure_ext(self):\n _img_sdl2.init()\n\n @staticmethod\n def extensions():\n '''Return accepted extensions for this loader'''\n return ('bmp', 'jpg', 'jpeg', 'lbm', 'pcx', 'png', 'pnm', 'tga', 'tiff',\n 'webp', 'xcf', 'xpm', 'xv')\n\n @staticmethod\n def can_save():\n return True\n\n def load(self, filename):\n info = _img_sdl2.load(filename)\n if not info:\n Logger.warning('Image: Unable to load image <%s>' % filename)\n raise Exception('SDL2: Unable to load image')\n\n w, h, fmt, pixels, rowlength = info\n\n # update internals\n self.filename = filename\n return [ImageData(\n w, h, fmt, pixels, source=filename,\n rowlength=rowlength)]\n\n @staticmethod\n def save(filename, width, height, fmt, pixels, flipped):\n _img_sdl2.save(filename, width, height, fmt, pixels, flipped)\n return True\n\n\n# register\nImageLoader.register(ImageLoaderSDL2)\n", "path": "kivy/core/image/img_sdl2.py"}]} | 1,532 | 276 |
gh_patches_debug_19620 | rasdani/github-patches | git_diff | sopel-irc__sopel-2166 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
announce error on python3
<!-- Before reporting a bug, please search both open *and closed* issues to
see if it has already been reported. If you can, try to reproduce the problem
on an unmodified copy of the `master` branch first, as sometimes bugs are found
and fixed without a report. If the problem is unreported and persists in
`master`, please help us fix it quickly by filling out as much of this
information as you can. Thanks! -->
### Description
.announce results in an error on python 3
### Reproduction steps
1. Setup a instance on python3 (specifically I got the error on v3.7)
2. Try to use .announce
3. there will be a error
### Expected behavior
Works without errors.
### Logs
```
If applicable, add logs to help us figure out what's happening. Raw logs are
super helpful! Logs are usually found in ~/.sopel/logs, depending on your
configuration.
```
Jul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: [2021-07-22 17:04:51,684] sopel.bot ERROR - Unexpected error ('dict_keys' object is not subscriptable) from MacFan4000 at 2
Jul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: Traceback (most recent call last):
Jul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: File "/srv/sopelbots/prodvenv/lib/python3.7/site-packages/sopel/bot.py", line 757, in call_rule
Jul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: rule.execute(sopel, trigger)
Jul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: File "/srv/sopelbots/prodvenv/lib/python3.7/site-packages/sopel/plugins/rules.py", line 1057, in execute
Jul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: exit_code = self._handler(bot, trigger)
Jul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: File "/srv/sopelbots/prodvenv/lib/python3.7/site-packages/sopel/plugin.py", line 1071, in guarded
Jul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: return function(bot, trigger, *args, **kwargs)
Jul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: File "/srv/sopelbots/prodvenv/lib/python3.7/site-packages/sopel/modules/announce.py", line 44, in announce
Jul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: for cgroup in channels:
Jul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: File "/srv/sopelbots/prodvenv/lib/python3.7/site-packages/sopel/modules/announce.py", line 24, in _chunks
Jul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: yield items[delim:delim + size]
Jul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: TypeError: 'dict_keys' object is not subscriptable
### Environment
- Sopel `.version`: [e.g. 7.0.0 or d416e19] 7.1.2
- Sopel installed via: [apt, pip, `setup.py install`, source, ?] pip
- Python version: [e.g. 3.6.9] 3.7
- Operating system: [e.g. Debian 10] Debian Buster
- IRCd `/version`: [e.g. InspIRCd 3.0.1] Libera Chat
- Relevant plugins: [adminchannel, weather, custom\_thing.py, ?] announce
### Notes
I believe https://github.com/sopel-irc/sopel/commit/b7b6b46a84e29e26a6a6b921debf57735661a4c0#diff-a9aa50736c17c299dac1ad9cb5ea1b835fb638c91bbd8c547990ffd9d67daa74 broke it due to .keys() not working the same way on python3 as it does on python2.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sopel/modules/announce.py`
Content:
```
1 """
2 announce.py - Sopel Announcement Plugin
3 Sends announcements to all channels the bot has joined.
4 Copyright © 2013, Elad Alfassa, <[email protected]>
5 Licensed under the Eiffel Forum License 2.
6
7 https://sopel.chat
8 """
9 from __future__ import generator_stop
10
11 from sopel import plugin
12
13
14 def _chunks(items, size):
15 """Break a list of items into groups.
16
17 :param items: the collection of items to chunk
18 :type items: :term:`iterable`
19 :param int size: the size of each chunk
20 """
21 # from https://stackoverflow.com/a/312464/5991 with modified names for readability
22 for delim in range(0, len(items), size):
23 yield items[delim:delim + size]
24
25
26 @plugin.command('announce')
27 @plugin.example('.announce Some important message here')
28 @plugin.require_admin('Sorry, I can\'t let you do that', reply=True)
29 @plugin.output_prefix('[ANNOUNCEMENT] ')
30 def announce(bot, trigger):
31 """Send an announcement to all channels the bot is in."""
32 if trigger.group(2) is None:
33 bot.reply('Announce what? I need a message to say.')
34 return
35
36 size = 1
37 try:
38 size = bot.isupport.TARGMAX.get('PRIVMSG', size)
39 except AttributeError:
40 pass
41
42 channels = _chunks(bot.channels.keys(), size)
43 for cgroup in channels:
44 bot.say(trigger.group(2), ','.join(cgroup))
45
46 bot.reply('Announce complete.')
47
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sopel/modules/announce.py b/sopel/modules/announce.py
--- a/sopel/modules/announce.py
+++ b/sopel/modules/announce.py
@@ -8,6 +8,8 @@
"""
from __future__ import generator_stop
+import itertools
+
from sopel import plugin
@@ -17,10 +19,18 @@
:param items: the collection of items to chunk
:type items: :term:`iterable`
:param int size: the size of each chunk
+ :return: a :term:`generator` of chunks
+ :rtype: :term:`generator` of :class:`tuple`
"""
- # from https://stackoverflow.com/a/312464/5991 with modified names for readability
- for delim in range(0, len(items), size):
- yield items[delim:delim + size]
+ # This approach is safer than slicing with non-subscriptable types,
+ # for example `dict_keys` objects
+ iterator = iter(items)
+ # TODO: Simplify to assignment expression (`while cond := expr`)
+ # when dropping Python 3.7
+ chunk = tuple(itertools.islice(iterator, size))
+ while chunk:
+ yield chunk
+ chunk = tuple(itertools.islice(iterator, size))
@plugin.command('announce')
| {"golden_diff": "diff --git a/sopel/modules/announce.py b/sopel/modules/announce.py\n--- a/sopel/modules/announce.py\n+++ b/sopel/modules/announce.py\n@@ -8,6 +8,8 @@\n \"\"\"\n from __future__ import generator_stop\n \n+import itertools\n+\n from sopel import plugin\n \n \n@@ -17,10 +19,18 @@\n :param items: the collection of items to chunk\n :type items: :term:`iterable`\n :param int size: the size of each chunk\n+ :return: a :term:`generator` of chunks\n+ :rtype: :term:`generator` of :class:`tuple`\n \"\"\"\n- # from https://stackoverflow.com/a/312464/5991 with modified names for readability\n- for delim in range(0, len(items), size):\n- yield items[delim:delim + size]\n+ # This approach is safer than slicing with non-subscriptable types,\n+ # for example `dict_keys` objects\n+ iterator = iter(items)\n+ # TODO: Simplify to assignment expression (`while cond := expr`)\n+ # when dropping Python 3.7\n+ chunk = tuple(itertools.islice(iterator, size))\n+ while chunk:\n+ yield chunk\n+ chunk = tuple(itertools.islice(iterator, size))\n \n \n @plugin.command('announce')\n", "issue": "announce error on python3\n<!-- Before reporting a bug, please search both open *and closed* issues to\r\nsee if it has already been reported. If you can, try to reproduce the problem\r\non an unmodified copy of the `master` branch first, as sometimes bugs are found\r\nand fixed without a report. If the problem is unreported and persists in\r\n`master`, please help us fix it quickly by filling out as much of this\r\ninformation as you can. Thanks! -->\r\n\r\n### Description\r\n.announce results in an error on python 3\r\n\r\n### Reproduction steps\r\n1. Setup a instance on python3 (specifically I got the error on v3.7)\r\n2. Try to use .announce\r\n3. there will be a error\r\n\r\n### Expected behavior\r\nWorks without errors.\r\n\r\n### Logs\r\n```\r\nIf applicable, add logs to help us figure out what's happening. Raw logs are\r\nsuper helpful! 
Logs are usually found in ~/.sopel/logs, depending on your\r\nconfiguration.\r\n```\r\nJul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: [2021-07-22 17:04:51,684] sopel.bot ERROR - Unexpected error ('dict_keys' object is not subscriptable) from MacFan4000 at 2\r\nJul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: Traceback (most recent call last):\r\nJul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: File \"/srv/sopelbots/prodvenv/lib/python3.7/site-packages/sopel/bot.py\", line 757, in call_rule\r\nJul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: rule.execute(sopel, trigger)\r\nJul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: File \"/srv/sopelbots/prodvenv/lib/python3.7/site-packages/sopel/plugins/rules.py\", line 1057, in execute\r\nJul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: exit_code = self._handler(bot, trigger)\r\nJul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: File \"/srv/sopelbots/prodvenv/lib/python3.7/site-packages/sopel/plugin.py\", line 1071, in guarded\r\nJul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: return function(bot, trigger, *args, **kwargs)\r\nJul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: File \"/srv/sopelbots/prodvenv/lib/python3.7/site-packages/sopel/modules/announce.py\", line 44, in announce\r\nJul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: for cgroup in channels:\r\nJul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: File \"/srv/sopelbots/prodvenv/lib/python3.7/site-packages/sopel/modules/announce.py\", line 24, in _chunks\r\nJul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: yield items[delim:delim + size]\r\nJul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: TypeError: 'dict_keys' object is not subscriptable\r\n### Environment\r\n- Sopel `.version`: [e.g. 7.0.0 or d416e19] 7.1.2\r\n- Sopel installed via: [apt, pip, `setup.py install`, source, ?] pip\r\n- Python version: [e.g. 3.6.9] 3.7\r\n- Operating system: [e.g. Debian 10] Debian Buster\r\n- IRCd `/version`: [e.g. InspIRCd 3.0.1] Libera Chat\r\n- Relevant plugins: [adminchannel, weather, custom\\_thing.py, ?] announce\r\n\r\n### Notes\r\nI believe https://github.com/sopel-irc/sopel/commit/b7b6b46a84e29e26a6a6b921debf57735661a4c0#diff-a9aa50736c17c299dac1ad9cb5ea1b835fb638c91bbd8c547990ffd9d67daa74 broke it due to .keys() not working the same way on python3 as it does on python2.\r\n\n", "before_files": [{"content": "\"\"\"\nannounce.py - Sopel Announcement Plugin\nSends announcements to all channels the bot has joined.\nCopyright \u00a9 2013, Elad Alfassa, <[email protected]>\nLicensed under the Eiffel Forum License 2.\n\nhttps://sopel.chat\n\"\"\"\nfrom __future__ import generator_stop\n\nfrom sopel import plugin\n\n\ndef _chunks(items, size):\n \"\"\"Break a list of items into groups.\n\n :param items: the collection of items to chunk\n :type items: :term:`iterable`\n :param int size: the size of each chunk\n \"\"\"\n # from https://stackoverflow.com/a/312464/5991 with modified names for readability\n for delim in range(0, len(items), size):\n yield items[delim:delim + size]\n\n\[email protected]('announce')\[email protected]('.announce Some important message here')\[email protected]_admin('Sorry, I can\\'t let you do that', reply=True)\[email protected]_prefix('[ANNOUNCEMENT] ')\ndef announce(bot, trigger):\n \"\"\"Send an announcement to all channels the bot is in.\"\"\"\n if trigger.group(2) is None:\n bot.reply('Announce what? 
I need a message to say.')\n return\n\n size = 1\n try:\n size = bot.isupport.TARGMAX.get('PRIVMSG', size)\n except AttributeError:\n pass\n\n channels = _chunks(bot.channels.keys(), size)\n for cgroup in channels:\n bot.say(trigger.group(2), ','.join(cgroup))\n\n bot.reply('Announce complete.')\n", "path": "sopel/modules/announce.py"}], "after_files": [{"content": "\"\"\"\nannounce.py - Sopel Announcement Plugin\nSends announcements to all channels the bot has joined.\nCopyright \u00a9 2013, Elad Alfassa, <[email protected]>\nLicensed under the Eiffel Forum License 2.\n\nhttps://sopel.chat\n\"\"\"\nfrom __future__ import generator_stop\n\nimport itertools\n\nfrom sopel import plugin\n\n\ndef _chunks(items, size):\n \"\"\"Break a list of items into groups.\n\n :param items: the collection of items to chunk\n :type items: :term:`iterable`\n :param int size: the size of each chunk\n :return: a :term:`generator` of chunks\n :rtype: :term:`generator` of :class:`tuple`\n \"\"\"\n # This approach is safer than slicing with non-subscriptable types,\n # for example `dict_keys` objects\n iterator = iter(items)\n # TODO: Simplify to assignment expression (`while cond := expr`)\n # when dropping Python 3.7\n chunk = tuple(itertools.islice(iterator, size))\n while chunk:\n yield chunk\n chunk = tuple(itertools.islice(iterator, size))\n\n\[email protected]('announce')\[email protected]('.announce Some important message here')\[email protected]_admin('Sorry, I can\\'t let you do that', reply=True)\[email protected]_prefix('[ANNOUNCEMENT] ')\ndef announce(bot, trigger):\n \"\"\"Send an announcement to all channels the bot is in.\"\"\"\n if trigger.group(2) is None:\n bot.reply('Announce what? I need a message to say.')\n return\n\n size = 1\n try:\n size = bot.isupport.TARGMAX.get('PRIVMSG', size)\n except AttributeError:\n pass\n\n channels = _chunks(bot.channels.keys(), size)\n for cgroup in channels:\n bot.say(trigger.group(2), ','.join(cgroup))\n\n bot.reply('Announce complete.')\n", "path": "sopel/modules/announce.py"}]} | 1,880 | 310 |
gh_patches_debug_6235 | rasdani/github-patches | git_diff | statsmodels__statsmodels-970 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
lowes test failure Ubuntu
https://launchpadlibrarian.net/144807626/buildlog_ubuntu-raring-amd64.statsmodels_0.5.0~ppa17~revno-1430~raring1_UPLOADING.txt.gz
```
======================================================================
ERROR: statsmodels.nonparametric.tests.test_lowess.TestLowess.test_options
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/lib/python2.7/dist-packages/nose/case.py", line 197, in runTest
self.test(*self.arg)
File "/build/buildd/statsmodels-0.5.0~ppa17~revno/debian/python-statsmodels/usr/lib/python2.7/dist-packages/statsmodels/nonparametric/tests/test_lowess.py", line 144, in test_options
return_sorted=False)
File "/build/buildd/statsmodels-0.5.0~ppa17~revno/debian/python-statsmodels/usr/lib/python2.7/dist-packages/statsmodels/nonparametric/smoothers_lowess.py", line 182, in lowess
yfitted_[mask_valid] = yfitted
ValueError: NumPy boolean array indexing assignment cannot assign 20 input values to the 17 output values where the mask is true
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `statsmodels/nonparametric/smoothers_lowess.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """Lowess - wrapper for cythonized extension
3
4 Author : Chris Jordan-Squire
5 Author : Carl Vogel
6 Author : Josef Perktold
7
8 """
9
10 import numpy as np
11 from ._smoothers_lowess import lowess as _lowess
12
13 def lowess(endog, exog, frac=2.0/3.0, it=3, delta=0.0, is_sorted=False,
14 missing='drop', return_sorted=True):
15 '''LOWESS (Locally Weighted Scatterplot Smoothing)
16
17 A lowess function that outs smoothed estimates of endog
18 at the given exog values from points (exog, endog)
19
20 Parameters
21 ----------
22 endog: 1-D numpy array
23 The y-values of the observed points
24 exog: 1-D numpy array
25 The x-values of the observed points
26 frac: float
27 Between 0 and 1. The fraction of the data used
28 when estimating each y-value.
29 it: int
30 The number of residual-based reweightings
31 to perform.
32 delta: float
33 Distance within which to use linear-interpolation
34 instead of weighted regression.
35 is_sorted : bool
36 If False (default), then the data will be sorted by exog before
37 calculating lowess. If True, then it is assumed that the data is
38 already sorted by exog.
39 missing : str
40 Available options are 'none', 'drop', and 'raise'. If 'none', no nan
41 checking is done. If 'drop', any observations with nans are dropped.
42 If 'raise', an error is raised. Default is 'drop'.
43 return_sorted : bool
44 If True (default), then the returned array is sorted by exog and has
45 missing (nan or infinite) observations removed.
46 If False, then the returned array is in the same length and the same
47 sequence of observations as the input array.
48
49 Returns
50 -------
51 out: ndarray, float
52 The returned array is two-dimensional if return_sorted is True, and
53 one dimensional if return_sorted is False.
54 If return_sorted is True, then a numpy array with two columns. The
55 first column contains the sorted x (exog) values and the second column
56 the associated estimated y (endog) values.
57 If return_sorted is False, then only the fitted values are returned,
58 and the observations will be in the same order as the input arrays.
59
60 Notes
61 -----
62 This lowess function implements the algorithm given in the
63 reference below using local linear estimates.
64
65 Suppose the input data has N points. The algorithm works by
66 estimating the `smooth` y_i by taking the frac*N closest points
67 to (x_i,y_i) based on their x values and estimating y_i
68 using a weighted linear regression. The weight for (x_j,y_j)
69 is tricube function applied to |x_i-x_j|.
70
71 If it > 1, then further weighted local linear regressions
72 are performed, where the weights are the same as above
73 times the _lowess_bisquare function of the residuals. Each iteration
74 takes approximately the same amount of time as the original fit,
75 so these iterations are expensive. They are most useful when
76 the noise has extremely heavy tails, such as Cauchy noise.
77 Noise with less heavy-tails, such as t-distributions with df>2,
78 are less problematic. The weights downgrade the influence of
79 points with large residuals. In the extreme case, points whose
80 residuals are larger than 6 times the median absolute residual
81 are given weight 0.
82
83 `delta` can be used to save computations. For each `x_i`, regressions
84 are skipped for points closer than `delta`. The next regression is
85 fit for the farthest point within delta of `x_i` and all points in
86 between are estimated by linearly interpolating between the two
87 regression fits.
88
89 Judicious choice of delta can cut computation time considerably
90 for large data (N > 5000). A good choice is ``delta = 0.01 * range(exog)``.
91
92 Some experimentation is likely required to find a good
93 choice of `frac` and `iter` for a particular dataset.
94
95 References
96 ----------
97 Cleveland, W.S. (1979) "Robust Locally Weighted Regression
98 and Smoothing Scatterplots". Journal of the American Statistical
99 Association 74 (368): 829-836.
100
101 Examples
102 --------
103 The below allows a comparison between how different the fits from
104 lowess for different values of frac can be.
105
106 >>> import numpy as np
107 >>> import statsmodels.api as sm
108 >>> lowess = sm.nonparametric.lowess
109 >>> x = np.random.uniform(low = -2*np.pi, high = 2*np.pi, size=500)
110 >>> y = np.sin(x) + np.random.normal(size=len(x))
111 >>> z = lowess(y, x)
112 >>> w = lowess(y, x, frac=1./3)
113
114 This gives a similar comparison for when it is 0 vs not.
115
116 >>> import numpy as np
117 >>> import scipy.stats as stats
118 >>> import statsmodels.api as sm
119 >>> lowess = sm.nonparametric.lowess
120 >>> x = np.random.uniform(low = -2*np.pi, high = 2*np.pi, size=500)
121 >>> y = np.sin(x) + stats.cauchy.rvs(size=len(x))
122 >>> z = lowess(y, x, frac= 1./3, it=0)
123 >>> w = lowess(y, x, frac=1./3)
124
125 '''
126
127 endog = np.asarray(endog, float)
128 exog = np.asarray(exog, float)
129
130 # Inputs should be vectors (1-D arrays) of the
131 # same length.
132 if exog.ndim != 1:
133 raise ValueError('exog must be a vector')
134 if endog.ndim != 1:
135 raise ValueError('endog must be a vector')
136 if endog.shape[0] != exog.shape[0] :
137 raise ValueError('exog and endog must have same length')
138
139 if missing in ['drop', 'raise']:
140 # Cut out missing values
141 mask_valid = (np.isfinite(exog) & np.isfinite(endog))
142 all_valid = np.all(mask_valid)
143 if all_valid:
144 y = endog
145 x = exog
146 else:
147 if missing == 'drop':
148 x = exog[mask_valid]
149 y = endog[mask_valid]
150 else:
151 raise ValueError('nan or inf found in data')
152 elif missing == 'none':
153 y = endog
154 x = exog
155 all_valid = True # we assume it's true if missing='none'
156 else:
157 raise ValueError("missing can only be 'none', 'drop' or 'raise'")
158
159 if not is_sorted:
160 # Sort both inputs according to the ascending order of x values
161 sort_index = np.argsort(x)
162 x = np.array(x[sort_index])
163 y = np.array(y[sort_index])
164
165 res = _lowess(y, x, frac=frac, it=it, delta=delta)
166 _, yfitted = res.T
167
168 if return_sorted or (all_valid and is_sorted):
169 return res
170 else:
171 # rebuild yfitted with original indices
172 # a bit messy: y might have been selected twice
173 if not is_sorted:
174 yfitted_ = np.empty_like(endog)
175 yfitted_.fill(np.nan)
176 yfitted_[sort_index] = yfitted
177 yfitted = yfitted_
178
179 if not all_valid:
180 yfitted_ = np.empty_like(endog)
181 yfitted_.fill(np.nan)
182 yfitted_[mask_valid] = yfitted
183 yfitted = yfitted_
184
185 # we don't need to return exog anymore
186 return yfitted
187
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/statsmodels/nonparametric/smoothers_lowess.py b/statsmodels/nonparametric/smoothers_lowess.py
--- a/statsmodels/nonparametric/smoothers_lowess.py
+++ b/statsmodels/nonparametric/smoothers_lowess.py
@@ -171,7 +171,7 @@
# rebuild yfitted with original indices
# a bit messy: y might have been selected twice
if not is_sorted:
- yfitted_ = np.empty_like(endog)
+ yfitted_ = np.empty_like(y)
yfitted_.fill(np.nan)
yfitted_[sort_index] = yfitted
yfitted = yfitted_
| {"golden_diff": "diff --git a/statsmodels/nonparametric/smoothers_lowess.py b/statsmodels/nonparametric/smoothers_lowess.py\n--- a/statsmodels/nonparametric/smoothers_lowess.py\n+++ b/statsmodels/nonparametric/smoothers_lowess.py\n@@ -171,7 +171,7 @@\n # rebuild yfitted with original indices\n # a bit messy: y might have been selected twice\n if not is_sorted:\n- yfitted_ = np.empty_like(endog)\n+ yfitted_ = np.empty_like(y)\n yfitted_.fill(np.nan)\n yfitted_[sort_index] = yfitted\n yfitted = yfitted_\n", "issue": "lowes test failure Ubuntu\nhttps://launchpadlibrarian.net/144807626/buildlog_ubuntu-raring-amd64.statsmodels_0.5.0~ppa17~revno-1430~raring1_UPLOADING.txt.gz\n\n```\n======================================================================\nERROR: statsmodels.nonparametric.tests.test_lowess.TestLowess.test_options\n----------------------------------------------------------------------\nTraceback (most recent call last):\n File \"/usr/lib/python2.7/dist-packages/nose/case.py\", line 197, in runTest\n self.test(*self.arg)\n File \"/build/buildd/statsmodels-0.5.0~ppa17~revno/debian/python-statsmodels/usr/lib/python2.7/dist-packages/statsmodels/nonparametric/tests/test_lowess.py\", line 144, in test_options\n return_sorted=False)\n File \"/build/buildd/statsmodels-0.5.0~ppa17~revno/debian/python-statsmodels/usr/lib/python2.7/dist-packages/statsmodels/nonparametric/smoothers_lowess.py\", line 182, in lowess\n yfitted_[mask_valid] = yfitted\nValueError: NumPy boolean array indexing assignment cannot assign 20 input values to the 17 output values where the mask is true\n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Lowess - wrapper for cythonized extension\n\nAuthor : Chris Jordan-Squire\nAuthor : Carl Vogel\nAuthor : Josef Perktold\n\n\"\"\"\n\nimport numpy as np\nfrom ._smoothers_lowess import lowess as _lowess\n\ndef lowess(endog, exog, frac=2.0/3.0, it=3, delta=0.0, is_sorted=False,\n missing='drop', return_sorted=True):\n '''LOWESS (Locally Weighted Scatterplot Smoothing)\n\n A lowess function that outs smoothed estimates of endog\n at the given exog values from points (exog, endog)\n\n Parameters\n ----------\n endog: 1-D numpy array\n The y-values of the observed points\n exog: 1-D numpy array\n The x-values of the observed points\n frac: float\n Between 0 and 1. The fraction of the data used\n when estimating each y-value.\n it: int\n The number of residual-based reweightings\n to perform.\n delta: float\n Distance within which to use linear-interpolation\n instead of weighted regression.\n is_sorted : bool\n If False (default), then the data will be sorted by exog before\n calculating lowess. If True, then it is assumed that the data is\n already sorted by exog.\n missing : str\n Available options are 'none', 'drop', and 'raise'. If 'none', no nan\n checking is done. If 'drop', any observations with nans are dropped.\n If 'raise', an error is raised. Default is 'drop'.\n return_sorted : bool\n If True (default), then the returned array is sorted by exog and has\n missing (nan or infinite) observations removed.\n If False, then the returned array is in the same length and the same\n sequence of observations as the input array.\n\n Returns\n -------\n out: ndarray, float\n The returned array is two-dimensional if return_sorted is True, and\n one dimensional if return_sorted is False.\n If return_sorted is True, then a numpy array with two columns. 
The\n first column contains the sorted x (exog) values and the second column\n the associated estimated y (endog) values.\n If return_sorted is False, then only the fitted values are returned,\n and the observations will be in the same order as the input arrays.\n\n Notes\n -----\n This lowess function implements the algorithm given in the\n reference below using local linear estimates.\n\n Suppose the input data has N points. The algorithm works by\n estimating the `smooth` y_i by taking the frac*N closest points\n to (x_i,y_i) based on their x values and estimating y_i\n using a weighted linear regression. The weight for (x_j,y_j)\n is tricube function applied to |x_i-x_j|.\n\n If it > 1, then further weighted local linear regressions\n are performed, where the weights are the same as above\n times the _lowess_bisquare function of the residuals. Each iteration\n takes approximately the same amount of time as the original fit,\n so these iterations are expensive. They are most useful when\n the noise has extremely heavy tails, such as Cauchy noise.\n Noise with less heavy-tails, such as t-distributions with df>2,\n are less problematic. The weights downgrade the influence of\n points with large residuals. In the extreme case, points whose\n residuals are larger than 6 times the median absolute residual\n are given weight 0.\n\n `delta` can be used to save computations. For each `x_i`, regressions\n are skipped for points closer than `delta`. The next regression is\n fit for the farthest point within delta of `x_i` and all points in\n between are estimated by linearly interpolating between the two\n regression fits.\n\n Judicious choice of delta can cut computation time considerably\n for large data (N > 5000). A good choice is ``delta = 0.01 * range(exog)``.\n\n Some experimentation is likely required to find a good\n choice of `frac` and `iter` for a particular dataset.\n\n References\n ----------\n Cleveland, W.S. (1979) \"Robust Locally Weighted Regression\n and Smoothing Scatterplots\". 
Journal of the American Statistical\n Association 74 (368): 829-836.\n\n Examples\n --------\n The below allows a comparison between how different the fits from\n lowess for different values of frac can be.\n\n >>> import numpy as np\n >>> import statsmodels.api as sm\n >>> lowess = sm.nonparametric.lowess\n >>> x = np.random.uniform(low = -2*np.pi, high = 2*np.pi, size=500)\n >>> y = np.sin(x) + np.random.normal(size=len(x))\n >>> z = lowess(y, x)\n >>> w = lowess(y, x, frac=1./3)\n\n This gives a similar comparison for when it is 0 vs not.\n\n >>> import numpy as np\n >>> import scipy.stats as stats\n >>> import statsmodels.api as sm\n >>> lowess = sm.nonparametric.lowess\n >>> x = np.random.uniform(low = -2*np.pi, high = 2*np.pi, size=500)\n >>> y = np.sin(x) + stats.cauchy.rvs(size=len(x))\n >>> z = lowess(y, x, frac= 1./3, it=0)\n >>> w = lowess(y, x, frac=1./3)\n\n '''\n\n endog = np.asarray(endog, float)\n exog = np.asarray(exog, float)\n\n # Inputs should be vectors (1-D arrays) of the\n # same length.\n if exog.ndim != 1:\n raise ValueError('exog must be a vector')\n if endog.ndim != 1:\n raise ValueError('endog must be a vector')\n if endog.shape[0] != exog.shape[0] :\n raise ValueError('exog and endog must have same length')\n\n if missing in ['drop', 'raise']:\n # Cut out missing values\n mask_valid = (np.isfinite(exog) & np.isfinite(endog))\n all_valid = np.all(mask_valid)\n if all_valid:\n y = endog\n x = exog\n else:\n if missing == 'drop':\n x = exog[mask_valid]\n y = endog[mask_valid]\n else:\n raise ValueError('nan or inf found in data')\n elif missing == 'none':\n y = endog\n x = exog\n all_valid = True # we assume it's true if missing='none'\n else:\n raise ValueError(\"missing can only be 'none', 'drop' or 'raise'\")\n\n if not is_sorted:\n # Sort both inputs according to the ascending order of x values\n sort_index = np.argsort(x)\n x = np.array(x[sort_index])\n y = np.array(y[sort_index])\n\n res = _lowess(y, x, frac=frac, it=it, delta=delta)\n _, yfitted = res.T\n\n if return_sorted or (all_valid and is_sorted):\n return res\n else:\n # rebuild yfitted with original indices\n # a bit messy: y might have been selected twice\n if not is_sorted:\n yfitted_ = np.empty_like(endog)\n yfitted_.fill(np.nan)\n yfitted_[sort_index] = yfitted\n yfitted = yfitted_\n\n if not all_valid:\n yfitted_ = np.empty_like(endog)\n yfitted_.fill(np.nan)\n yfitted_[mask_valid] = yfitted\n yfitted = yfitted_\n\n # we don't need to return exog anymore\n return yfitted\n", "path": "statsmodels/nonparametric/smoothers_lowess.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Lowess - wrapper for cythonized extension\n\nAuthor : Chris Jordan-Squire\nAuthor : Carl Vogel\nAuthor : Josef Perktold\n\n\"\"\"\n\nimport numpy as np\nfrom ._smoothers_lowess import lowess as _lowess\n\ndef lowess(endog, exog, frac=2.0/3.0, it=3, delta=0.0, is_sorted=False,\n missing='drop', return_sorted=True):\n '''LOWESS (Locally Weighted Scatterplot Smoothing)\n\n A lowess function that outs smoothed estimates of endog\n at the given exog values from points (exog, endog)\n\n Parameters\n ----------\n endog: 1-D numpy array\n The y-values of the observed points\n exog: 1-D numpy array\n The x-values of the observed points\n frac: float\n Between 0 and 1. 
The fraction of the data used\n when estimating each y-value.\n it: int\n The number of residual-based reweightings\n to perform.\n delta: float\n Distance within which to use linear-interpolation\n instead of weighted regression.\n is_sorted : bool\n If False (default), then the data will be sorted by exog before\n calculating lowess. If True, then it is assumed that the data is\n already sorted by exog.\n missing : str\n Available options are 'none', 'drop', and 'raise'. If 'none', no nan\n checking is done. If 'drop', any observations with nans are dropped.\n If 'raise', an error is raised. Default is 'drop'.\n return_sorted : bool\n If True (default), then the returned array is sorted by exog and has\n missing (nan or infinite) observations removed.\n If False, then the returned array is in the same length and the same\n sequence of observations as the input array.\n\n Returns\n -------\n out: ndarray, float\n The returned array is two-dimensional if return_sorted is True, and\n one dimensional if return_sorted is False.\n If return_sorted is True, then a numpy array with two columns. The\n first column contains the sorted x (exog) values and the second column\n the associated estimated y (endog) values.\n If return_sorted is False, then only the fitted values are returned,\n and the observations will be in the same order as the input arrays.\n\n Notes\n -----\n This lowess function implements the algorithm given in the\n reference below using local linear estimates.\n\n Suppose the input data has N points. The algorithm works by\n estimating the `smooth` y_i by taking the frac*N closest points\n to (x_i,y_i) based on their x values and estimating y_i\n using a weighted linear regression. The weight for (x_j,y_j)\n is tricube function applied to |x_i-x_j|.\n\n If it > 1, then further weighted local linear regressions\n are performed, where the weights are the same as above\n times the _lowess_bisquare function of the residuals. Each iteration\n takes approximately the same amount of time as the original fit,\n so these iterations are expensive. They are most useful when\n the noise has extremely heavy tails, such as Cauchy noise.\n Noise with less heavy-tails, such as t-distributions with df>2,\n are less problematic. The weights downgrade the influence of\n points with large residuals. In the extreme case, points whose\n residuals are larger than 6 times the median absolute residual\n are given weight 0.\n\n `delta` can be used to save computations. For each `x_i`, regressions\n are skipped for points closer than `delta`. The next regression is\n fit for the farthest point within delta of `x_i` and all points in\n between are estimated by linearly interpolating between the two\n regression fits.\n\n Judicious choice of delta can cut computation time considerably\n for large data (N > 5000). A good choice is ``delta = 0.01 * range(exog)``.\n\n Some experimentation is likely required to find a good\n choice of `frac` and `iter` for a particular dataset.\n\n References\n ----------\n Cleveland, W.S. (1979) \"Robust Locally Weighted Regression\n and Smoothing Scatterplots\". 
Journal of the American Statistical\n Association 74 (368): 829-836.\n\n Examples\n --------\n The below allows a comparison between how different the fits from\n lowess for different values of frac can be.\n\n >>> import numpy as np\n >>> import statsmodels.api as sm\n >>> lowess = sm.nonparametric.lowess\n >>> x = np.random.uniform(low = -2*np.pi, high = 2*np.pi, size=500)\n >>> y = np.sin(x) + np.random.normal(size=len(x))\n >>> z = lowess(y, x)\n >>> w = lowess(y, x, frac=1./3)\n\n This gives a similar comparison for when it is 0 vs not.\n\n >>> import numpy as np\n >>> import scipy.stats as stats\n >>> import statsmodels.api as sm\n >>> lowess = sm.nonparametric.lowess\n >>> x = np.random.uniform(low = -2*np.pi, high = 2*np.pi, size=500)\n >>> y = np.sin(x) + stats.cauchy.rvs(size=len(x))\n >>> z = lowess(y, x, frac= 1./3, it=0)\n >>> w = lowess(y, x, frac=1./3)\n\n '''\n\n endog = np.asarray(endog, float)\n exog = np.asarray(exog, float)\n\n # Inputs should be vectors (1-D arrays) of the\n # same length.\n if exog.ndim != 1:\n raise ValueError('exog must be a vector')\n if endog.ndim != 1:\n raise ValueError('endog must be a vector')\n if endog.shape[0] != exog.shape[0] :\n raise ValueError('exog and endog must have same length')\n\n if missing in ['drop', 'raise']:\n # Cut out missing values\n mask_valid = (np.isfinite(exog) & np.isfinite(endog))\n all_valid = np.all(mask_valid)\n if all_valid:\n y = endog\n x = exog\n else:\n if missing == 'drop':\n x = exog[mask_valid]\n y = endog[mask_valid]\n else:\n raise ValueError('nan or inf found in data')\n elif missing == 'none':\n y = endog\n x = exog\n all_valid = True # we assume it's true if missing='none'\n else:\n raise ValueError(\"missing can only be 'none', 'drop' or 'raise'\")\n\n if not is_sorted:\n # Sort both inputs according to the ascending order of x values\n sort_index = np.argsort(x)\n x = np.array(x[sort_index])\n y = np.array(y[sort_index])\n\n res = _lowess(y, x, frac=frac, it=it, delta=delta)\n _, yfitted = res.T\n\n if return_sorted or (all_valid and is_sorted):\n return res\n else:\n # rebuild yfitted with original indices\n # a bit messy: y might have been selected twice\n if not is_sorted:\n yfitted_ = np.empty_like(y)\n yfitted_.fill(np.nan)\n yfitted_[sort_index] = yfitted\n yfitted = yfitted_\n\n if not all_valid:\n yfitted_ = np.empty_like(endog)\n yfitted_.fill(np.nan)\n yfitted_[mask_valid] = yfitted\n yfitted = yfitted_\n\n # we don't need to return exog anymore\n return yfitted\n", "path": "statsmodels/nonparametric/smoothers_lowess.py"}]} | 2,787 | 155 |
gh_patches_debug_8548 | rasdani/github-patches | git_diff | sktime__sktime-1571 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[DOC] Transformers missing from API Reference
#### Describe the issue linked to the documentation
<!--
Tell us about the confusion introduced in the documentation.
-->
There have been transformers that have recently been improved but have not been added to the API Reference.
For example, STLTransformer, CLaSPTransformer, etc.
#### Suggest a potential alternative/fix
<!--
Tell us how we could improve the documentation in this regard.
-->
These need to be added to the documentation.
In the future we'll have to try and be good about following our [reviewer guide](https://www.sktime.org/en/stable/reviewer_guide.html) and waiting to merge until things are added to docs. Otherwise, it is hard for users to know what functionality exists.
--- END ISSUE ---
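One way to see which transformers exist but may be missing from the docs is to enumerate them programmatically. This is only an illustrative sketch, not part of the issue or the patch: it assumes the `sktime.registry.all_estimators` helper available in recent sktime versions, and that it returns `(name, class)` pairs like sklearn's helper of the same name.

```python
# Illustrative sketch (assumption: sktime.registry.all_estimators exists and
# returns (name, class) tuples, mirroring sklearn's helper of the same name).
from sktime.registry import all_estimators

# List every transformer sktime exposes, so the names can be compared by hand
# against what the API Reference pages actually document.
for name, cls in all_estimators(estimator_types="transformer"):
    print(f"{name:35s} {cls.__module__}")
```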
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sktime/transformations/series/difference.py`
Content:
```
1 #!/usr/bin/env python3 -u
2 # -*- coding: utf-8 -*-
3 # copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
4 """Class to iteratively apply differences to a time series."""
5 __author__ = ["Ryan Kuhns"]
6 __all__ = ["Differencer"]
7
8 import numpy as np
9 import pandas as pd
10 from sklearn.utils import check_array
11
12 from sktime.forecasting.base import ForecastingHorizon
13 from sktime.transformations.base import _SeriesToSeriesTransformer
14 from sktime.utils.validation import is_int
15 from sktime.utils.validation.series import check_series
16
17
18 def _check_lags(lags):
19 msg = " ".join(
20 [
21 "`lags` should be provided as a positive integer scaler, or",
22 "a list, tuple or np.ndarray of positive integers,"
23 f"but found {type(lags)}.",
24 ]
25 )
26 non_positive_msg = "`lags` should be positive integers."
27 if isinstance(lags, int):
28 if lags <= 0:
29 raise ValueError(non_positive_msg)
30 lags = check_array([lags], ensure_2d=False)
31 elif isinstance(lags, (list, tuple, np.ndarray)):
32 if not all([is_int(lag) for lag in lags]):
33 raise TypeError(msg)
34 lags = check_array(lags, ensure_2d=False)
35 if (lags <= 0).any():
36 raise ValueError(non_positive_msg)
37 else:
38 raise TypeError(msg)
39
40 return lags
41
42
43 def _diff_transform(Z, lags):
44 Zt = Z.copy()
45
46 if len(lags) == 0:
47 return Zt
48
49 else:
50 for lag in lags:
51 Zt = Zt.diff(lag)
52 return Zt
53
54
55 def _inverse_diff(Z, lag):
56 for i in range(lag):
57 Z.iloc[i::lag] = Z.iloc[i::lag].cumsum()
58
59 return Z
60
61
62 class Differencer(_SeriesToSeriesTransformer):
63 """Apply iterative differences to a timeseries.
64
65 The transformation works for univariate and multivariate timeseries. However,
66 the multivariate case applies the same differencing to every series.
67
68 Difference transformations are applied at the specified lags in the order
69 provided.
70
71 For example, given a timeseries with monthly periodicity, using lags=[1, 12]
72 corresponds to applying a standard first difference to handle trend, and
73 followed by a seasonal difference (at lag 12) to attempt to account for
74 seasonal dependence.
75
76 To provide a higher-order difference at the same lag list the lag multiple
77 times. For example, lags=[1, 1] takes iterative first differences like may
78 be needed for a series that is integrated of order 2.
79
80 Parameters
81 ----------
82 lags : int or array-like, default = 1
83 The lags used to difference the data.
84 If a single `int` value is
85
86 drop_na : bool, default = True
87 Whether the differencer should drop the initial observations that
88 contain missing values as a result of the differencing operation(s).
89
90 Attributes
91 ----------
92 lags : int or array-like
93 Lags used to perform the differencing of the input series.
94
95 drop_na : bool
96 Stores whether the Differencer drops the initial observations that contain
97 missing values as a result of the differencing operation(s).
98
99 Example
100 -------
101 >>> from sktime.transformations.series.difference import Differencer
102 >>> from sktime.datasets import load_airline
103 >>> y = load_airline()
104 >>> transformer = Differencer(lags=[1, 12])
105 >>> y_transform = transformer.fit_transform(y)
106 """
107
108 _tags = {
109 "fit-in-transform": False,
110 "transform-returns-same-time-index": False,
111 "univariate-only": False,
112 }
113
114 def __init__(self, lags=1, drop_na=True):
115 self.lags = lags
116 self.drop_na = drop_na
117 self._Z = None
118 self._lags = None
119 self._cumulative_lags = None
120 self._prior_cum_lags = None
121 self._prior_lags = None
122 super(Differencer, self).__init__()
123
124 def _check_inverse_transform_index(self, Z):
125 """Check fitted series contains indices needed in inverse_transform."""
126 first_idx = Z.index.min()
127 orig_first_idx, orig_last_idx = self._Z.index.min(), self._Z.index.max()
128
129 is_contained_by_fitted_z = False
130 is_future = False
131
132 if first_idx < orig_first_idx:
133 msg = [
134 "Some indices of `Z` are prior to timeseries used in `fit`.",
135 "Reconstruction via `inverse_transform` is not possible.",
136 ]
137 raise ValueError(" ".join(msg))
138
139 elif Z.index.difference(self._Z.index).shape[0] == 0:
140 is_contained_by_fitted_z = True
141
142 elif first_idx > orig_last_idx:
143 is_future = True
144
145 pad_z_inv = self.drop_na or is_future
146
147 cutoff = Z.index[0] if pad_z_inv else Z.index[self._cumulative_lags[-1]]
148 fh = ForecastingHorizon(np.arange(-1, -(self._cumulative_lags[-1] + 1), -1))
149 index = fh.to_absolute(cutoff).to_pandas()
150 index_diff = index.difference(self._Z.index)
151
152 if index_diff.shape[0] != 0 and not is_contained_by_fitted_z:
153 msg = [
154 f"Inverse transform requires indices {index}",
155 "to have been stored in `fit()`,",
156 f"but the indices {index_diff} were not found.",
157 ]
158 raise ValueError(" ".join(msg))
159
160 return is_contained_by_fitted_z, pad_z_inv
161
162 def _fit(self, Z, X=None):
163 """Logic used by fit method on `Z`.
164
165 Parameters
166 ----------
167 Z : pd.Series or pd.DataFrame
168 A timeseries to apply the specified transformation on.
169
170 Returns
171 -------
172 self
173 """
174 self._lags = _check_lags(self.lags)
175 self._prior_lags = np.roll(self._lags, shift=1)
176 self._prior_lags[0] = 0
177 self._cumulative_lags = self._lags.cumsum()
178 self._prior_cum_lags = np.zeros_like(self._cumulative_lags)
179 self._prior_cum_lags[1:] = self._cumulative_lags[:-1]
180 self._Z = Z.copy()
181 return self
182
183 def _transform(self, Z, X=None):
184 """Logic used by `transform` to apply transformation to `Z`.
185
186 Differences are applied at lags specified in `lags`.
187
188 Parameters
189 ----------
190 Z : pd.Series or pd.DataFrame
191 The timeseries to apply the specified transformation on.
192
193 Returns
194 -------
195 Zt : pd.Series or pd.DataFrame
196 The transformed timeseries.
197 """
198 Zt = _diff_transform(Z, self._lags)
199 if self.drop_na:
200 Zt = Zt.iloc[self._cumulative_lags[-1] :]
201 return Zt
202
203 def _inverse_transform(self, Z, X=None):
204 """Logic used by `inverse_transform` to reverse transformation on `Z`.
205
206 Parameters
207 ----------
208 Z : pd.Series or pd.DataFrame
209 A time series to reverse the transformation on.
210
211 Returns
212 -------
213 Z_inv : pd.Series or pd.DataFrame
214 The reconstructed timeseries after the transformation has been reversed.
215 """
216 is_df = isinstance(Z, pd.DataFrame)
217 is_contained_by_fit_z, pad_z_inv = self._check_inverse_transform_index(Z)
218
219 # If `Z` is entirely contained in fitted `_Z` we can just return
220 # the values from the timeseires stored in `fit` as a shortcut
221 if is_contained_by_fit_z:
222 Z_inv = self._Z.loc[Z.index, :] if is_df else self._Z.loc[Z.index]
223
224 else:
225 Z_inv = Z.copy()
226 for i, lag_info in enumerate(
227 zip(self._lags[::-1], self._prior_cum_lags[::-1])
228 ):
229 lag, prior_cum_lag = lag_info
230 _lags = self._lags[::-1][i + 1 :]
231 _transformed = _diff_transform(self._Z, _lags)
232
233 # Determine index values for initial values needed to reverse
234 # the differencing for the specified lag
235 if pad_z_inv:
236 cutoff = Z_inv.index[0]
237 else:
238 cutoff = Z_inv.index[prior_cum_lag + lag]
239 fh = ForecastingHorizon(np.arange(-1, -(lag + 1), -1))
240 index = fh.to_absolute(cutoff).to_pandas()
241
242 if is_df:
243 prior_n_timepoint_values = _transformed.loc[index, :]
244 else:
245 prior_n_timepoint_values = _transformed.loc[index]
246 if pad_z_inv:
247 Z_inv = pd.concat([prior_n_timepoint_values, Z_inv])
248 else:
249 Z_inv.update(prior_n_timepoint_values)
250
251 Z_inv = _inverse_diff(Z_inv, lag)
252
253 if pad_z_inv:
254 Z_inv = Z_inv.loc[Z.index, :] if is_df else Z_inv.loc[Z.index]
255
256 return Z_inv
257
258 def fit(self, Z, X=None):
259 """Fit the transformation on input series `Z`.
260
261 Parameters
262 ----------
263 Z : pd.Series or pd.DataFrame
264 A time series to apply the specified transformation on.
265
266 Returns
267 -------
268 self
269 """
270 Z = check_series(Z)
271
272 self._fit(Z, X=X)
273
274 self._is_fitted = True
275 return self
276
277 def transform(self, Z, X=None):
278 """Return transformed version of input series `Z`.
279
280 Parameters
281 ----------
282 Z : pd.Series or pd.DataFrame
283 A time series to apply the specified transformation on.
284
285 Returns
286 -------
287 Zt : pd.Series or pd.DataFrame
288 Transformed version of input series `Z`.
289 """
290 self.check_is_fitted()
291 Z = check_series(Z)
292
293 Zt = self._transform(Z, X=X)
294
295 return Zt
296
297 def inverse_transform(self, Z, X=None):
298 """Reverse transformation on input series `Z`.
299
300 Parameters
301 ----------
302 Z : pd.Series or pd.DataFrame
303 A time series to reverse the transformation on.
304
305 Returns
306 -------
307 Z_inv : pd.Series or pd.DataFrame
308 The reconstructed timeseries after the transformation has been reversed.
309 """
310 self.check_is_fitted()
311 Z = check_series(Z)
312
313 Z_inv = self._inverse_transform(Z, X=X)
314
315 return Z_inv
316
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sktime/transformations/series/difference.py b/sktime/transformations/series/difference.py
--- a/sktime/transformations/series/difference.py
+++ b/sktime/transformations/series/difference.py
@@ -96,8 +96,8 @@
Stores whether the Differencer drops the initial observations that contain
missing values as a result of the differencing operation(s).
- Example
- -------
+ Examples
+ --------
>>> from sktime.transformations.series.difference import Differencer
>>> from sktime.datasets import load_airline
>>> y = load_airline()
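The patch itself is purely a docstring-convention fix: numpydoc-style tooling expects the plural `Examples` heading with an underline of matching length, which is why both the heading and its dashes change. For readers unfamiliar with the transformer the docstring describes, here is a runnable version of that same Examples block — nothing beyond what the docstring above already shows, and it assumes sktime is installed:

```python
# Runnable form of the Differencer docstring example quoted in the diff above.
from sktime.datasets import load_airline
from sktime.transformations.series.difference import Differencer

y = load_airline()                       # monthly airline passengers series
transformer = Differencer(lags=[1, 12])  # first difference, then seasonal difference at lag 12
y_transform = transformer.fit_transform(y)
print(y_transform.head())
```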
| {"golden_diff": "diff --git a/sktime/transformations/series/difference.py b/sktime/transformations/series/difference.py\n--- a/sktime/transformations/series/difference.py\n+++ b/sktime/transformations/series/difference.py\n@@ -96,8 +96,8 @@\n Stores whether the Differencer drops the initial observations that contain\n missing values as a result of the differencing operation(s).\n \n- Example\n- -------\n+ Examples\n+ --------\n >>> from sktime.transformations.series.difference import Differencer\n >>> from sktime.datasets import load_airline\n >>> y = load_airline()\n", "issue": "[DOC] Transformers missing from API Reference\n#### Describe the issue linked to the documentation\r\n\r\n<!--\r\nTell us about the confusion introduced in the documentation.\r\n-->\r\n\r\nThere have been transformers that have recently been improved but have not been added the API Reference. \r\n\r\nFor example, STLTransformer, CLaSPTransformer, etc.\r\n\r\n#### Suggest a potential alternative/fix\r\n\r\n<!--\r\nTell us how we could improve the documentation in this regard.\r\n-->\r\n\r\nThese need to be added to the documentation. \r\n\r\nIn the future we'll have to try and be good about following our [reviewer guide](https://www.sktime.org/en/stable/reviewer_guide.html) and waiting to merge until things are added to docs. Otherwise, it is hard for users to know what functionality exists.\n", "before_files": [{"content": "#!/usr/bin/env python3 -u\n# -*- coding: utf-8 -*-\n# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\"\"\"Class to iteratively apply differences to a time series.\"\"\"\n__author__ = [\"Ryan Kuhns\"]\n__all__ = [\"Differencer\"]\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.utils import check_array\n\nfrom sktime.forecasting.base import ForecastingHorizon\nfrom sktime.transformations.base import _SeriesToSeriesTransformer\nfrom sktime.utils.validation import is_int\nfrom sktime.utils.validation.series import check_series\n\n\ndef _check_lags(lags):\n msg = \" \".join(\n [\n \"`lags` should be provided as a positive integer scaler, or\",\n \"a list, tuple or np.ndarray of positive integers,\"\n f\"but found {type(lags)}.\",\n ]\n )\n non_positive_msg = \"`lags` should be positive integers.\"\n if isinstance(lags, int):\n if lags <= 0:\n raise ValueError(non_positive_msg)\n lags = check_array([lags], ensure_2d=False)\n elif isinstance(lags, (list, tuple, np.ndarray)):\n if not all([is_int(lag) for lag in lags]):\n raise TypeError(msg)\n lags = check_array(lags, ensure_2d=False)\n if (lags <= 0).any():\n raise ValueError(non_positive_msg)\n else:\n raise TypeError(msg)\n\n return lags\n\n\ndef _diff_transform(Z, lags):\n Zt = Z.copy()\n\n if len(lags) == 0:\n return Zt\n\n else:\n for lag in lags:\n Zt = Zt.diff(lag)\n return Zt\n\n\ndef _inverse_diff(Z, lag):\n for i in range(lag):\n Z.iloc[i::lag] = Z.iloc[i::lag].cumsum()\n\n return Z\n\n\nclass Differencer(_SeriesToSeriesTransformer):\n \"\"\"Apply iterative differences to a timeseries.\n\n The transformation works for univariate and multivariate timeseries. 
However,\n the multivariate case applies the same differencing to every series.\n\n Difference transformations are applied at the specified lags in the order\n provided.\n\n For example, given a timeseries with monthly periodicity, using lags=[1, 12]\n corresponds to applying a standard first difference to handle trend, and\n followed by a seasonal difference (at lag 12) to attempt to account for\n seasonal dependence.\n\n To provide a higher-order difference at the same lag list the lag multiple\n times. For example, lags=[1, 1] takes iterative first differences like may\n be needed for a series that is integrated of order 2.\n\n Parameters\n ----------\n lags : int or array-like, default = 1\n The lags used to difference the data.\n If a single `int` value is\n\n drop_na : bool, default = True\n Whether the differencer should drop the initial observations that\n contain missing values as a result of the differencing operation(s).\n\n Attributes\n ----------\n lags : int or array-like\n Lags used to perform the differencing of the input series.\n\n drop_na : bool\n Stores whether the Differencer drops the initial observations that contain\n missing values as a result of the differencing operation(s).\n\n Example\n -------\n >>> from sktime.transformations.series.difference import Differencer\n >>> from sktime.datasets import load_airline\n >>> y = load_airline()\n >>> transformer = Differencer(lags=[1, 12])\n >>> y_transform = transformer.fit_transform(y)\n \"\"\"\n\n _tags = {\n \"fit-in-transform\": False,\n \"transform-returns-same-time-index\": False,\n \"univariate-only\": False,\n }\n\n def __init__(self, lags=1, drop_na=True):\n self.lags = lags\n self.drop_na = drop_na\n self._Z = None\n self._lags = None\n self._cumulative_lags = None\n self._prior_cum_lags = None\n self._prior_lags = None\n super(Differencer, self).__init__()\n\n def _check_inverse_transform_index(self, Z):\n \"\"\"Check fitted series contains indices needed in inverse_transform.\"\"\"\n first_idx = Z.index.min()\n orig_first_idx, orig_last_idx = self._Z.index.min(), self._Z.index.max()\n\n is_contained_by_fitted_z = False\n is_future = False\n\n if first_idx < orig_first_idx:\n msg = [\n \"Some indices of `Z` are prior to timeseries used in `fit`.\",\n \"Reconstruction via `inverse_transform` is not possible.\",\n ]\n raise ValueError(\" \".join(msg))\n\n elif Z.index.difference(self._Z.index).shape[0] == 0:\n is_contained_by_fitted_z = True\n\n elif first_idx > orig_last_idx:\n is_future = True\n\n pad_z_inv = self.drop_na or is_future\n\n cutoff = Z.index[0] if pad_z_inv else Z.index[self._cumulative_lags[-1]]\n fh = ForecastingHorizon(np.arange(-1, -(self._cumulative_lags[-1] + 1), -1))\n index = fh.to_absolute(cutoff).to_pandas()\n index_diff = index.difference(self._Z.index)\n\n if index_diff.shape[0] != 0 and not is_contained_by_fitted_z:\n msg = [\n f\"Inverse transform requires indices {index}\",\n \"to have been stored in `fit()`,\",\n f\"but the indices {index_diff} were not found.\",\n ]\n raise ValueError(\" \".join(msg))\n\n return is_contained_by_fitted_z, pad_z_inv\n\n def _fit(self, Z, X=None):\n \"\"\"Logic used by fit method on `Z`.\n\n Parameters\n ----------\n Z : pd.Series or pd.DataFrame\n A timeseries to apply the specified transformation on.\n\n Returns\n -------\n self\n \"\"\"\n self._lags = _check_lags(self.lags)\n self._prior_lags = np.roll(self._lags, shift=1)\n self._prior_lags[0] = 0\n self._cumulative_lags = self._lags.cumsum()\n self._prior_cum_lags = 
np.zeros_like(self._cumulative_lags)\n self._prior_cum_lags[1:] = self._cumulative_lags[:-1]\n self._Z = Z.copy()\n return self\n\n def _transform(self, Z, X=None):\n \"\"\"Logic used by `transform` to apply transformation to `Z`.\n\n Differences are applied at lags specified in `lags`.\n\n Parameters\n ----------\n Z : pd.Series or pd.DataFrame\n The timeseries to apply the specified transformation on.\n\n Returns\n -------\n Zt : pd.Series or pd.DataFrame\n The transformed timeseries.\n \"\"\"\n Zt = _diff_transform(Z, self._lags)\n if self.drop_na:\n Zt = Zt.iloc[self._cumulative_lags[-1] :]\n return Zt\n\n def _inverse_transform(self, Z, X=None):\n \"\"\"Logic used by `inverse_transform` to reverse transformation on `Z`.\n\n Parameters\n ----------\n Z : pd.Series or pd.DataFrame\n A time series to reverse the transformation on.\n\n Returns\n -------\n Z_inv : pd.Series or pd.DataFrame\n The reconstructed timeseries after the transformation has been reversed.\n \"\"\"\n is_df = isinstance(Z, pd.DataFrame)\n is_contained_by_fit_z, pad_z_inv = self._check_inverse_transform_index(Z)\n\n # If `Z` is entirely contained in fitted `_Z` we can just return\n # the values from the timeseires stored in `fit` as a shortcut\n if is_contained_by_fit_z:\n Z_inv = self._Z.loc[Z.index, :] if is_df else self._Z.loc[Z.index]\n\n else:\n Z_inv = Z.copy()\n for i, lag_info in enumerate(\n zip(self._lags[::-1], self._prior_cum_lags[::-1])\n ):\n lag, prior_cum_lag = lag_info\n _lags = self._lags[::-1][i + 1 :]\n _transformed = _diff_transform(self._Z, _lags)\n\n # Determine index values for initial values needed to reverse\n # the differencing for the specified lag\n if pad_z_inv:\n cutoff = Z_inv.index[0]\n else:\n cutoff = Z_inv.index[prior_cum_lag + lag]\n fh = ForecastingHorizon(np.arange(-1, -(lag + 1), -1))\n index = fh.to_absolute(cutoff).to_pandas()\n\n if is_df:\n prior_n_timepoint_values = _transformed.loc[index, :]\n else:\n prior_n_timepoint_values = _transformed.loc[index]\n if pad_z_inv:\n Z_inv = pd.concat([prior_n_timepoint_values, Z_inv])\n else:\n Z_inv.update(prior_n_timepoint_values)\n\n Z_inv = _inverse_diff(Z_inv, lag)\n\n if pad_z_inv:\n Z_inv = Z_inv.loc[Z.index, :] if is_df else Z_inv.loc[Z.index]\n\n return Z_inv\n\n def fit(self, Z, X=None):\n \"\"\"Fit the transformation on input series `Z`.\n\n Parameters\n ----------\n Z : pd.Series or pd.DataFrame\n A time series to apply the specified transformation on.\n\n Returns\n -------\n self\n \"\"\"\n Z = check_series(Z)\n\n self._fit(Z, X=X)\n\n self._is_fitted = True\n return self\n\n def transform(self, Z, X=None):\n \"\"\"Return transformed version of input series `Z`.\n\n Parameters\n ----------\n Z : pd.Series or pd.DataFrame\n A time series to apply the specified transformation on.\n\n Returns\n -------\n Zt : pd.Series or pd.DataFrame\n Transformed version of input series `Z`.\n \"\"\"\n self.check_is_fitted()\n Z = check_series(Z)\n\n Zt = self._transform(Z, X=X)\n\n return Zt\n\n def inverse_transform(self, Z, X=None):\n \"\"\"Reverse transformation on input series `Z`.\n\n Parameters\n ----------\n Z : pd.Series or pd.DataFrame\n A time series to reverse the transformation on.\n\n Returns\n -------\n Z_inv : pd.Series or pd.DataFrame\n The reconstructed timeseries after the transformation has been reversed.\n \"\"\"\n self.check_is_fitted()\n Z = check_series(Z)\n\n Z_inv = self._inverse_transform(Z, X=X)\n\n return Z_inv\n", "path": "sktime/transformations/series/difference.py"}], "after_files": [{"content": 
"#!/usr/bin/env python3 -u\n# -*- coding: utf-8 -*-\n# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\"\"\"Class to iteratively apply differences to a time series.\"\"\"\n__author__ = [\"Ryan Kuhns\"]\n__all__ = [\"Differencer\"]\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.utils import check_array\n\nfrom sktime.forecasting.base import ForecastingHorizon\nfrom sktime.transformations.base import _SeriesToSeriesTransformer\nfrom sktime.utils.validation import is_int\nfrom sktime.utils.validation.series import check_series\n\n\ndef _check_lags(lags):\n msg = \" \".join(\n [\n \"`lags` should be provided as a positive integer scaler, or\",\n \"a list, tuple or np.ndarray of positive integers,\"\n f\"but found {type(lags)}.\",\n ]\n )\n non_positive_msg = \"`lags` should be positive integers.\"\n if isinstance(lags, int):\n if lags <= 0:\n raise ValueError(non_positive_msg)\n lags = check_array([lags], ensure_2d=False)\n elif isinstance(lags, (list, tuple, np.ndarray)):\n if not all([is_int(lag) for lag in lags]):\n raise TypeError(msg)\n lags = check_array(lags, ensure_2d=False)\n if (lags <= 0).any():\n raise ValueError(non_positive_msg)\n else:\n raise TypeError(msg)\n\n return lags\n\n\ndef _diff_transform(Z, lags):\n Zt = Z.copy()\n\n if len(lags) == 0:\n return Zt\n\n else:\n for lag in lags:\n Zt = Zt.diff(lag)\n return Zt\n\n\ndef _inverse_diff(Z, lag):\n for i in range(lag):\n Z.iloc[i::lag] = Z.iloc[i::lag].cumsum()\n\n return Z\n\n\nclass Differencer(_SeriesToSeriesTransformer):\n \"\"\"Apply iterative differences to a timeseries.\n\n The transformation works for univariate and multivariate timeseries. However,\n the multivariate case applies the same differencing to every series.\n\n Difference transformations are applied at the specified lags in the order\n provided.\n\n For example, given a timeseries with monthly periodicity, using lags=[1, 12]\n corresponds to applying a standard first difference to handle trend, and\n followed by a seasonal difference (at lag 12) to attempt to account for\n seasonal dependence.\n\n To provide a higher-order difference at the same lag list the lag multiple\n times. 
For example, lags=[1, 1] takes iterative first differences like may\n be needed for a series that is integrated of order 2.\n\n Parameters\n ----------\n lags : int or array-like, default = 1\n The lags used to difference the data.\n If a single `int` value is\n\n drop_na : bool, default = True\n Whether the differencer should drop the initial observations that\n contain missing values as a result of the differencing operation(s).\n\n Attributes\n ----------\n lags : int or array-like\n Lags used to perform the differencing of the input series.\n\n drop_na : bool\n Stores whether the Differencer drops the initial observations that contain\n missing values as a result of the differencing operation(s).\n\n Examples\n --------\n >>> from sktime.transformations.series.difference import Differencer\n >>> from sktime.datasets import load_airline\n >>> y = load_airline()\n >>> transformer = Differencer(lags=[1, 12])\n >>> y_transform = transformer.fit_transform(y)\n \"\"\"\n\n _tags = {\n \"fit-in-transform\": False,\n \"transform-returns-same-time-index\": False,\n \"univariate-only\": False,\n }\n\n def __init__(self, lags=1, drop_na=True):\n self.lags = lags\n self.drop_na = drop_na\n self._Z = None\n self._lags = None\n self._cumulative_lags = None\n self._prior_cum_lags = None\n self._prior_lags = None\n super(Differencer, self).__init__()\n\n def _check_inverse_transform_index(self, Z):\n \"\"\"Check fitted series contains indices needed in inverse_transform.\"\"\"\n first_idx = Z.index.min()\n orig_first_idx, orig_last_idx = self._Z.index.min(), self._Z.index.max()\n\n is_contained_by_fitted_z = False\n is_future = False\n\n if first_idx < orig_first_idx:\n msg = [\n \"Some indices of `Z` are prior to timeseries used in `fit`.\",\n \"Reconstruction via `inverse_transform` is not possible.\",\n ]\n raise ValueError(\" \".join(msg))\n\n elif Z.index.difference(self._Z.index).shape[0] == 0:\n is_contained_by_fitted_z = True\n\n elif first_idx > orig_last_idx:\n is_future = True\n\n pad_z_inv = self.drop_na or is_future\n\n cutoff = Z.index[0] if pad_z_inv else Z.index[self._cumulative_lags[-1]]\n fh = ForecastingHorizon(np.arange(-1, -(self._cumulative_lags[-1] + 1), -1))\n index = fh.to_absolute(cutoff).to_pandas()\n index_diff = index.difference(self._Z.index)\n\n if index_diff.shape[0] != 0 and not is_contained_by_fitted_z:\n msg = [\n f\"Inverse transform requires indices {index}\",\n \"to have been stored in `fit()`,\",\n f\"but the indices {index_diff} were not found.\",\n ]\n raise ValueError(\" \".join(msg))\n\n return is_contained_by_fitted_z, pad_z_inv\n\n def _fit(self, Z, X=None):\n \"\"\"Logic used by fit method on `Z`.\n\n Parameters\n ----------\n Z : pd.Series or pd.DataFrame\n A timeseries to apply the specified transformation on.\n\n Returns\n -------\n self\n \"\"\"\n self._lags = _check_lags(self.lags)\n self._prior_lags = np.roll(self._lags, shift=1)\n self._prior_lags[0] = 0\n self._cumulative_lags = self._lags.cumsum()\n self._prior_cum_lags = np.zeros_like(self._cumulative_lags)\n self._prior_cum_lags[1:] = self._cumulative_lags[:-1]\n self._Z = Z.copy()\n return self\n\n def _transform(self, Z, X=None):\n \"\"\"Logic used by `transform` to apply transformation to `Z`.\n\n Differences are applied at lags specified in `lags`.\n\n Parameters\n ----------\n Z : pd.Series or pd.DataFrame\n The timeseries to apply the specified transformation on.\n\n Returns\n -------\n Zt : pd.Series or pd.DataFrame\n The transformed timeseries.\n \"\"\"\n Zt = _diff_transform(Z, 
self._lags)\n if self.drop_na:\n Zt = Zt.iloc[self._cumulative_lags[-1] :]\n return Zt\n\n def _inverse_transform(self, Z, X=None):\n \"\"\"Logic used by `inverse_transform` to reverse transformation on `Z`.\n\n Parameters\n ----------\n Z : pd.Series or pd.DataFrame\n A time series to reverse the transformation on.\n\n Returns\n -------\n Z_inv : pd.Series or pd.DataFrame\n The reconstructed timeseries after the transformation has been reversed.\n \"\"\"\n is_df = isinstance(Z, pd.DataFrame)\n is_contained_by_fit_z, pad_z_inv = self._check_inverse_transform_index(Z)\n\n # If `Z` is entirely contained in fitted `_Z` we can just return\n # the values from the timeseires stored in `fit` as a shortcut\n if is_contained_by_fit_z:\n Z_inv = self._Z.loc[Z.index, :] if is_df else self._Z.loc[Z.index]\n\n else:\n Z_inv = Z.copy()\n for i, lag_info in enumerate(\n zip(self._lags[::-1], self._prior_cum_lags[::-1])\n ):\n lag, prior_cum_lag = lag_info\n _lags = self._lags[::-1][i + 1 :]\n _transformed = _diff_transform(self._Z, _lags)\n\n # Determine index values for initial values needed to reverse\n # the differencing for the specified lag\n if pad_z_inv:\n cutoff = Z_inv.index[0]\n else:\n cutoff = Z_inv.index[prior_cum_lag + lag]\n fh = ForecastingHorizon(np.arange(-1, -(lag + 1), -1))\n index = fh.to_absolute(cutoff).to_pandas()\n\n if is_df:\n prior_n_timepoint_values = _transformed.loc[index, :]\n else:\n prior_n_timepoint_values = _transformed.loc[index]\n if pad_z_inv:\n Z_inv = pd.concat([prior_n_timepoint_values, Z_inv])\n else:\n Z_inv.update(prior_n_timepoint_values)\n\n Z_inv = _inverse_diff(Z_inv, lag)\n\n if pad_z_inv:\n Z_inv = Z_inv.loc[Z.index, :] if is_df else Z_inv.loc[Z.index]\n\n return Z_inv\n\n def fit(self, Z, X=None):\n \"\"\"Fit the transformation on input series `Z`.\n\n Parameters\n ----------\n Z : pd.Series or pd.DataFrame\n A time series to apply the specified transformation on.\n\n Returns\n -------\n self\n \"\"\"\n Z = check_series(Z)\n\n self._fit(Z, X=X)\n\n self._is_fitted = True\n return self\n\n def transform(self, Z, X=None):\n \"\"\"Return transformed version of input series `Z`.\n\n Parameters\n ----------\n Z : pd.Series or pd.DataFrame\n A time series to apply the specified transformation on.\n\n Returns\n -------\n Zt : pd.Series or pd.DataFrame\n Transformed version of input series `Z`.\n \"\"\"\n self.check_is_fitted()\n Z = check_series(Z)\n\n Zt = self._transform(Z, X=X)\n\n return Zt\n\n def inverse_transform(self, Z, X=None):\n \"\"\"Reverse transformation on input series `Z`.\n\n Parameters\n ----------\n Z : pd.Series or pd.DataFrame\n A time series to reverse the transformation on.\n\n Returns\n -------\n Z_inv : pd.Series or pd.DataFrame\n The reconstructed timeseries after the transformation has been reversed.\n \"\"\"\n self.check_is_fitted()\n Z = check_series(Z)\n\n Z_inv = self._inverse_transform(Z, X=X)\n\n return Z_inv\n", "path": "sktime/transformations/series/difference.py"}]} | 3,630 | 144 |
gh_patches_debug_42848 | rasdani/github-patches | git_diff | yt-dlp__yt-dlp-7202 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[DigitalConcertHall] cannot download videos under /films
### DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE
- [X] I understand that I will be **blocked** if I *intentionally* remove or skip any mandatory\* field
### Checklist
- [X] I'm reporting that yt-dlp is broken on a **supported** site
- [X] I've verified that I'm running yt-dlp version **2023.03.04** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
- [X] I've checked that all provided URLs are playable in a browser with the same IP and same login details
- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/yt-dlp/yt-dlp/wiki/FAQ#video-url-contains-an-ampersand--and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)
- [X] I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766) and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates
- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)
- [X] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required
### Region
All
### Provide a description that is worded well enough to be understood
A major part of the Digital Concert Hall site is [concert videos](https://www.digitalconcerthall.com/en/concerts), which yt-dlp handles just fine. But when I try to download the videos [here](https://www.digitalconcerthall.com/en/films), e.g. [this one](https://www.digitalconcerthall.com/en/film/388), which lasts 44 minutes (it can be played with the login credentials `--username [email protected] --password 3PXx-5.NDNmMD2!`, which you can use), yt-dlp only downloads the 39-second trailer. I am using a build from commit `ecfe479`.
### Provide verbose output that clearly demonstrates the problem
- [X] Run **your** yt-dlp command with **-vU** flag added (`yt-dlp -vU <your command line>`)
- [X] If using API, add `'verbose': True` to `YoutubeDL` params instead
- [X] Copy the WHOLE output (starting with `[debug] Command-line config`) and insert it below
### Complete Verbose Output
```shell
[debug] Command-line config: ['https://www.digitalconcerthall.com/en/film/388', '--username', 'PRIVATE', '--password', 'PRIVATE', '-vU']
[debug] Encodings: locale UTF-8, fs utf-8, pref UTF-8, out utf-8, error utf-8, screen utf-8
[debug] yt-dlp version [email protected] [392389b7d] (zip)
[debug] Python 3.11.3 (CPython x86_64 64bit) - Linux-6.3.4-arch2-1-x86_64-with-glibc2.37 (OpenSSL 3.0.8 7 Feb 2023, glibc 2.37)
[debug] exe versions: ffmpeg 6.0 (setts), ffprobe 6.0, rtmpdump 2.4
[debug] Optional libraries: Cryptodome-3.17, brotli-1.0.9, certifi-2023.05.07, mutagen-1.46.0, sqlite3-2.6.0, websockets-11.0.3
[debug] Proxy map: {}
[debug] Loaded 1842 extractors
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
Available version: [email protected], Current version: [email protected]
Current Build Hash: 5bfb7e4a15c47b2ff99b149b7c841e98ef606a4af18fd6330ddf3a0ed4a7ea19
yt-dlp is up to date ([email protected])
[generic] Extracting URL: https://www.digitalconcerthall.com/en/film/388
[generic] 388: Downloading webpage
WARNING: [generic] Falling back on generic information extractor
[generic] 388: Extracting information
[debug] Looking for embeds
[debug] Identified a JSON LD
[generic] Extracting URL: https://world-vod.dchdns.net/preview/388-t/preview.mp4#__youtubedl_smuggle=%7B%22force_videoid%22%3A+%22388%22%2C+%22to_generic%22%3A+true%2C+%22http_headers%22%3A+%7B%22Referer%22%3A+%22https%3A%2F%2Fwww.digitalconcerthall.com%2Fen%2Ffilm%2F388%22%7D%7D
[generic] 388: Downloading webpage
WARNING: [generic] URL could be a direct video link, returning it as such.
[debug] Formats sorted by: hasvid, ie_pref, lang, quality, res, fps, hdr:12(7), vcodec:vp9.2(10), channels, acodec, filesize, fs_approx, tbr, vbr, abr, asr, proto, vext, aext, hasaud, source, id
[debug] Default format spec: bestvideo*+bestaudio/best
[info] 388: Downloading 1 format(s): 0
[debug] Invoking http downloader on "https://world-vod.dchdns.net/preview/388-t/preview.mp4"
[download] Destination: The Berliner Philharmoniker and Frank Peter Zimmermann [388].mp4
[download] 100% of 16.16MiB in 00:00:07 at 2.05MiB/s
```
--- END ISSUE ---
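The verbose log above already hints at the cause: yt-dlp falls back to the generic extractor for the `/film/388` URL, which means `DigitalConcertHallIE` never claims it. Its `_VALID_URL` (shown in the file below) only accepts `/concert/` paths. A minimal check of that pattern, with the regex copied from the extractor and the URLs taken from the issue:

```python
import re

# _VALID_URL as it stands in digitalconcerthall.py below: only ".../{lang}/concert/{id}".
VALID_URL = r'https?://(?:www\.)?digitalconcerthall\.com/(?P<language>[a-z]+)/concert/(?P<id>[0-9]+)'

print(re.match(VALID_URL, 'https://www.digitalconcerthall.com/en/concert/53785'))  # <re.Match ...>
print(re.match(VALID_URL, 'https://www.digitalconcerthall.com/en/film/388'))       # None -> generic extractor
```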
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `yt_dlp/extractor/digitalconcerthall.py`
Content:
```
1 from .common import InfoExtractor
2
3 from ..utils import (
4 ExtractorError,
5 parse_resolution,
6 traverse_obj,
7 try_get,
8 urlencode_postdata,
9 )
10
11
12 class DigitalConcertHallIE(InfoExtractor):
13 IE_DESC = 'DigitalConcertHall extractor'
14 _VALID_URL = r'https?://(?:www\.)?digitalconcerthall\.com/(?P<language>[a-z]+)/concert/(?P<id>[0-9]+)'
15 _OAUTH_URL = 'https://api.digitalconcerthall.com/v2/oauth2/token'
16 _ACCESS_TOKEN = None
17 _NETRC_MACHINE = 'digitalconcerthall'
18 _TESTS = [{
19 'note': 'Playlist with only one video',
20 'url': 'https://www.digitalconcerthall.com/en/concert/53201',
21 'info_dict': {
22 'id': '53201-1',
23 'ext': 'mp4',
24 'composer': 'Kurt Weill',
25 'title': '[Magic Night]',
26 'thumbnail': r're:^https?://images.digitalconcerthall.com/cms/thumbnails.*\.jpg$',
27 'upload_date': '20210624',
28 'timestamp': 1624548600,
29 'duration': 2798,
30 'album_artist': 'Members of the Berliner Philharmoniker / Simon Rössler',
31 },
32 'params': {'skip_download': 'm3u8'},
33 }, {
34 'note': 'Concert with several works and an interview',
35 'url': 'https://www.digitalconcerthall.com/en/concert/53785',
36 'info_dict': {
37 'id': '53785',
38 'album_artist': 'Berliner Philharmoniker / Kirill Petrenko',
39 'title': 'Kirill Petrenko conducts Mendelssohn and Shostakovich',
40 },
41 'params': {'skip_download': 'm3u8'},
42 'playlist_count': 3,
43 }]
44
45 def _perform_login(self, username, password):
46 token_response = self._download_json(
47 self._OAUTH_URL,
48 None, 'Obtaining token', errnote='Unable to obtain token', data=urlencode_postdata({
49 'affiliate': 'none',
50 'grant_type': 'device',
51 'device_vendor': 'unknown',
52 'app_id': 'dch.webapp',
53 'app_version': '1.0.0',
54 'client_secret': '2ySLN+2Fwb',
55 }), headers={
56 'Content-Type': 'application/x-www-form-urlencoded',
57 })
58 self._ACCESS_TOKEN = token_response['access_token']
59 try:
60 self._download_json(
61 self._OAUTH_URL,
62 None, note='Logging in', errnote='Unable to login', data=urlencode_postdata({
63 'grant_type': 'password',
64 'username': username,
65 'password': password,
66 }), headers={
67 'Content-Type': 'application/x-www-form-urlencoded',
68 'Referer': 'https://www.digitalconcerthall.com',
69 'Authorization': f'Bearer {self._ACCESS_TOKEN}'
70 })
71 except ExtractorError:
72 self.raise_login_required(msg='Login info incorrect')
73
74 def _real_initialize(self):
75 if not self._ACCESS_TOKEN:
76 self.raise_login_required(method='password')
77
78 def _entries(self, items, language, **kwargs):
79 for item in items:
80 video_id = item['id']
81 stream_info = self._download_json(
82 self._proto_relative_url(item['_links']['streams']['href']), video_id, headers={
83 'Accept': 'application/json',
84 'Authorization': f'Bearer {self._ACCESS_TOKEN}',
85 'Accept-Language': language
86 })
87
88 m3u8_url = traverse_obj(
89 stream_info, ('channel', lambda k, _: k.startswith('vod_mixed'), 'stream', 0, 'url'), get_all=False)
90 formats = self._extract_m3u8_formats(m3u8_url, video_id, 'mp4', 'm3u8_native', fatal=False)
91
92 yield {
93 'id': video_id,
94 'title': item.get('title'),
95 'composer': item.get('name_composer'),
96 'url': m3u8_url,
97 'formats': formats,
98 'duration': item.get('duration_total'),
99 'timestamp': traverse_obj(item, ('date', 'published')),
100 'description': item.get('short_description') or stream_info.get('short_description'),
101 **kwargs,
102 'chapters': [{
103 'start_time': chapter.get('time'),
104 'end_time': try_get(chapter, lambda x: x['time'] + x['duration']),
105 'title': chapter.get('text'),
106 } for chapter in item['cuepoints']] if item.get('cuepoints') else None,
107 }
108
109 def _real_extract(self, url):
110 language, video_id = self._match_valid_url(url).group('language', 'id')
111 if not language:
112 language = 'en'
113
114 thumbnail_url = self._html_search_regex(
115 r'(https?://images\.digitalconcerthall\.com/cms/thumbnails/.*\.jpg)',
116 self._download_webpage(url, video_id), 'thumbnail')
117 thumbnails = [{
118 'url': thumbnail_url,
119 **parse_resolution(thumbnail_url)
120 }]
121
122 vid_info = self._download_json(
123 f'https://api.digitalconcerthall.com/v2/concert/{video_id}', video_id, headers={
124 'Accept': 'application/json',
125 'Accept-Language': language
126 })
127 album_artist = ' / '.join(traverse_obj(vid_info, ('_links', 'artist', ..., 'name')) or '')
128
129 return {
130 '_type': 'playlist',
131 'id': video_id,
132 'title': vid_info.get('title'),
133 'entries': self._entries(traverse_obj(vid_info, ('_embedded', ..., ...)), language,
134 thumbnails=thumbnails, album_artist=album_artist),
135 'thumbnails': thumbnails,
136 'album_artist': album_artist,
137 }
138
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/yt_dlp/extractor/digitalconcerthall.py b/yt_dlp/extractor/digitalconcerthall.py
--- a/yt_dlp/extractor/digitalconcerthall.py
+++ b/yt_dlp/extractor/digitalconcerthall.py
@@ -11,7 +11,7 @@
class DigitalConcertHallIE(InfoExtractor):
IE_DESC = 'DigitalConcertHall extractor'
- _VALID_URL = r'https?://(?:www\.)?digitalconcerthall\.com/(?P<language>[a-z]+)/concert/(?P<id>[0-9]+)'
+ _VALID_URL = r'https?://(?:www\.)?digitalconcerthall\.com/(?P<language>[a-z]+)/(?P<type>film|concert)/(?P<id>[0-9]+)'
_OAUTH_URL = 'https://api.digitalconcerthall.com/v2/oauth2/token'
_ACCESS_TOKEN = None
_NETRC_MACHINE = 'digitalconcerthall'
@@ -40,6 +40,19 @@
},
'params': {'skip_download': 'm3u8'},
'playlist_count': 3,
+ }, {
+ 'url': 'https://www.digitalconcerthall.com/en/film/388',
+ 'info_dict': {
+ 'id': '388',
+ 'ext': 'mp4',
+ 'title': 'The Berliner Philharmoniker and Frank Peter Zimmermann',
+ 'description': 'md5:cfe25a7044fa4be13743e5089b5b5eb2',
+ 'thumbnail': r're:^https?://images.digitalconcerthall.com/cms/thumbnails.*\.jpg$',
+ 'upload_date': '20220714',
+ 'timestamp': 1657785600,
+ 'album_artist': 'Frank Peter Zimmermann / Benedikt von Bernstorff / Jakob von Bernstorff',
+ },
+ 'params': {'skip_download': 'm3u8'},
}]
def _perform_login(self, username, password):
@@ -75,7 +88,7 @@
if not self._ACCESS_TOKEN:
self.raise_login_required(method='password')
- def _entries(self, items, language, **kwargs):
+ def _entries(self, items, language, type_, **kwargs):
for item in items:
video_id = item['id']
stream_info = self._download_json(
@@ -103,11 +116,11 @@
'start_time': chapter.get('time'),
'end_time': try_get(chapter, lambda x: x['time'] + x['duration']),
'title': chapter.get('text'),
- } for chapter in item['cuepoints']] if item.get('cuepoints') else None,
+ } for chapter in item['cuepoints']] if item.get('cuepoints') and type_ == 'concert' else None,
}
def _real_extract(self, url):
- language, video_id = self._match_valid_url(url).group('language', 'id')
+ language, type_, video_id = self._match_valid_url(url).group('language', 'type', 'id')
if not language:
language = 'en'
@@ -120,18 +133,18 @@
}]
vid_info = self._download_json(
- f'https://api.digitalconcerthall.com/v2/concert/{video_id}', video_id, headers={
+ f'https://api.digitalconcerthall.com/v2/{type_}/{video_id}', video_id, headers={
'Accept': 'application/json',
'Accept-Language': language
})
album_artist = ' / '.join(traverse_obj(vid_info, ('_links', 'artist', ..., 'name')) or '')
+ videos = [vid_info] if type_ == 'film' else traverse_obj(vid_info, ('_embedded', ..., ...))
return {
'_type': 'playlist',
'id': video_id,
'title': vid_info.get('title'),
- 'entries': self._entries(traverse_obj(vid_info, ('_embedded', ..., ...)), language,
- thumbnails=thumbnails, album_artist=album_artist),
+ 'entries': self._entries(videos, language, thumbnails=thumbnails, album_artist=album_artist, type_=type_),
'thumbnails': thumbnails,
'album_artist': album_artist,
}
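The heart of the fix above is the widened `_VALID_URL` plus the new `type` capture group, which `_real_extract` then feeds straight into the v2 API path. A small sketch tracing that flow, using only the pieces visible in the diff (the URLs come from the extractor's tests):

```python
import re

# Pattern and endpoint template exactly as they appear in the patched extractor.
NEW_VALID_URL = r'https?://(?:www\.)?digitalconcerthall\.com/(?P<language>[a-z]+)/(?P<type>film|concert)/(?P<id>[0-9]+)'

for url in ('https://www.digitalconcerthall.com/en/film/388',
            'https://www.digitalconcerthall.com/en/concert/53785'):
    language, type_, video_id = re.match(NEW_VALID_URL, url).group('language', 'type', 'id')
    # film pages hit /v2/film/{id}, concerts keep hitting /v2/concert/{id}
    print(f'https://api.digitalconcerthall.com/v2/{type_}/{video_id}')
```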
| {"golden_diff": "diff --git a/yt_dlp/extractor/digitalconcerthall.py b/yt_dlp/extractor/digitalconcerthall.py\n--- a/yt_dlp/extractor/digitalconcerthall.py\n+++ b/yt_dlp/extractor/digitalconcerthall.py\n@@ -11,7 +11,7 @@\n \n class DigitalConcertHallIE(InfoExtractor):\n IE_DESC = 'DigitalConcertHall extractor'\n- _VALID_URL = r'https?://(?:www\\.)?digitalconcerthall\\.com/(?P<language>[a-z]+)/concert/(?P<id>[0-9]+)'\n+ _VALID_URL = r'https?://(?:www\\.)?digitalconcerthall\\.com/(?P<language>[a-z]+)/(?P<type>film|concert)/(?P<id>[0-9]+)'\n _OAUTH_URL = 'https://api.digitalconcerthall.com/v2/oauth2/token'\n _ACCESS_TOKEN = None\n _NETRC_MACHINE = 'digitalconcerthall'\n@@ -40,6 +40,19 @@\n },\n 'params': {'skip_download': 'm3u8'},\n 'playlist_count': 3,\n+ }, {\n+ 'url': 'https://www.digitalconcerthall.com/en/film/388',\n+ 'info_dict': {\n+ 'id': '388',\n+ 'ext': 'mp4',\n+ 'title': 'The Berliner Philharmoniker and Frank Peter Zimmermann',\n+ 'description': 'md5:cfe25a7044fa4be13743e5089b5b5eb2',\n+ 'thumbnail': r're:^https?://images.digitalconcerthall.com/cms/thumbnails.*\\.jpg$',\n+ 'upload_date': '20220714',\n+ 'timestamp': 1657785600,\n+ 'album_artist': 'Frank Peter Zimmermann / Benedikt von Bernstorff / Jakob von Bernstorff',\n+ },\n+ 'params': {'skip_download': 'm3u8'},\n }]\n \n def _perform_login(self, username, password):\n@@ -75,7 +88,7 @@\n if not self._ACCESS_TOKEN:\n self.raise_login_required(method='password')\n \n- def _entries(self, items, language, **kwargs):\n+ def _entries(self, items, language, type_, **kwargs):\n for item in items:\n video_id = item['id']\n stream_info = self._download_json(\n@@ -103,11 +116,11 @@\n 'start_time': chapter.get('time'),\n 'end_time': try_get(chapter, lambda x: x['time'] + x['duration']),\n 'title': chapter.get('text'),\n- } for chapter in item['cuepoints']] if item.get('cuepoints') else None,\n+ } for chapter in item['cuepoints']] if item.get('cuepoints') and type_ == 'concert' else None,\n }\n \n def _real_extract(self, url):\n- language, video_id = self._match_valid_url(url).group('language', 'id')\n+ language, type_, video_id = self._match_valid_url(url).group('language', 'type', 'id')\n if not language:\n language = 'en'\n \n@@ -120,18 +133,18 @@\n }]\n \n vid_info = self._download_json(\n- f'https://api.digitalconcerthall.com/v2/concert/{video_id}', video_id, headers={\n+ f'https://api.digitalconcerthall.com/v2/{type_}/{video_id}', video_id, headers={\n 'Accept': 'application/json',\n 'Accept-Language': language\n })\n album_artist = ' / '.join(traverse_obj(vid_info, ('_links', 'artist', ..., 'name')) or '')\n+ videos = [vid_info] if type_ == 'film' else traverse_obj(vid_info, ('_embedded', ..., ...))\n \n return {\n '_type': 'playlist',\n 'id': video_id,\n 'title': vid_info.get('title'),\n- 'entries': self._entries(traverse_obj(vid_info, ('_embedded', ..., ...)), language,\n- thumbnails=thumbnails, album_artist=album_artist),\n+ 'entries': self._entries(videos, language, thumbnails=thumbnails, album_artist=album_artist, type_=type_),\n 'thumbnails': thumbnails,\n 'album_artist': album_artist,\n }\n", "issue": "[DigitalConcertHall] cannot download videos under /films\n### DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE\n\n- [X] I understand that I will be **blocked** if I *intentionally* remove or skip any mandatory\\* field\n\n### Checklist\n\n- [X] I'm reporting that yt-dlp is broken on a **supported** site\n- [X] I've verified that I'm running yt-dlp version **2023.03.04** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or 
later (specify commit)\n- [X] I've checked that all provided URLs are playable in a browser with the same IP and same login details\n- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/yt-dlp/yt-dlp/wiki/FAQ#video-url-contains-an-ampersand--and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)\n- [X] I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766) and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates\n- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)\n- [X] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required\n\n### Region\n\nAll\n\n### Provide a description that is worded well enough to be understood\n\nA major part of the site Digital Concert Hall is [concert videos](https://www.digitalconcerthall.com/en/concerts) which yt-dlp handles just fine. But when I try to download videos [here](https://www.digitalconcerthall.com/en/films), e.g. [this one](https://www.digitalconcerthall.com/en/film/388) which lasts 44 minutes (can be played with the login credentials `--username [email protected] --password 3PXx-5.NDNmMD2!` which you can use), it only downloads the trailer which lasts 39 seconds. Using build from commit `ecfe479`.\n\n### Provide verbose output that clearly demonstrates the problem\n\n- [X] Run **your** yt-dlp command with **-vU** flag added (`yt-dlp -vU <your command line>`)\n- [X] If using API, add `'verbose': True` to `YoutubeDL` params instead\n- [X] Copy the WHOLE output (starting with `[debug] Command-line config`) and insert it below\n\n### Complete Verbose Output\n\n```shell\n[debug] Command-line config: ['https://www.digitalconcerthall.com/en/film/388', '--username', 'PRIVATE', '--password', 'PRIVATE', '-vU']\r\n[debug] Encodings: locale UTF-8, fs utf-8, pref UTF-8, out utf-8, error utf-8, screen utf-8\r\n[debug] yt-dlp version [email protected] [392389b7d] (zip)\r\n[debug] Python 3.11.3 (CPython x86_64 64bit) - Linux-6.3.4-arch2-1-x86_64-with-glibc2.37 (OpenSSL 3.0.8 7 Feb 2023, glibc 2.37)\r\n[debug] exe versions: ffmpeg 6.0 (setts), ffprobe 6.0, rtmpdump 2.4\r\n[debug] Optional libraries: Cryptodome-3.17, brotli-1.0.9, certifi-2023.05.07, mutagen-1.46.0, sqlite3-2.6.0, websockets-11.0.3\r\n[debug] Proxy map: {}\r\n[debug] Loaded 1842 extractors\r\n[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest\r\nAvailable version: [email protected], Current version: [email protected]\r\nCurrent Build Hash: 5bfb7e4a15c47b2ff99b149b7c841e98ef606a4af18fd6330ddf3a0ed4a7ea19\r\nyt-dlp is up to date ([email protected])\r\n[generic] Extracting URL: https://www.digitalconcerthall.com/en/film/388\r\n[generic] 388: Downloading webpage\r\nWARNING: [generic] Falling back on generic information extractor\r\n[generic] 388: Extracting information\r\n[debug] Looking for embeds\r\n[debug] Identified a JSON LD\r\n[generic] Extracting URL: https://world-vod.dchdns.net/preview/388-t/preview.mp4#__youtubedl_smuggle=%7B%22force_videoid%22%3A+%22388%22%2C+%22to_generic%22%3A+true%2C+%22http_headers%22%3A+%7B%22Referer%22%3A+%22https%3A%2F%2Fwww.digitalconcerthall.com%2Fen%2Ffilm%2F388%22%7D%7D\r\n[generic] 388: 
Downloading webpage\r\nWARNING: [generic] URL could be a direct video link, returning it as such.\r\n[debug] Formats sorted by: hasvid, ie_pref, lang, quality, res, fps, hdr:12(7), vcodec:vp9.2(10), channels, acodec, filesize, fs_approx, tbr, vbr, abr, asr, proto, vext, aext, hasaud, source, id\r\n[debug] Default format spec: bestvideo*+bestaudio/best\r\n[info] 388: Downloading 1 format(s): 0\r\n[debug] Invoking http downloader on \"https://world-vod.dchdns.net/preview/388-t/preview.mp4\"\r\n[download] Destination: The Berliner Philharmoniker and Frank Peter Zimmermann [388].mp4\r\n[download] 100% of 16.16MiB in 00:00:07 at 2.05MiB/s\n```\n\n", "before_files": [{"content": "from .common import InfoExtractor\n\nfrom ..utils import (\n ExtractorError,\n parse_resolution,\n traverse_obj,\n try_get,\n urlencode_postdata,\n)\n\n\nclass DigitalConcertHallIE(InfoExtractor):\n IE_DESC = 'DigitalConcertHall extractor'\n _VALID_URL = r'https?://(?:www\\.)?digitalconcerthall\\.com/(?P<language>[a-z]+)/concert/(?P<id>[0-9]+)'\n _OAUTH_URL = 'https://api.digitalconcerthall.com/v2/oauth2/token'\n _ACCESS_TOKEN = None\n _NETRC_MACHINE = 'digitalconcerthall'\n _TESTS = [{\n 'note': 'Playlist with only one video',\n 'url': 'https://www.digitalconcerthall.com/en/concert/53201',\n 'info_dict': {\n 'id': '53201-1',\n 'ext': 'mp4',\n 'composer': 'Kurt Weill',\n 'title': '[Magic Night]',\n 'thumbnail': r're:^https?://images.digitalconcerthall.com/cms/thumbnails.*\\.jpg$',\n 'upload_date': '20210624',\n 'timestamp': 1624548600,\n 'duration': 2798,\n 'album_artist': 'Members of the Berliner Philharmoniker / Simon R\u00f6ssler',\n },\n 'params': {'skip_download': 'm3u8'},\n }, {\n 'note': 'Concert with several works and an interview',\n 'url': 'https://www.digitalconcerthall.com/en/concert/53785',\n 'info_dict': {\n 'id': '53785',\n 'album_artist': 'Berliner Philharmoniker / Kirill Petrenko',\n 'title': 'Kirill Petrenko conducts Mendelssohn and Shostakovich',\n },\n 'params': {'skip_download': 'm3u8'},\n 'playlist_count': 3,\n }]\n\n def _perform_login(self, username, password):\n token_response = self._download_json(\n self._OAUTH_URL,\n None, 'Obtaining token', errnote='Unable to obtain token', data=urlencode_postdata({\n 'affiliate': 'none',\n 'grant_type': 'device',\n 'device_vendor': 'unknown',\n 'app_id': 'dch.webapp',\n 'app_version': '1.0.0',\n 'client_secret': '2ySLN+2Fwb',\n }), headers={\n 'Content-Type': 'application/x-www-form-urlencoded',\n })\n self._ACCESS_TOKEN = token_response['access_token']\n try:\n self._download_json(\n self._OAUTH_URL,\n None, note='Logging in', errnote='Unable to login', data=urlencode_postdata({\n 'grant_type': 'password',\n 'username': username,\n 'password': password,\n }), headers={\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'Referer': 'https://www.digitalconcerthall.com',\n 'Authorization': f'Bearer {self._ACCESS_TOKEN}'\n })\n except ExtractorError:\n self.raise_login_required(msg='Login info incorrect')\n\n def _real_initialize(self):\n if not self._ACCESS_TOKEN:\n self.raise_login_required(method='password')\n\n def _entries(self, items, language, **kwargs):\n for item in items:\n video_id = item['id']\n stream_info = self._download_json(\n self._proto_relative_url(item['_links']['streams']['href']), video_id, headers={\n 'Accept': 'application/json',\n 'Authorization': f'Bearer {self._ACCESS_TOKEN}',\n 'Accept-Language': language\n })\n\n m3u8_url = traverse_obj(\n stream_info, ('channel', lambda k, _: k.startswith('vod_mixed'), 'stream', 0, 'url'), 
get_all=False)\n formats = self._extract_m3u8_formats(m3u8_url, video_id, 'mp4', 'm3u8_native', fatal=False)\n\n yield {\n 'id': video_id,\n 'title': item.get('title'),\n 'composer': item.get('name_composer'),\n 'url': m3u8_url,\n 'formats': formats,\n 'duration': item.get('duration_total'),\n 'timestamp': traverse_obj(item, ('date', 'published')),\n 'description': item.get('short_description') or stream_info.get('short_description'),\n **kwargs,\n 'chapters': [{\n 'start_time': chapter.get('time'),\n 'end_time': try_get(chapter, lambda x: x['time'] + x['duration']),\n 'title': chapter.get('text'),\n } for chapter in item['cuepoints']] if item.get('cuepoints') else None,\n }\n\n def _real_extract(self, url):\n language, video_id = self._match_valid_url(url).group('language', 'id')\n if not language:\n language = 'en'\n\n thumbnail_url = self._html_search_regex(\n r'(https?://images\\.digitalconcerthall\\.com/cms/thumbnails/.*\\.jpg)',\n self._download_webpage(url, video_id), 'thumbnail')\n thumbnails = [{\n 'url': thumbnail_url,\n **parse_resolution(thumbnail_url)\n }]\n\n vid_info = self._download_json(\n f'https://api.digitalconcerthall.com/v2/concert/{video_id}', video_id, headers={\n 'Accept': 'application/json',\n 'Accept-Language': language\n })\n album_artist = ' / '.join(traverse_obj(vid_info, ('_links', 'artist', ..., 'name')) or '')\n\n return {\n '_type': 'playlist',\n 'id': video_id,\n 'title': vid_info.get('title'),\n 'entries': self._entries(traverse_obj(vid_info, ('_embedded', ..., ...)), language,\n thumbnails=thumbnails, album_artist=album_artist),\n 'thumbnails': thumbnails,\n 'album_artist': album_artist,\n }\n", "path": "yt_dlp/extractor/digitalconcerthall.py"}], "after_files": [{"content": "from .common import InfoExtractor\n\nfrom ..utils import (\n ExtractorError,\n parse_resolution,\n traverse_obj,\n try_get,\n urlencode_postdata,\n)\n\n\nclass DigitalConcertHallIE(InfoExtractor):\n IE_DESC = 'DigitalConcertHall extractor'\n _VALID_URL = r'https?://(?:www\\.)?digitalconcerthall\\.com/(?P<language>[a-z]+)/(?P<type>film|concert)/(?P<id>[0-9]+)'\n _OAUTH_URL = 'https://api.digitalconcerthall.com/v2/oauth2/token'\n _ACCESS_TOKEN = None\n _NETRC_MACHINE = 'digitalconcerthall'\n _TESTS = [{\n 'note': 'Playlist with only one video',\n 'url': 'https://www.digitalconcerthall.com/en/concert/53201',\n 'info_dict': {\n 'id': '53201-1',\n 'ext': 'mp4',\n 'composer': 'Kurt Weill',\n 'title': '[Magic Night]',\n 'thumbnail': r're:^https?://images.digitalconcerthall.com/cms/thumbnails.*\\.jpg$',\n 'upload_date': '20210624',\n 'timestamp': 1624548600,\n 'duration': 2798,\n 'album_artist': 'Members of the Berliner Philharmoniker / Simon R\u00f6ssler',\n },\n 'params': {'skip_download': 'm3u8'},\n }, {\n 'note': 'Concert with several works and an interview',\n 'url': 'https://www.digitalconcerthall.com/en/concert/53785',\n 'info_dict': {\n 'id': '53785',\n 'album_artist': 'Berliner Philharmoniker / Kirill Petrenko',\n 'title': 'Kirill Petrenko conducts Mendelssohn and Shostakovich',\n },\n 'params': {'skip_download': 'm3u8'},\n 'playlist_count': 3,\n }, {\n 'url': 'https://www.digitalconcerthall.com/en/film/388',\n 'info_dict': {\n 'id': '388',\n 'ext': 'mp4',\n 'title': 'The Berliner Philharmoniker and Frank Peter Zimmermann',\n 'description': 'md5:cfe25a7044fa4be13743e5089b5b5eb2',\n 'thumbnail': r're:^https?://images.digitalconcerthall.com/cms/thumbnails.*\\.jpg$',\n 'upload_date': '20220714',\n 'timestamp': 1657785600,\n 'album_artist': 'Frank Peter Zimmermann / Benedikt von 
Bernstorff / Jakob von Bernstorff',\n },\n 'params': {'skip_download': 'm3u8'},\n }]\n\n def _perform_login(self, username, password):\n token_response = self._download_json(\n self._OAUTH_URL,\n None, 'Obtaining token', errnote='Unable to obtain token', data=urlencode_postdata({\n 'affiliate': 'none',\n 'grant_type': 'device',\n 'device_vendor': 'unknown',\n 'app_id': 'dch.webapp',\n 'app_version': '1.0.0',\n 'client_secret': '2ySLN+2Fwb',\n }), headers={\n 'Content-Type': 'application/x-www-form-urlencoded',\n })\n self._ACCESS_TOKEN = token_response['access_token']\n try:\n self._download_json(\n self._OAUTH_URL,\n None, note='Logging in', errnote='Unable to login', data=urlencode_postdata({\n 'grant_type': 'password',\n 'username': username,\n 'password': password,\n }), headers={\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'Referer': 'https://www.digitalconcerthall.com',\n 'Authorization': f'Bearer {self._ACCESS_TOKEN}'\n })\n except ExtractorError:\n self.raise_login_required(msg='Login info incorrect')\n\n def _real_initialize(self):\n if not self._ACCESS_TOKEN:\n self.raise_login_required(method='password')\n\n def _entries(self, items, language, type_, **kwargs):\n for item in items:\n video_id = item['id']\n stream_info = self._download_json(\n self._proto_relative_url(item['_links']['streams']['href']), video_id, headers={\n 'Accept': 'application/json',\n 'Authorization': f'Bearer {self._ACCESS_TOKEN}',\n 'Accept-Language': language\n })\n\n m3u8_url = traverse_obj(\n stream_info, ('channel', lambda k, _: k.startswith('vod_mixed'), 'stream', 0, 'url'), get_all=False)\n formats = self._extract_m3u8_formats(m3u8_url, video_id, 'mp4', 'm3u8_native', fatal=False)\n\n yield {\n 'id': video_id,\n 'title': item.get('title'),\n 'composer': item.get('name_composer'),\n 'url': m3u8_url,\n 'formats': formats,\n 'duration': item.get('duration_total'),\n 'timestamp': traverse_obj(item, ('date', 'published')),\n 'description': item.get('short_description') or stream_info.get('short_description'),\n **kwargs,\n 'chapters': [{\n 'start_time': chapter.get('time'),\n 'end_time': try_get(chapter, lambda x: x['time'] + x['duration']),\n 'title': chapter.get('text'),\n } for chapter in item['cuepoints']] if item.get('cuepoints') and type_ == 'concert' else None,\n }\n\n def _real_extract(self, url):\n language, type_, video_id = self._match_valid_url(url).group('language', 'type', 'id')\n if not language:\n language = 'en'\n\n thumbnail_url = self._html_search_regex(\n r'(https?://images\\.digitalconcerthall\\.com/cms/thumbnails/.*\\.jpg)',\n self._download_webpage(url, video_id), 'thumbnail')\n thumbnails = [{\n 'url': thumbnail_url,\n **parse_resolution(thumbnail_url)\n }]\n\n vid_info = self._download_json(\n f'https://api.digitalconcerthall.com/v2/{type_}/{video_id}', video_id, headers={\n 'Accept': 'application/json',\n 'Accept-Language': language\n })\n album_artist = ' / '.join(traverse_obj(vid_info, ('_links', 'artist', ..., 'name')) or '')\n videos = [vid_info] if type_ == 'film' else traverse_obj(vid_info, ('_embedded', ..., ...))\n\n return {\n '_type': 'playlist',\n 'id': video_id,\n 'title': vid_info.get('title'),\n 'entries': self._entries(videos, language, thumbnails=thumbnails, album_artist=album_artist, type_=type_),\n 'thumbnails': thumbnails,\n 'album_artist': album_artist,\n }\n", "path": "yt_dlp/extractor/digitalconcerthall.py"}]} | 3,475 | 1,019 |
gh_patches_debug_9904 | rasdani/github-patches | git_diff | nipy__nipype-2432 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Workflow with iterables and cloned nodes fails when expanding iterables
### Summary
When running a workflow which includes a cloned node and iterables, the workflow will fail when expanding the iterables, because the id of the cloned node will be the same as that of the original node.
### Actual behavior
Running the workflow fails with the following error:
```
Traceback (most recent call last):
  File "<ipython-input-55-177d6eaeef2c>", line 27, in <module>
    workflow.run()
  File "/data/eaxfjord/anaconda2/lib/python2.7/site-packages/nipype/pipeline/engine/workflows.py", line 592, in run
    execgraph = generate_expanded_graph(deepcopy(flatgraph))
  File "/data/eaxfjord/anaconda2/lib/python2.7/site-packages/nipype/pipeline/engine/utils.py", line 1042, in generate_expanded_graph
    iterable_prefix, inode.synchronize)
  File "/data/eaxfjord/anaconda2/lib/python2.7/site-packages/nipype/pipeline/engine/utils.py", line 733, in _merge_graphs
    raise Exception(("Execution graph does not have a unique set of node "
Exception: Execution graph does not have a unique set of node names. Please rerun the workflow
```
### Expected behavior
The workflow should run to completion without errors.
### How to replicate the behavior
The following workflow will produce the error.
### Script/Workflow details
```python
from nipype.interfaces import utility as niu
from nipype.pipeline import engine as pe

def addstr(string):
    string = '%s+2' % string
    return string

subject_list = ['sub-001', 'sub-002']

inputnode = pe.Node(niu.IdentityInterface(fields=['subject']),
                    name='inputnode')
inputnode.iterables = [('subject', subject_list)]

node_1 = pe.Node(niu.Function(input_names='string',
                              output_names='string',
                              function=addstr),
                 name='node_1')
node_2 = node_1.clone('node_2')

workflow = pe.Workflow(name='my_workflow')
workflow.connect([(inputnode, node_1, [('subject', 'string')]),
                  (node_1, node_2, [('string', 'string')])])
workflow.run()
```
### Platform details:
/data/eaxfjord/anaconda2/lib/python2.7/site-packages/h5py/__init__.py:34: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
from ._conv import register_converters as _register_converters
{'nibabel_version': '2.2.1', 'sys_executable': '/data/eaxfjord/anaconda2/bin/python', 'networkx_version': '2.1', 'numpy_version': '1.14.0', 'sys_platform': 'linux2', 'sys_version': '2.7.13 |Anaconda custom (64-bit)| (default, Dec 20 2016, 23:09:15) \n[GCC 4.4.7 20120313 (Red Hat 4.4.7-1)]', 'commit_source': 'installation', 'commit_hash': '0a5948a0', 'pkg_path': '/data/eaxfjord/anaconda2/lib/python2.7/site-packages/nipype', 'nipype_version': '1.0.0', 'traits_version': '4.6.0', 'scipy_version': '1.0.0'}
1.0.0
### Execution environment
- My python environment outside container
--- END ISSUE ---
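The symptom points at how cloning is implemented rather than at the iterables machinery itself: `EngineBase.clone` in the file below deep-copies the node and re-assigns `clone.name`, but a pipeline `Node` also carries a private `_id` that the copy leaves set to the original node's id, so graph expansion later sees two nodes with the same id. A minimal sketch of that suspicion (hypothetical check, not part of the original report; assumes nipype 1.0.0 as in the issue):
```python
from nipype.interfaces import utility as niu
from nipype.pipeline import engine as pe

node_1 = pe.Node(niu.IdentityInterface(fields=['x']), name='node_1')
node_2 = node_1.clone('node_2')

print(node_2.name)  # 'node_2' -- clone() re-assigns the public name
print(node_2._id)   # expected to still print 'node_1' on affected versions
```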
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nipype/pipeline/engine/base.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
4 # vi: set ft=python sts=4 ts=4 sw=4 et:
5 """Defines functionality for pipelined execution of interfaces
6
7 The `EngineBase` class implements the more general view of a task.
8
9 .. testsetup::
10 # Change directory to provide relative paths for doctests
11 import os
12 filepath = os.path.dirname(os.path.realpath( __file__ ))
13 datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
14 os.chdir(datadir)
15
16 """
17 from __future__ import (print_function, division, unicode_literals,
18 absolute_import)
19 from builtins import object
20
21 from copy import deepcopy
22 import re
23 import numpy as np
24
25 from ... import config
26 from ...interfaces.base import DynamicTraitedSpec
27 from ...utils.filemanip import loadpkl, savepkl
28
29
30 class EngineBase(object):
31 """Defines common attributes and functions for workflows and nodes."""
32
33 def __init__(self, name=None, base_dir=None):
34 """ Initialize base parameters of a workflow or node
35
36 Parameters
37 ----------
38 name : string (mandatory)
39 Name of this node. Name must be alphanumeric and not contain any
40 special characters (e.g., '.', '@').
41 base_dir : string
42 base output directory (will be hashed before creations)
43 default=None, which results in the use of mkdtemp
44
45 """
46 self._hierarchy = None
47 self._name = None
48
49 self.base_dir = base_dir
50 self.config = deepcopy(config._sections)
51 self.name = name
52
53 @property
54 def name(self):
55 return self._name
56
57 @name.setter
58 def name(self, name):
59 if not name or not re.match(r'^[\w-]+$', name):
60 raise ValueError('[Workflow|Node] name "%s" is not valid.' % name)
61 self._name = name
62
63 @property
64 def fullname(self):
65 if self._hierarchy:
66 return '%s.%s' % (self._hierarchy, self.name)
67 return self.name
68
69 @property
70 def inputs(self):
71 raise NotImplementedError
72
73 @property
74 def outputs(self):
75 raise NotImplementedError
76
77 def clone(self, name):
78 """Clone an EngineBase object
79
80 Parameters
81 ----------
82
83 name : string (mandatory)
84 A clone of node or workflow must have a new name
85 """
86 if name == self.name:
87 raise ValueError('Cloning requires a new name, "%s" is in use.' % name)
88 clone = deepcopy(self)
89 clone.name = name
90 return clone
91
92 def _check_outputs(self, parameter):
93 return hasattr(self.outputs, parameter)
94
95 def _check_inputs(self, parameter):
96 if isinstance(self.inputs, DynamicTraitedSpec):
97 return True
98 return hasattr(self.inputs, parameter)
99
100 def __str__(self):
101 return self.fullname
102
103 def save(self, filename=None):
104 if filename is None:
105 filename = 'temp.pklz'
106 savepkl(filename, self)
107
108 def load(self, filename):
109 if '.npz' in filename:
110 DeprecationWarning(('npz files will be deprecated in the next '
111 'release. you can use numpy to open them.'))
112 return np.load(filename)
113 return loadpkl(filename)
114
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nipype/pipeline/engine/base.py b/nipype/pipeline/engine/base.py
--- a/nipype/pipeline/engine/base.py
+++ b/nipype/pipeline/engine/base.py
@@ -84,9 +84,12 @@
A clone of node or workflow must have a new name
"""
if name == self.name:
- raise ValueError('Cloning requires a new name, "%s" is in use.' % name)
+ raise ValueError('Cloning requires a new name, "%s" is '
+ 'in use.' % name)
clone = deepcopy(self)
clone.name = name
+ if hasattr(clone, '_id'):
+ clone._id = name
return clone
def _check_outputs(self, parameter):
| {"golden_diff": "diff --git a/nipype/pipeline/engine/base.py b/nipype/pipeline/engine/base.py\n--- a/nipype/pipeline/engine/base.py\n+++ b/nipype/pipeline/engine/base.py\n@@ -84,9 +84,12 @@\n A clone of node or workflow must have a new name\n \"\"\"\n if name == self.name:\n- raise ValueError('Cloning requires a new name, \"%s\" is in use.' % name)\n+ raise ValueError('Cloning requires a new name, \"%s\" is '\n+ 'in use.' % name)\n clone = deepcopy(self)\n clone.name = name\n+ if hasattr(clone, '_id'):\n+ clone._id = name\n return clone\n \n def _check_outputs(self, parameter):\n", "issue": "workflow with iterables and cloned nodes fail when expanding iterables\n### Summary\r\nWhen running a workflow which includes a cloned node and iterables the workflow will fail when expanding the iterables because the id of the cloned node will be the same as the original one.\r\n\r\n### Actual behavior\r\nWill result in an error:\r\nTraceback (most recent call last):\r\n\r\n File \"<ipython-input-55-177d6eaeef2c>\", line 27, in <module>\r\n workflow.run()\r\n\r\n File \"/data/eaxfjord/anaconda2/lib/python2.7/site-packages/nipype/pipeline/engine/workflows.py\", line 592, in run\r\n execgraph = generate_expanded_graph(deepcopy(flatgraph))\r\n\r\n File \"/data/eaxfjord/anaconda2/lib/python2.7/site-packages/nipype/pipeline/engine/utils.py\", line 1042, in generate_expanded_graph\r\n iterable_prefix, inode.synchronize)\r\n\r\n File \"/data/eaxfjord/anaconda2/lib/python2.7/site-packages/nipype/pipeline/engine/utils.py\", line 733, in _merge_graphs\r\n raise Exception((\"Execution graph does not have a unique set of node \"\r\n\r\nException: Execution graph does not have a unique set of node names. Please rerun the workflow\r\n\r\n### Expected behavior\r\nWill execute normally without the errors.\r\n\r\n### How to replicate the behavior\r\nThe following workflow will produce the error.\r\n\r\n### Script/Workflow details\r\n```python\r\nfrom nipype.interfaces import utility as niu\r\nfrom nipype.pipeline import engine as pe\r\n\r\ndef addstr(string):\r\n string = ('%s+2' % string )\r\n return string\r\n\r\nsubject_list = ['sub-001', 'sub-002']\r\n\r\ninputnode = pe.Node(niu.IdentityInterface(fields = ['subject']),\r\n name = 'inputnode')\r\ninputnode.iterables = [('subject', subject_list)]\r\n\r\nnode_1 = pe.Node(niu.Function(input_names='string',\r\n output_names= 'string',\r\n function = addstr),name='node_1')\r\nnode_2 = node_1.clone('node_2')\r\n\r\nworkflow = pe.Workflow(name='my_workflow')\r\nworkflow.connect([(inputnode, node_1, [('subject','string')]),\r\n (node_1, node_2, [('string','string')])])\r\nworkflow.run()\r\n```\r\n### Platform details:\r\n/data/eaxfjord/anaconda2/lib/python2.7/site-packages/h5py/__init__.py:34: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. 
In future, it will be treated as `np.float64 == np.dtype(float).type`.\r\n from ._conv import register_converters as _register_converters\r\n{'nibabel_version': '2.2.1', 'sys_executable': '/data/eaxfjord/anaconda2/bin/python', 'networkx_version': '2.1', 'numpy_version': '1.14.0', 'sys_platform': 'linux2', 'sys_version': '2.7.13 |Anaconda custom (64-bit)| (default, Dec 20 2016, 23:09:15) \\n[GCC 4.4.7 20120313 (Red Hat 4.4.7-1)]', 'commit_source': 'installation', 'commit_hash': '0a5948a0', 'pkg_path': '/data/eaxfjord/anaconda2/lib/python2.7/site-packages/nipype', 'nipype_version': '1.0.0', 'traits_version': '4.6.0', 'scipy_version': '1.0.0'}\r\n1.0.0\r\n\r\n\r\n### Execution environment\r\n- My python environment outside container\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"Defines functionality for pipelined execution of interfaces\n\nThe `EngineBase` class implements the more general view of a task.\n\n .. testsetup::\n # Change directory to provide relative paths for doctests\n import os\n filepath = os.path.dirname(os.path.realpath( __file__ ))\n datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))\n os.chdir(datadir)\n\n\"\"\"\nfrom __future__ import (print_function, division, unicode_literals,\n absolute_import)\nfrom builtins import object\n\nfrom copy import deepcopy\nimport re\nimport numpy as np\n\nfrom ... import config\nfrom ...interfaces.base import DynamicTraitedSpec\nfrom ...utils.filemanip import loadpkl, savepkl\n\n\nclass EngineBase(object):\n \"\"\"Defines common attributes and functions for workflows and nodes.\"\"\"\n\n def __init__(self, name=None, base_dir=None):\n \"\"\" Initialize base parameters of a workflow or node\n\n Parameters\n ----------\n name : string (mandatory)\n Name of this node. Name must be alphanumeric and not contain any\n special characters (e.g., '.', '@').\n base_dir : string\n base output directory (will be hashed before creations)\n default=None, which results in the use of mkdtemp\n\n \"\"\"\n self._hierarchy = None\n self._name = None\n\n self.base_dir = base_dir\n self.config = deepcopy(config._sections)\n self.name = name\n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, name):\n if not name or not re.match(r'^[\\w-]+$', name):\n raise ValueError('[Workflow|Node] name \"%s\" is not valid.' % name)\n self._name = name\n\n @property\n def fullname(self):\n if self._hierarchy:\n return '%s.%s' % (self._hierarchy, self.name)\n return self.name\n\n @property\n def inputs(self):\n raise NotImplementedError\n\n @property\n def outputs(self):\n raise NotImplementedError\n\n def clone(self, name):\n \"\"\"Clone an EngineBase object\n\n Parameters\n ----------\n\n name : string (mandatory)\n A clone of node or workflow must have a new name\n \"\"\"\n if name == self.name:\n raise ValueError('Cloning requires a new name, \"%s\" is in use.' 
% name)\n clone = deepcopy(self)\n clone.name = name\n return clone\n\n def _check_outputs(self, parameter):\n return hasattr(self.outputs, parameter)\n\n def _check_inputs(self, parameter):\n if isinstance(self.inputs, DynamicTraitedSpec):\n return True\n return hasattr(self.inputs, parameter)\n\n def __str__(self):\n return self.fullname\n\n def save(self, filename=None):\n if filename is None:\n filename = 'temp.pklz'\n savepkl(filename, self)\n\n def load(self, filename):\n if '.npz' in filename:\n DeprecationWarning(('npz files will be deprecated in the next '\n 'release. you can use numpy to open them.'))\n return np.load(filename)\n return loadpkl(filename)\n", "path": "nipype/pipeline/engine/base.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"Defines functionality for pipelined execution of interfaces\n\nThe `EngineBase` class implements the more general view of a task.\n\n .. testsetup::\n # Change directory to provide relative paths for doctests\n import os\n filepath = os.path.dirname(os.path.realpath( __file__ ))\n datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))\n os.chdir(datadir)\n\n\"\"\"\nfrom __future__ import (print_function, division, unicode_literals,\n absolute_import)\nfrom builtins import object\n\nfrom copy import deepcopy\nimport re\nimport numpy as np\n\nfrom ... import config\nfrom ...interfaces.base import DynamicTraitedSpec\nfrom ...utils.filemanip import loadpkl, savepkl\n\n\nclass EngineBase(object):\n \"\"\"Defines common attributes and functions for workflows and nodes.\"\"\"\n\n def __init__(self, name=None, base_dir=None):\n \"\"\" Initialize base parameters of a workflow or node\n\n Parameters\n ----------\n name : string (mandatory)\n Name of this node. Name must be alphanumeric and not contain any\n special characters (e.g., '.', '@').\n base_dir : string\n base output directory (will be hashed before creations)\n default=None, which results in the use of mkdtemp\n\n \"\"\"\n self._hierarchy = None\n self._name = None\n\n self.base_dir = base_dir\n self.config = deepcopy(config._sections)\n self.name = name\n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, name):\n if not name or not re.match(r'^[\\w-]+$', name):\n raise ValueError('[Workflow|Node] name \"%s\" is not valid.' % name)\n self._name = name\n\n @property\n def fullname(self):\n if self._hierarchy:\n return '%s.%s' % (self._hierarchy, self.name)\n return self.name\n\n @property\n def inputs(self):\n raise NotImplementedError\n\n @property\n def outputs(self):\n raise NotImplementedError\n\n def clone(self, name):\n \"\"\"Clone an EngineBase object\n\n Parameters\n ----------\n\n name : string (mandatory)\n A clone of node or workflow must have a new name\n \"\"\"\n if name == self.name:\n raise ValueError('Cloning requires a new name, \"%s\" is '\n 'in use.' 
% name)\n clone = deepcopy(self)\n clone.name = name\n if hasattr(clone, '_id'):\n clone._id = name\n return clone\n\n def _check_outputs(self, parameter):\n return hasattr(self.outputs, parameter)\n\n def _check_inputs(self, parameter):\n if isinstance(self.inputs, DynamicTraitedSpec):\n return True\n return hasattr(self.inputs, parameter)\n\n def __str__(self):\n return self.fullname\n\n def save(self, filename=None):\n if filename is None:\n filename = 'temp.pklz'\n savepkl(filename, self)\n\n def load(self, filename):\n if '.npz' in filename:\n DeprecationWarning(('npz files will be deprecated in the next '\n 'release. you can use numpy to open them.'))\n return np.load(filename)\n return loadpkl(filename)\n", "path": "nipype/pipeline/engine/base.py"}]} | 2,082 | 169 |
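With the patch above applied, the clone keeps its own id, so the reproduction script from the issue expands its iterables cleanly. A quick sanity check might look like this (sketch; assumes a nipype build that contains the fix):
```python
from nipype.interfaces import utility as niu
from nipype.pipeline import engine as pe

node_1 = pe.Node(niu.IdentityInterface(fields=['string']), name='node_1')
node_2 = node_1.clone('node_2')

assert node_2.name == 'node_2'
assert node_2._id == 'node_2'  # fails before the fix above, passes after it
```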
gh_patches_debug_28818 | rasdani/github-patches | git_diff | quantumlib__Cirq-1714 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PhasedXPowGate raised to a symbol power fails is_parameterized protocol
The following
```
g = cirq.PhasedXPowGate(exponent=0.1, phase_exponent=0.2)
val = cirq.is_parameterized(g ** sympy.Symbol('s'))
```
has `val` equal to `False`. Compare this with
```
h = cirq.XPowGate(exponent=0.1)
val = cirq.is_parameterized(h ** sympy.Symbol('s'))
```
which has `val` equal to `True`.
--- END ISSUE ---
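The discrepancy can be narrowed down before looking at the files: raising the gate to a symbolic power multiplies its exponent by the symbol, and the product is a compound `sympy` expression rather than a bare `Symbol`. A short illustration (illustrative sketch only, using sympy directly rather than cirq):
```python
import sympy

s = sympy.Symbol('s')
exponent = 0.1 * s   # roughly what the gate ends up storing after `g ** s`

print(type(exponent))                      # <class 'sympy.core.mul.Mul'>
print(isinstance(exponent, sympy.Symbol))  # False -- an isinstance-on-Symbol check misses it
print(isinstance(exponent, sympy.Basic))   # True  -- it is still a symbolic expression
```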
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cirq/value/periodic_value.py`
Content:
```
1 # Copyright 2018 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import Any, Union
16
17 import sympy
18
19 from cirq._compat import proper_repr
20 import cirq.protocols
21
22
23 class PeriodicValue:
24 """Wrapper for periodic numerical values.
25
26 Wrapper for periodic numerical types which implements `__eq__`, `__ne__`,
27 `__hash__` and `_approx_eq_` so that values which are in the same
28 equivalence class are treated as equal.
29
30 Internally the `value` passed to `__init__` is normalized to the interval
31 [0, `period`) and stored as that. Specialized version of `_approx_eq_` is
32 provided to cover values which end up at the opposite edges of this
33 interval.
34 """
35
36 def __init__(self, value: Union[int, float], period: Union[int, float]):
37 """Initializes the equivalence class.
38
39 Args:
40 value: numerical value to wrap.
41 period: periodicity of the numerical value.
42 """
43 self.value = value % period
44 self.period = period
45
46 def __eq__(self, other: Any) -> bool:
47 if not isinstance(other, type(self)):
48 return NotImplemented
49 return (self.value, self.period) == (other.value, other.period)
50
51 def __ne__(self, other: Any) -> bool:
52 return not self == other
53
54 def __hash__(self) -> int:
55 return hash((type(self), self.value, self.period))
56
57 def _approx_eq_(self, other: Any, atol: float) -> bool:
58 """Implementation of `SupportsApproximateEquality` protocol."""
59 if not isinstance(other, type(self)):
60 return NotImplemented
61
62 #self.value = value % period in __init__() creates a Mod
63 if isinstance(other.value, sympy.Mod):
64 return self.value == other.value
65 # Periods must be exactly equal to avoid drift of normalized value when
66 # original value increases.
67 if self.period != other.period:
68 return False
69
70 low = min(self.value, other.value)
71 high = max(self.value, other.value)
72
73 # Shift lower value outside of normalization interval in case low and
74 # high values are at the opposite borders of normalization interval.
75 if high - low > self.period / 2:
76 low += self.period
77
78 return cirq.protocols.approx_eq(low, high, atol=atol)
79
80 def __repr__(self):
81 return 'cirq.PeriodicValue({}, {})'.format(proper_repr(self.value),
82 proper_repr(self.period))
83
84 def _is_parameterized_(self):
85 return any(isinstance(val, sympy.Basic)
86 for val in (self.value, self.period))
87
```
Path: `cirq/ops/phased_x_gate.py`
Content:
```
1 # Copyright 2018 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """An `XPowGate` conjugated by `ZPowGate`s."""
16 from typing import Union, Sequence, Tuple, Optional, cast
17
18 import math
19 import numpy as np
20 import sympy
21
22 import cirq
23 from cirq import value, protocols
24 from cirq._compat import proper_repr
25 from cirq.ops import gate_features, raw_types, op_tree
26 from cirq.type_workarounds import NotImplementedType
27
28 @value.value_equality
29 class PhasedXPowGate(gate_features.SingleQubitGate):
30 """A gate equivalent to the circuit ───Z^-p───X^t───Z^p───."""
31
32 def __new__(cls,
33 *,
34 phase_exponent: Union[float, sympy.Symbol],
35 exponent: Union[float, sympy.Symbol] = 1.0,
36 global_shift: float = 0.0):
37 """Substitutes a raw X or raw Y if possible.
38
39 Args:
40 phase_exponent: The exponent on the Z gates conjugating the X gate.
41 exponent: The exponent on the X gate conjugated by Zs.
42 global_shift: How much to shift the operation's eigenvalues at
43 exponent=1.
44 """
45 p = value.canonicalize_half_turns(phase_exponent)
46 if p == 0:
47 return cirq.ops.common_gates.XPowGate(
48 exponent=exponent,
49 global_shift=global_shift)
50 if p == 0.5:
51 return cirq.ops.common_gates.YPowGate(
52 exponent=exponent,
53 global_shift=global_shift)
54 if p == 1 and not isinstance(exponent, sympy.Symbol):
55 return cirq.ops.common_gates.XPowGate(
56 exponent=-exponent,
57 global_shift=global_shift)
58 if p == -0.5 and not isinstance(exponent, sympy.Symbol):
59 return cirq.ops.common_gates.YPowGate(
60 exponent=-exponent,
61 global_shift=global_shift)
62 return super().__new__(cls)
63
64 def __init__(self,
65 *,
66 phase_exponent: Union[float, sympy.Symbol],
67 exponent: Union[float, sympy.Symbol] = 1.0,
68 global_shift: float = 0.0) -> None:
69 """
70 Args:
71 phase_exponent: The exponent on the Z gates conjugating the X gate.
72 exponent: The exponent on the X gate conjugated by Zs.
73 global_shift: How much to shift the operation's eigenvalues at
74 exponent=1.
75 """
76 self._phase_exponent = value.canonicalize_half_turns(phase_exponent)
77 self._exponent = exponent
78 self._global_shift = global_shift
79
80 def _qasm_(self,
81 args: protocols.QasmArgs,
82 qubits: Tuple[raw_types.Qid, ...]) -> Optional[str]:
83 if cirq.is_parameterized(self):
84 return None
85
86 args.validate_version('2.0')
87
88 e = cast(float, value.canonicalize_half_turns(self._exponent))
89 p = cast(float, self.phase_exponent)
90 epsilon = 10**-args.precision
91
92 if abs(e + 0.5) <= epsilon:
93 return args.format('u2({0:half_turns}, {1:half_turns}) {2};\n',
94 p + 0.5, -p - 0.5, qubits[0])
95
96 if abs(e - 0.5) <= epsilon:
97 return args.format('u2({0:half_turns}, {1:half_turns}) {2};\n',
98 p - 0.5, -p + 0.5, qubits[0])
99
100 return args.format(
101 'u3({0:half_turns}, {1:half_turns}, {2:half_turns}) {3};\n',
102 -e, p + 0.5, -p - 0.5, qubits[0])
103
104 def _decompose_(self, qubits: Sequence[raw_types.Qid]
105 ) -> op_tree.OP_TREE:
106 assert len(qubits) == 1
107 q = qubits[0]
108 z = cirq.Z(q)**self._phase_exponent
109 x = cirq.X(q)**self._exponent
110 if protocols.is_parameterized(z):
111 return NotImplemented
112 return z**-1, x, z
113
114 @property
115 def exponent(self) -> Union[float, sympy.Symbol]:
116 """The exponent on the central X gate conjugated by the Z gates."""
117 return self._exponent
118
119 @property
120 def phase_exponent(self) -> Union[float, sympy.Symbol]:
121 """The exponent on the Z gates conjugating the X gate."""
122 return self._phase_exponent
123
124 def __pow__(self, exponent: Union[float, sympy.Symbol]) -> 'PhasedXPowGate':
125 new_exponent = protocols.mul(self._exponent, exponent, NotImplemented)
126 if new_exponent is NotImplemented:
127 return NotImplemented
128 return PhasedXPowGate(phase_exponent=self._phase_exponent,
129 exponent=new_exponent,
130 global_shift=self._global_shift)
131
132 def _trace_distance_bound_(self):
133 """See `cirq.SupportsTraceDistanceBound`."""
134 return protocols.trace_distance_bound(cirq.X**self._exponent)
135
136 def _unitary_(self) -> Union[np.ndarray, NotImplementedType]:
137 """See `cirq.SupportsUnitary`."""
138 if self._is_parameterized_():
139 return NotImplemented
140 z = protocols.unitary(cirq.Z**self._phase_exponent)
141 x = protocols.unitary(cirq.X**self._exponent)
142 p = np.exp(1j * np.pi * self._global_shift * self._exponent)
143 return np.dot(np.dot(z, x), np.conj(z)) * p
144
145 def _pauli_expansion_(self) -> value.LinearDict[str]:
146 if self._is_parameterized_():
147 return NotImplemented
148 phase_angle = np.pi * self._phase_exponent / 2
149 angle = np.pi * self._exponent / 2
150 phase = 1j**(2 * self._exponent * (self._global_shift + 0.5))
151 return value.LinearDict({
152 'I': phase * np.cos(angle),
153 'X': -1j * phase * np.sin(angle) * np.cos(2 * phase_angle),
154 'Y': -1j * phase * np.sin(angle) * np.sin(2 * phase_angle),
155 })
156
157 def _is_parameterized_(self) -> bool:
158 """See `cirq.SupportsParameterization`."""
159 return (isinstance(self._exponent, sympy.Symbol) or
160 isinstance(self._phase_exponent, sympy.Symbol))
161
162 def _resolve_parameters_(self, param_resolver) -> 'PhasedXPowGate':
163 """See `cirq.SupportsParameterization`."""
164 return PhasedXPowGate(
165 phase_exponent=param_resolver.value_of(self._phase_exponent),
166 exponent=param_resolver.value_of(self._exponent),
167 global_shift=self._global_shift)
168
169 def _phase_by_(self, phase_turns, qubit_index):
170 """See `cirq.SupportsPhase`."""
171 assert qubit_index == 0
172 return PhasedXPowGate(
173 exponent=self._exponent,
174 phase_exponent=self._phase_exponent + phase_turns * 2,
175 global_shift=self._global_shift)
176
177 def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs
178 ) -> protocols.CircuitDiagramInfo:
179 """See `cirq.SupportsCircuitDiagramInfo`."""
180
181 if (isinstance(self.phase_exponent, sympy.Symbol) or
182 args.precision is None):
183 s = 'PhasedX({})'.format(self.phase_exponent)
184 else:
185 s = 'PhasedX({{:.{}}})'.format(args.precision).format(
186 self.phase_exponent)
187 return protocols.CircuitDiagramInfo(
188 wire_symbols=(s,),
189 exponent=value.canonicalize_half_turns(self._exponent))
190
191 def __str__(self):
192 info = protocols.circuit_diagram_info(self)
193 if info.exponent == 1:
194 return info.wire_symbols[0]
195 return '{}^{}'.format(info.wire_symbols[0], info.exponent)
196
197 def __repr__(self):
198 args = ['phase_exponent={}'.format(proper_repr(self.phase_exponent))]
199 if self.exponent != 1:
200 args.append('exponent={}'.format(proper_repr(self.exponent)))
201 if self._global_shift != 0:
202 args.append('global_shift={!r}'.format(self._global_shift))
203 return 'cirq.PhasedXPowGate({})'.format(', '.join(args))
204
205 def _period(self):
206 exponents = [self._global_shift, 1 + self._global_shift]
207 real_periods = [abs(2/e) for e in exponents if e != 0]
208 int_periods = [int(np.round(e)) for e in real_periods]
209 if any(i != r for i, r in zip(real_periods, int_periods)):
210 return None
211 if len(int_periods) == 1:
212 return int_periods[0]
213 return int_periods[0] * int_periods[1] / math.gcd(*int_periods)
214
215 @property
216 def _canonical_exponent(self):
217 period = self._period()
218 if not period or isinstance(self._exponent, sympy.Symbol):
219 return self._exponent
220 else:
221 return self._exponent % period
222
223 def _value_equality_values_(self):
224 return self.phase_exponent, self._canonical_exponent, self._global_shift
225
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cirq/ops/phased_x_gate.py b/cirq/ops/phased_x_gate.py
--- a/cirq/ops/phased_x_gate.py
+++ b/cirq/ops/phased_x_gate.py
@@ -156,8 +156,8 @@
def _is_parameterized_(self) -> bool:
"""See `cirq.SupportsParameterization`."""
- return (isinstance(self._exponent, sympy.Symbol) or
- isinstance(self._phase_exponent, sympy.Symbol))
+ return (protocols.is_parameterized(self._exponent) or
+ protocols.is_parameterized(self._phase_exponent))
def _resolve_parameters_(self, param_resolver) -> 'PhasedXPowGate':
"""See `cirq.SupportsParameterization`."""
diff --git a/cirq/value/periodic_value.py b/cirq/value/periodic_value.py
--- a/cirq/value/periodic_value.py
+++ b/cirq/value/periodic_value.py
@@ -16,8 +16,8 @@
import sympy
+from cirq import protocols
from cirq._compat import proper_repr
-import cirq.protocols
class PeriodicValue:
@@ -75,12 +75,13 @@
if high - low > self.period / 2:
low += self.period
- return cirq.protocols.approx_eq(low, high, atol=atol)
+ return protocols.approx_eq(low, high, atol=atol)
def __repr__(self):
return 'cirq.PeriodicValue({}, {})'.format(proper_repr(self.value),
proper_repr(self.period))
def _is_parameterized_(self):
- return any(isinstance(val, sympy.Basic)
- for val in (self.value, self.period))
+ return any(
+ protocols.is_parameterized(val)
+ for val in (self.value, self.period))
| {"golden_diff": "diff --git a/cirq/ops/phased_x_gate.py b/cirq/ops/phased_x_gate.py\n--- a/cirq/ops/phased_x_gate.py\n+++ b/cirq/ops/phased_x_gate.py\n@@ -156,8 +156,8 @@\n \n def _is_parameterized_(self) -> bool:\n \"\"\"See `cirq.SupportsParameterization`.\"\"\"\n- return (isinstance(self._exponent, sympy.Symbol) or\n- isinstance(self._phase_exponent, sympy.Symbol))\n+ return (protocols.is_parameterized(self._exponent) or\n+ protocols.is_parameterized(self._phase_exponent))\n \n def _resolve_parameters_(self, param_resolver) -> 'PhasedXPowGate':\n \"\"\"See `cirq.SupportsParameterization`.\"\"\"\ndiff --git a/cirq/value/periodic_value.py b/cirq/value/periodic_value.py\n--- a/cirq/value/periodic_value.py\n+++ b/cirq/value/periodic_value.py\n@@ -16,8 +16,8 @@\n \n import sympy\n \n+from cirq import protocols\n from cirq._compat import proper_repr\n-import cirq.protocols\n \n \n class PeriodicValue:\n@@ -75,12 +75,13 @@\n if high - low > self.period / 2:\n low += self.period\n \n- return cirq.protocols.approx_eq(low, high, atol=atol)\n+ return protocols.approx_eq(low, high, atol=atol)\n \n def __repr__(self):\n return 'cirq.PeriodicValue({}, {})'.format(proper_repr(self.value),\n proper_repr(self.period))\n \n def _is_parameterized_(self):\n- return any(isinstance(val, sympy.Basic)\n- for val in (self.value, self.period))\n+ return any(\n+ protocols.is_parameterized(val)\n+ for val in (self.value, self.period))\n", "issue": "PhasedXPowGate raised to a symbol power fails is_parameterized protocol\nThe following \r\n```\r\ng = cirq.PhasedXPowGate(exponent=0.1, phase_exponent=0.2)\r\nval = cirq.is_parameterized(g ** sympy.Symbol('s'))\r\n```\r\nhas val False. Compared with \r\n```\r\nh = cirq.XPowGate(exponent=0.1)\r\nval = cirq.is_parameterized(h ** sympy.Symbol('s'))\r\n```\r\nwhich has val True. \n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Any, Union\n\nimport sympy\n\nfrom cirq._compat import proper_repr\nimport cirq.protocols\n\n\nclass PeriodicValue:\n \"\"\"Wrapper for periodic numerical values.\n\n Wrapper for periodic numerical types which implements `__eq__`, `__ne__`,\n `__hash__` and `_approx_eq_` so that values which are in the same\n equivalence class are treated as equal.\n\n Internally the `value` passed to `__init__` is normalized to the interval\n [0, `period`) and stored as that. 
Specialized version of `_approx_eq_` is\n provided to cover values which end up at the opposite edges of this\n interval.\n \"\"\"\n\n def __init__(self, value: Union[int, float], period: Union[int, float]):\n \"\"\"Initializes the equivalence class.\n\n Args:\n value: numerical value to wrap.\n period: periodicity of the numerical value.\n \"\"\"\n self.value = value % period\n self.period = period\n\n def __eq__(self, other: Any) -> bool:\n if not isinstance(other, type(self)):\n return NotImplemented\n return (self.value, self.period) == (other.value, other.period)\n\n def __ne__(self, other: Any) -> bool:\n return not self == other\n\n def __hash__(self) -> int:\n return hash((type(self), self.value, self.period))\n\n def _approx_eq_(self, other: Any, atol: float) -> bool:\n \"\"\"Implementation of `SupportsApproximateEquality` protocol.\"\"\"\n if not isinstance(other, type(self)):\n return NotImplemented\n\n #self.value = value % period in __init__() creates a Mod\n if isinstance(other.value, sympy.Mod):\n return self.value == other.value\n # Periods must be exactly equal to avoid drift of normalized value when\n # original value increases.\n if self.period != other.period:\n return False\n\n low = min(self.value, other.value)\n high = max(self.value, other.value)\n\n # Shift lower value outside of normalization interval in case low and\n # high values are at the opposite borders of normalization interval.\n if high - low > self.period / 2:\n low += self.period\n\n return cirq.protocols.approx_eq(low, high, atol=atol)\n\n def __repr__(self):\n return 'cirq.PeriodicValue({}, {})'.format(proper_repr(self.value),\n proper_repr(self.period))\n\n def _is_parameterized_(self):\n return any(isinstance(val, sympy.Basic)\n for val in (self.value, self.period))\n", "path": "cirq/value/periodic_value.py"}, {"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"An `XPowGate` conjugated by `ZPowGate`s.\"\"\"\nfrom typing import Union, Sequence, Tuple, Optional, cast\n\nimport math\nimport numpy as np\nimport sympy\n\nimport cirq\nfrom cirq import value, protocols\nfrom cirq._compat import proper_repr\nfrom cirq.ops import gate_features, raw_types, op_tree\nfrom cirq.type_workarounds import NotImplementedType\n\[email protected]_equality\nclass PhasedXPowGate(gate_features.SingleQubitGate):\n \"\"\"A gate equivalent to the circuit \u2500\u2500\u2500Z^-p\u2500\u2500\u2500X^t\u2500\u2500\u2500Z^p\u2500\u2500\u2500.\"\"\"\n\n def __new__(cls,\n *,\n phase_exponent: Union[float, sympy.Symbol],\n exponent: Union[float, sympy.Symbol] = 1.0,\n global_shift: float = 0.0):\n \"\"\"Substitutes a raw X or raw Y if possible.\n\n Args:\n phase_exponent: The exponent on the Z gates conjugating the X gate.\n exponent: The exponent on the X gate conjugated by Zs.\n global_shift: How much to shift the operation's eigenvalues at\n exponent=1.\n \"\"\"\n p = value.canonicalize_half_turns(phase_exponent)\n if p == 0:\n return cirq.ops.common_gates.XPowGate(\n 
exponent=exponent,\n global_shift=global_shift)\n if p == 0.5:\n return cirq.ops.common_gates.YPowGate(\n exponent=exponent,\n global_shift=global_shift)\n if p == 1 and not isinstance(exponent, sympy.Symbol):\n return cirq.ops.common_gates.XPowGate(\n exponent=-exponent,\n global_shift=global_shift)\n if p == -0.5 and not isinstance(exponent, sympy.Symbol):\n return cirq.ops.common_gates.YPowGate(\n exponent=-exponent,\n global_shift=global_shift)\n return super().__new__(cls)\n\n def __init__(self,\n *,\n phase_exponent: Union[float, sympy.Symbol],\n exponent: Union[float, sympy.Symbol] = 1.0,\n global_shift: float = 0.0) -> None:\n \"\"\"\n Args:\n phase_exponent: The exponent on the Z gates conjugating the X gate.\n exponent: The exponent on the X gate conjugated by Zs.\n global_shift: How much to shift the operation's eigenvalues at\n exponent=1.\n \"\"\"\n self._phase_exponent = value.canonicalize_half_turns(phase_exponent)\n self._exponent = exponent\n self._global_shift = global_shift\n\n def _qasm_(self,\n args: protocols.QasmArgs,\n qubits: Tuple[raw_types.Qid, ...]) -> Optional[str]:\n if cirq.is_parameterized(self):\n return None\n\n args.validate_version('2.0')\n\n e = cast(float, value.canonicalize_half_turns(self._exponent))\n p = cast(float, self.phase_exponent)\n epsilon = 10**-args.precision\n\n if abs(e + 0.5) <= epsilon:\n return args.format('u2({0:half_turns}, {1:half_turns}) {2};\\n',\n p + 0.5, -p - 0.5, qubits[0])\n\n if abs(e - 0.5) <= epsilon:\n return args.format('u2({0:half_turns}, {1:half_turns}) {2};\\n',\n p - 0.5, -p + 0.5, qubits[0])\n\n return args.format(\n 'u3({0:half_turns}, {1:half_turns}, {2:half_turns}) {3};\\n',\n -e, p + 0.5, -p - 0.5, qubits[0])\n\n def _decompose_(self, qubits: Sequence[raw_types.Qid]\n ) -> op_tree.OP_TREE:\n assert len(qubits) == 1\n q = qubits[0]\n z = cirq.Z(q)**self._phase_exponent\n x = cirq.X(q)**self._exponent\n if protocols.is_parameterized(z):\n return NotImplemented\n return z**-1, x, z\n\n @property\n def exponent(self) -> Union[float, sympy.Symbol]:\n \"\"\"The exponent on the central X gate conjugated by the Z gates.\"\"\"\n return self._exponent\n\n @property\n def phase_exponent(self) -> Union[float, sympy.Symbol]:\n \"\"\"The exponent on the Z gates conjugating the X gate.\"\"\"\n return self._phase_exponent\n\n def __pow__(self, exponent: Union[float, sympy.Symbol]) -> 'PhasedXPowGate':\n new_exponent = protocols.mul(self._exponent, exponent, NotImplemented)\n if new_exponent is NotImplemented:\n return NotImplemented\n return PhasedXPowGate(phase_exponent=self._phase_exponent,\n exponent=new_exponent,\n global_shift=self._global_shift)\n\n def _trace_distance_bound_(self):\n \"\"\"See `cirq.SupportsTraceDistanceBound`.\"\"\"\n return protocols.trace_distance_bound(cirq.X**self._exponent)\n\n def _unitary_(self) -> Union[np.ndarray, NotImplementedType]:\n \"\"\"See `cirq.SupportsUnitary`.\"\"\"\n if self._is_parameterized_():\n return NotImplemented\n z = protocols.unitary(cirq.Z**self._phase_exponent)\n x = protocols.unitary(cirq.X**self._exponent)\n p = np.exp(1j * np.pi * self._global_shift * self._exponent)\n return np.dot(np.dot(z, x), np.conj(z)) * p\n\n def _pauli_expansion_(self) -> value.LinearDict[str]:\n if self._is_parameterized_():\n return NotImplemented\n phase_angle = np.pi * self._phase_exponent / 2\n angle = np.pi * self._exponent / 2\n phase = 1j**(2 * self._exponent * (self._global_shift + 0.5))\n return value.LinearDict({\n 'I': phase * np.cos(angle),\n 'X': -1j * phase * np.sin(angle) * 
np.cos(2 * phase_angle),\n 'Y': -1j * phase * np.sin(angle) * np.sin(2 * phase_angle),\n })\n\n def _is_parameterized_(self) -> bool:\n \"\"\"See `cirq.SupportsParameterization`.\"\"\"\n return (isinstance(self._exponent, sympy.Symbol) or\n isinstance(self._phase_exponent, sympy.Symbol))\n\n def _resolve_parameters_(self, param_resolver) -> 'PhasedXPowGate':\n \"\"\"See `cirq.SupportsParameterization`.\"\"\"\n return PhasedXPowGate(\n phase_exponent=param_resolver.value_of(self._phase_exponent),\n exponent=param_resolver.value_of(self._exponent),\n global_shift=self._global_shift)\n\n def _phase_by_(self, phase_turns, qubit_index):\n \"\"\"See `cirq.SupportsPhase`.\"\"\"\n assert qubit_index == 0\n return PhasedXPowGate(\n exponent=self._exponent,\n phase_exponent=self._phase_exponent + phase_turns * 2,\n global_shift=self._global_shift)\n\n def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs\n ) -> protocols.CircuitDiagramInfo:\n \"\"\"See `cirq.SupportsCircuitDiagramInfo`.\"\"\"\n\n if (isinstance(self.phase_exponent, sympy.Symbol) or\n args.precision is None):\n s = 'PhasedX({})'.format(self.phase_exponent)\n else:\n s = 'PhasedX({{:.{}}})'.format(args.precision).format(\n self.phase_exponent)\n return protocols.CircuitDiagramInfo(\n wire_symbols=(s,),\n exponent=value.canonicalize_half_turns(self._exponent))\n\n def __str__(self):\n info = protocols.circuit_diagram_info(self)\n if info.exponent == 1:\n return info.wire_symbols[0]\n return '{}^{}'.format(info.wire_symbols[0], info.exponent)\n\n def __repr__(self):\n args = ['phase_exponent={}'.format(proper_repr(self.phase_exponent))]\n if self.exponent != 1:\n args.append('exponent={}'.format(proper_repr(self.exponent)))\n if self._global_shift != 0:\n args.append('global_shift={!r}'.format(self._global_shift))\n return 'cirq.PhasedXPowGate({})'.format(', '.join(args))\n\n def _period(self):\n exponents = [self._global_shift, 1 + self._global_shift]\n real_periods = [abs(2/e) for e in exponents if e != 0]\n int_periods = [int(np.round(e)) for e in real_periods]\n if any(i != r for i, r in zip(real_periods, int_periods)):\n return None\n if len(int_periods) == 1:\n return int_periods[0]\n return int_periods[0] * int_periods[1] / math.gcd(*int_periods)\n\n @property\n def _canonical_exponent(self):\n period = self._period()\n if not period or isinstance(self._exponent, sympy.Symbol):\n return self._exponent\n else:\n return self._exponent % period\n\n def _value_equality_values_(self):\n return self.phase_exponent, self._canonical_exponent, self._global_shift\n", "path": "cirq/ops/phased_x_gate.py"}], "after_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Any, Union\n\nimport sympy\n\nfrom cirq import protocols\nfrom cirq._compat import proper_repr\n\n\nclass PeriodicValue:\n \"\"\"Wrapper for periodic numerical values.\n\n Wrapper for periodic numerical types which implements `__eq__`, `__ne__`,\n `__hash__` and `_approx_eq_` so that 
values which are in the same\n equivalence class are treated as equal.\n\n Internally the `value` passed to `__init__` is normalized to the interval\n [0, `period`) and stored as that. Specialized version of `_approx_eq_` is\n provided to cover values which end up at the opposite edges of this\n interval.\n \"\"\"\n\n def __init__(self, value: Union[int, float], period: Union[int, float]):\n \"\"\"Initializes the equivalence class.\n\n Args:\n value: numerical value to wrap.\n period: periodicity of the numerical value.\n \"\"\"\n self.value = value % period\n self.period = period\n\n def __eq__(self, other: Any) -> bool:\n if not isinstance(other, type(self)):\n return NotImplemented\n return (self.value, self.period) == (other.value, other.period)\n\n def __ne__(self, other: Any) -> bool:\n return not self == other\n\n def __hash__(self) -> int:\n return hash((type(self), self.value, self.period))\n\n def _approx_eq_(self, other: Any, atol: float) -> bool:\n \"\"\"Implementation of `SupportsApproximateEquality` protocol.\"\"\"\n if not isinstance(other, type(self)):\n return NotImplemented\n\n #self.value = value % period in __init__() creates a Mod\n if isinstance(other.value, sympy.Mod):\n return self.value == other.value\n # Periods must be exactly equal to avoid drift of normalized value when\n # original value increases.\n if self.period != other.period:\n return False\n\n low = min(self.value, other.value)\n high = max(self.value, other.value)\n\n # Shift lower value outside of normalization interval in case low and\n # high values are at the opposite borders of normalization interval.\n if high - low > self.period / 2:\n low += self.period\n\n return protocols.approx_eq(low, high, atol=atol)\n\n def __repr__(self):\n return 'cirq.PeriodicValue({}, {})'.format(proper_repr(self.value),\n proper_repr(self.period))\n\n def _is_parameterized_(self):\n return any(\n protocols.is_parameterized(val)\n for val in (self.value, self.period))\n", "path": "cirq/value/periodic_value.py"}, {"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"An `XPowGate` conjugated by `ZPowGate`s.\"\"\"\nfrom typing import Union, Sequence, Tuple, Optional, cast\n\nimport math\nimport numpy as np\nimport sympy\n\nimport cirq\nfrom cirq import value, protocols\nfrom cirq._compat import proper_repr\nfrom cirq.ops import gate_features, raw_types, op_tree\nfrom cirq.type_workarounds import NotImplementedType\n\[email protected]_equality\nclass PhasedXPowGate(gate_features.SingleQubitGate):\n \"\"\"A gate equivalent to the circuit \u2500\u2500\u2500Z^-p\u2500\u2500\u2500X^t\u2500\u2500\u2500Z^p\u2500\u2500\u2500.\"\"\"\n\n def __new__(cls,\n *,\n phase_exponent: Union[float, sympy.Symbol],\n exponent: Union[float, sympy.Symbol] = 1.0,\n global_shift: float = 0.0):\n \"\"\"Substitutes a raw X or raw Y if possible.\n\n Args:\n phase_exponent: The exponent on the Z gates conjugating the X gate.\n exponent: The exponent on the X gate conjugated by Zs.\n global_shift: 
How much to shift the operation's eigenvalues at\n exponent=1.\n \"\"\"\n p = value.canonicalize_half_turns(phase_exponent)\n if p == 0:\n return cirq.ops.common_gates.XPowGate(\n exponent=exponent,\n global_shift=global_shift)\n if p == 0.5:\n return cirq.ops.common_gates.YPowGate(\n exponent=exponent,\n global_shift=global_shift)\n if p == 1 and not isinstance(exponent, sympy.Symbol):\n return cirq.ops.common_gates.XPowGate(\n exponent=-exponent,\n global_shift=global_shift)\n if p == -0.5 and not isinstance(exponent, sympy.Symbol):\n return cirq.ops.common_gates.YPowGate(\n exponent=-exponent,\n global_shift=global_shift)\n return super().__new__(cls)\n\n def __init__(self,\n *,\n phase_exponent: Union[float, sympy.Symbol],\n exponent: Union[float, sympy.Symbol] = 1.0,\n global_shift: float = 0.0) -> None:\n \"\"\"\n Args:\n phase_exponent: The exponent on the Z gates conjugating the X gate.\n exponent: The exponent on the X gate conjugated by Zs.\n global_shift: How much to shift the operation's eigenvalues at\n exponent=1.\n \"\"\"\n self._phase_exponent = value.canonicalize_half_turns(phase_exponent)\n self._exponent = exponent\n self._global_shift = global_shift\n\n def _qasm_(self,\n args: protocols.QasmArgs,\n qubits: Tuple[raw_types.Qid, ...]) -> Optional[str]:\n if cirq.is_parameterized(self):\n return None\n\n args.validate_version('2.0')\n\n e = cast(float, value.canonicalize_half_turns(self._exponent))\n p = cast(float, self.phase_exponent)\n epsilon = 10**-args.precision\n\n if abs(e + 0.5) <= epsilon:\n return args.format('u2({0:half_turns}, {1:half_turns}) {2};\\n',\n p + 0.5, -p - 0.5, qubits[0])\n\n if abs(e - 0.5) <= epsilon:\n return args.format('u2({0:half_turns}, {1:half_turns}) {2};\\n',\n p - 0.5, -p + 0.5, qubits[0])\n\n return args.format(\n 'u3({0:half_turns}, {1:half_turns}, {2:half_turns}) {3};\\n',\n -e, p + 0.5, -p - 0.5, qubits[0])\n\n def _decompose_(self, qubits: Sequence[raw_types.Qid]\n ) -> op_tree.OP_TREE:\n assert len(qubits) == 1\n q = qubits[0]\n z = cirq.Z(q)**self._phase_exponent\n x = cirq.X(q)**self._exponent\n if protocols.is_parameterized(z):\n return NotImplemented\n return z**-1, x, z\n\n @property\n def exponent(self) -> Union[float, sympy.Symbol]:\n \"\"\"The exponent on the central X gate conjugated by the Z gates.\"\"\"\n return self._exponent\n\n @property\n def phase_exponent(self) -> Union[float, sympy.Symbol]:\n \"\"\"The exponent on the Z gates conjugating the X gate.\"\"\"\n return self._phase_exponent\n\n def __pow__(self, exponent: Union[float, sympy.Symbol]) -> 'PhasedXPowGate':\n new_exponent = protocols.mul(self._exponent, exponent, NotImplemented)\n if new_exponent is NotImplemented:\n return NotImplemented\n return PhasedXPowGate(phase_exponent=self._phase_exponent,\n exponent=new_exponent,\n global_shift=self._global_shift)\n\n def _trace_distance_bound_(self):\n \"\"\"See `cirq.SupportsTraceDistanceBound`.\"\"\"\n return protocols.trace_distance_bound(cirq.X**self._exponent)\n\n def _unitary_(self) -> Union[np.ndarray, NotImplementedType]:\n \"\"\"See `cirq.SupportsUnitary`.\"\"\"\n if self._is_parameterized_():\n return NotImplemented\n z = protocols.unitary(cirq.Z**self._phase_exponent)\n x = protocols.unitary(cirq.X**self._exponent)\n p = np.exp(1j * np.pi * self._global_shift * self._exponent)\n return np.dot(np.dot(z, x), np.conj(z)) * p\n\n def _pauli_expansion_(self) -> value.LinearDict[str]:\n if self._is_parameterized_():\n return NotImplemented\n phase_angle = np.pi * self._phase_exponent / 2\n angle = np.pi * 
self._exponent / 2\n phase = 1j**(2 * self._exponent * (self._global_shift + 0.5))\n return value.LinearDict({\n 'I': phase * np.cos(angle),\n 'X': -1j * phase * np.sin(angle) * np.cos(2 * phase_angle),\n 'Y': -1j * phase * np.sin(angle) * np.sin(2 * phase_angle),\n })\n\n def _is_parameterized_(self) -> bool:\n \"\"\"See `cirq.SupportsParameterization`.\"\"\"\n return (protocols.is_parameterized(self._exponent) or\n protocols.is_parameterized(self._phase_exponent))\n\n def _resolve_parameters_(self, param_resolver) -> 'PhasedXPowGate':\n \"\"\"See `cirq.SupportsParameterization`.\"\"\"\n return PhasedXPowGate(\n phase_exponent=param_resolver.value_of(self._phase_exponent),\n exponent=param_resolver.value_of(self._exponent),\n global_shift=self._global_shift)\n\n def _phase_by_(self, phase_turns, qubit_index):\n \"\"\"See `cirq.SupportsPhase`.\"\"\"\n assert qubit_index == 0\n return PhasedXPowGate(\n exponent=self._exponent,\n phase_exponent=self._phase_exponent + phase_turns * 2,\n global_shift=self._global_shift)\n\n def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs\n ) -> protocols.CircuitDiagramInfo:\n \"\"\"See `cirq.SupportsCircuitDiagramInfo`.\"\"\"\n\n if (isinstance(self.phase_exponent, sympy.Symbol) or\n args.precision is None):\n s = 'PhasedX({})'.format(self.phase_exponent)\n else:\n s = 'PhasedX({{:.{}}})'.format(args.precision).format(\n self.phase_exponent)\n return protocols.CircuitDiagramInfo(\n wire_symbols=(s,),\n exponent=value.canonicalize_half_turns(self._exponent))\n\n def __str__(self):\n info = protocols.circuit_diagram_info(self)\n if info.exponent == 1:\n return info.wire_symbols[0]\n return '{}^{}'.format(info.wire_symbols[0], info.exponent)\n\n def __repr__(self):\n args = ['phase_exponent={}'.format(proper_repr(self.phase_exponent))]\n if self.exponent != 1:\n args.append('exponent={}'.format(proper_repr(self.exponent)))\n if self._global_shift != 0:\n args.append('global_shift={!r}'.format(self._global_shift))\n return 'cirq.PhasedXPowGate({})'.format(', '.join(args))\n\n def _period(self):\n exponents = [self._global_shift, 1 + self._global_shift]\n real_periods = [abs(2/e) for e in exponents if e != 0]\n int_periods = [int(np.round(e)) for e in real_periods]\n if any(i != r for i, r in zip(real_periods, int_periods)):\n return None\n if len(int_periods) == 1:\n return int_periods[0]\n return int_periods[0] * int_periods[1] / math.gcd(*int_periods)\n\n @property\n def _canonical_exponent(self):\n period = self._period()\n if not period or isinstance(self._exponent, sympy.Symbol):\n return self._exponent\n else:\n return self._exponent % period\n\n def _value_equality_values_(self):\n return self.phase_exponent, self._canonical_exponent, self._global_shift\n", "path": "cirq/ops/phased_x_gate.py"}]} | 4,088 | 423 |
gh_patches_debug_22285 | rasdani/github-patches | git_diff | cocotb__cocotb-1410 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Logging non-string messages leads to AttributeError
Test code:
```
dut._log.info(dut.empty.value)
# dut.empty is a signal in the DUT, .value gets its BinaryValue object
```
This worked fine in older versions of cocotb; I haven't attempted to find the exact regression range yet. Now I get this trace:
```
Traceback (most recent call last):
File "/usr/lib64/python3.7/logging/__init__.py", line 1034, in emit
msg = self.format(record)
File "/usr/lib64/python3.7/logging/__init__.py", line 880, in format
return fmt.format(record)
File "/home/philipp/.local/lib/python3.7/site-packages/cocotb/log.py", line 201, in format
msg = '\n'.join([SimColourLogFormatter.loglevel2colour[record.levelno] % line for line in msg.split('\n')])
AttributeError: 'BinaryValue' object has no attribute 'split'
Call stack:
File "/home/philipp/.local/lib/python3.7/site-packages/cocotb/scheduler.py", line 355, in react
self._event_loop(trigger)
File "/home/philipp/.local/lib/python3.7/site-packages/cocotb/scheduler.py", line 448, in _event_loop
self.schedule(coro, trigger=trigger)
File "/home/philipp/.local/lib/python3.7/site-packages/cocotb/scheduler.py", line 758, in schedule
result = coroutine._advance(send_outcome)
File "/home/philipp/.local/lib/python3.7/site-packages/cocotb/decorators.py", line 264, in _advance
return super(RunningTest, self)._advance(outcome)
File "/home/philipp/.local/lib/python3.7/site-packages/cocotb/decorators.py", line 146, in _advance
return outcome.send(self._coro)
File "/home/philipp/.local/lib/python3.7/site-packages/cocotb/outcomes.py", line 38, in send
return gen.send(self.value)
File "path/to/test_fifo.py", line 45, in test_fifo_manual
dut._log.info(dut.empty.value)
```
I'll try to come up with a test and a fix soon-ish.
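As a minimal stdlib-only sketch of the failure mode (the formatter below is a stand-in for `SimColourLogFormatter`, not cocotb's actual class):

```python
import logging

class BinaryValueLike:
    """Stand-in for cocotb.binary.BinaryValue: prints fine, but has no .split()."""
    def __str__(self):
        return "0101"

class SplittingFormatter(logging.Formatter):
    def format(self, record):
        # Mirrors the failing line: record.msg is still the original object,
        # not a str, so .split() blows up for anything that isn't a string.
        return "\n".join(record.msg.split("\n"))
        # Using record.getMessage() here instead works, because it returns a str.

log = logging.getLogger("demo")
log.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setFormatter(SplittingFormatter())
log.addHandler(handler)

log.info("plain string")     # fine
log.info(BinaryValueLike())  # AttributeError inside the formatter, reported
                             # by logging's internal error handler
```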
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cocotb/log.py`
Content:
```
1 # Copyright (c) 2013, 2018 Potential Ventures Ltd
2 # Copyright (c) 2013 SolarFlare Communications Inc
3 # All rights reserved.
4 #
5 # Redistribution and use in source and binary forms, with or without
6 # modification, are permitted provided that the following conditions are met:
7 # * Redistributions of source code must retain the above copyright
8 # notice, this list of conditions and the following disclaimer.
9 # * Redistributions in binary form must reproduce the above copyright
10 # notice, this list of conditions and the following disclaimer in the
11 # documentation and/or other materials provided with the distribution.
12 # * Neither the name of Potential Ventures Ltd,
13 # SolarFlare Communications Inc nor the
14 # names of its contributors may be used to endorse or promote products
15 # derived from this software without specific prior written permission.
16 #
17 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
18 # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 # DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
21 # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22 # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23 # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
24 # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
26 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 """
29 Everything related to logging
30 """
31
32 import os
33 import sys
34 import logging
35 import warnings
36
37 from cocotb.utils import get_sim_time, want_color_output
38
39 import cocotb.ANSI as ANSI
40
41 if "COCOTB_REDUCED_LOG_FMT" in os.environ:
42 _suppress = True
43 else:
44 _suppress = False
45
46 # Column alignment
47 _LEVEL_CHARS = len("CRITICAL") # noqa
48 _RECORD_CHARS = 35 # noqa
49 _FILENAME_CHARS = 20 # noqa
50 _LINENO_CHARS = 4 # noqa
51 _FUNCNAME_CHARS = 31 # noqa
52
53
54 def default_config():
55 """ Apply the default cocotb log formatting to the root logger.
56
57 This hooks up the logger to write to stdout, using either
58 :class:`SimColourLogFormatter` or :class:`SimLogFormatter` depending
59 on whether colored output is requested.
60
61 The logging level for cocotb logs is set based on the
62 :envvar:`COCOTB_LOG_LEVEL` environment variable, which defaults to ``INFO``.
63
64 If desired, this logging configuration can be overwritten by calling
65 ``logging.basicConfig(..., force=True)`` (in Python 3.8 onwards), or by
66 manually resetting the root logger instance, for which examples can be
67 found online.
68 """
69 # construct an appropriate handler
70 hdlr = logging.StreamHandler(sys.stdout)
71 if want_color_output():
72 hdlr.setFormatter(SimColourLogFormatter())
73 else:
74 hdlr.setFormatter(SimLogFormatter())
75
76 logging.setLoggerClass(SimBaseLog) # For backwards compatibility
77 logging.basicConfig()
78 logging.getLogger().handlers = [hdlr] # overwrite default handlers
79
80 # apply level settings for cocotb
81 log = logging.getLogger('cocotb')
82 level = os.getenv("COCOTB_LOG_LEVEL", "INFO")
83 try:
84 _default_log = getattr(logging, level)
85 except AttributeError:
86 log.error("Unable to set logging level to %r" % level)
87 _default_log = logging.INFO
88 log.setLevel(_default_log)
89
90 # Notify GPI of log level, which it uses as an optimization to avoid
91 # calling into Python.
92 if "COCOTB_SIM" in os.environ:
93 import simulator
94 simulator.log_level(_default_log)
95
96
97 class SimBaseLog(logging.getLoggerClass()):
98 """ This class only exists for backwards compatibility """
99
100 @property
101 def logger(self):
102 warnings.warn(
103 "the .logger attribute should not be used now that `SimLog` "
104 "returns a native logger instance directly.",
105 DeprecationWarning, stacklevel=2)
106 return self
107
108 @property
109 def colour(self):
110 warnings.warn(
111 "the .colour attribute may be removed in future, use the "
112 "equivalent `cocotb.utils.want_color_output()` instead",
113 DeprecationWarning, stacklevel=2)
114 return want_color_output()
115
116
117 # this used to be a class, hence the unusual capitalization
118 def SimLog(name, ident=None):
119 """ Like logging.getLogger, but append a numeric identifier to the name """
120 if ident is not None:
121 name = "%s.0x%x" % (name, ident)
122 return logging.getLogger(name)
123
124
125 class SimLogFormatter(logging.Formatter):
126 """Log formatter to provide consistent log message handling."""
127
128 # Removes the arguments from the base class. Docstring needed to make
129 # sphinx happy.
130 def __init__(self):
131 """ Takes no arguments. """
132 super().__init__()
133
134 # Justify and truncate
135 @staticmethod
136 def ljust(string, chars):
137 if len(string) > chars:
138 return ".." + string[(chars - 2) * -1:]
139 return string.ljust(chars)
140
141 @staticmethod
142 def rjust(string, chars):
143 if len(string) > chars:
144 return ".." + string[(chars - 2) * -1:]
145 return string.rjust(chars)
146
147 def _format(self, level, record, msg, coloured=False):
148 time_ns = get_sim_time('ns')
149 simtime = "%6.2fns" % (time_ns)
150 prefix = simtime.rjust(11) + ' ' + level + ' '
151 if not _suppress:
152 prefix += self.ljust(record.name, _RECORD_CHARS) + \
153 self.rjust(os.path.split(record.filename)[1], _FILENAME_CHARS) + \
154 ':' + self.ljust(str(record.lineno), _LINENO_CHARS) + \
155 ' in ' + self.ljust(str(record.funcName), _FUNCNAME_CHARS) + ' '
156
157 # these lines are copied from the builtin logger
158 if record.exc_info:
159 # Cache the traceback text to avoid converting it multiple times
160 # (it's constant anyway)
161 if not record.exc_text:
162 record.exc_text = self.formatException(record.exc_info)
163 if record.exc_text:
164 if msg[-1:] != "\n":
165 msg = msg + "\n"
166 msg = msg + record.exc_text
167
168 prefix_len = len(prefix)
169 if coloured:
170 prefix_len -= (len(level) - _LEVEL_CHARS)
171 pad = "\n" + " " * (prefix_len)
172 return prefix + pad.join(msg.split('\n'))
173
174 def format(self, record):
175 """Prettify the log output, annotate with simulation time"""
176 if record.args:
177 msg = record.msg % record.args
178 else:
179 msg = record.msg
180
181 msg = str(msg)
182 level = record.levelname.ljust(_LEVEL_CHARS)
183
184 return self._format(level, record, msg)
185
186
187 class SimColourLogFormatter(SimLogFormatter):
188 """Log formatter to provide consistent log message handling."""
189
190 loglevel2colour = {
191 logging.DEBUG : "%s",
192 logging.INFO : ANSI.COLOR_INFO + "%s" + ANSI.COLOR_DEFAULT,
193 logging.WARNING : ANSI.COLOR_WARNING + "%s" + ANSI.COLOR_DEFAULT,
194 logging.ERROR : ANSI.COLOR_ERROR + "%s" + ANSI.COLOR_DEFAULT,
195 logging.CRITICAL: ANSI.COLOR_CRITICAL + "%s" + ANSI.COLOR_DEFAULT,
196 }
197
198 def format(self, record):
199 """Prettify the log output, annotate with simulation time"""
200
201 if record.args:
202 msg = record.msg % record.args
203 else:
204 msg = record.msg
205
206 # Need to colour each line in case coloring is applied in the message
207 msg = '\n'.join([SimColourLogFormatter.loglevel2colour[record.levelno] % line for line in msg.split('\n')])
208 level = (SimColourLogFormatter.loglevel2colour[record.levelno] %
209 record.levelname.ljust(_LEVEL_CHARS))
210
211 return self._format(level, record, msg, coloured=True)
212
213
214 def _filter_from_c(logger_name, level):
215 return logging.getLogger(logger_name).isEnabledFor(level)
216
217
218 def _log_from_c(logger_name, level, filename, lineno, msg, function_name):
219 """
220 This is for use from the C world, and allows us to insert C stack
221 information.
222 """
223 logger = logging.getLogger(logger_name)
224 if logger.isEnabledFor(level):
225 record = logger.makeRecord(
226 logger.name,
227 level,
228 filename,
229 lineno,
230 msg,
231 None,
232 None,
233 function_name
234 )
235 logger.handle(record)
236
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cocotb/log.py b/cocotb/log.py
--- a/cocotb/log.py
+++ b/cocotb/log.py
@@ -167,12 +167,8 @@
def format(self, record):
"""Prettify the log output, annotate with simulation time"""
- if record.args:
- msg = record.msg % record.args
- else:
- msg = record.msg
- msg = str(msg)
+ msg = record.getMessage()
level = record.levelname.ljust(_LEVEL_CHARS)
return self._format(level, record, msg)
@@ -192,10 +188,7 @@
def format(self, record):
"""Prettify the log output, annotate with simulation time"""
- if record.args:
- msg = record.msg % record.args
- else:
- msg = record.msg
+ msg = record.getMessage()
# Need to colour each line in case coloring is applied in the message
msg = '\n'.join([SimColourLogFormatter.loglevel2colour[record.levelno] % line for line in msg.split('\n')])
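For context on why `record.getMessage()` is the safer call (a stdlib-only illustration, independent of cocotb):

```python
import logging

# getMessage() always applies str() to record.msg first, then the %-args if
# any, so arbitrary objects such as BinaryValue come out as plain strings.
record = logging.LogRecord(
    name="demo", level=logging.INFO, pathname="demo.py", lineno=1,
    msg=object(), args=None, exc_info=None,
)
print(type(record.getMessage()))  # <class 'str'>

record_with_args = logging.LogRecord(
    name="demo", level=logging.INFO, pathname="demo.py", lineno=1,
    msg="count=%d", args=(3,), exc_info=None,
)
print(record_with_args.getMessage())  # count=3
```

Delegating to `getMessage()` also keeps the `%`-style argument handling identical to the stock `logging.Formatter`.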
| {"golden_diff": "diff --git a/cocotb/log.py b/cocotb/log.py\n--- a/cocotb/log.py\n+++ b/cocotb/log.py\n@@ -167,12 +167,8 @@\n \n def format(self, record):\n \"\"\"Prettify the log output, annotate with simulation time\"\"\"\n- if record.args:\n- msg = record.msg % record.args\n- else:\n- msg = record.msg\n \n- msg = str(msg)\n+ msg = record.getMessage()\n level = record.levelname.ljust(_LEVEL_CHARS)\n \n return self._format(level, record, msg)\n@@ -192,10 +188,7 @@\n def format(self, record):\n \"\"\"Prettify the log output, annotate with simulation time\"\"\"\n \n- if record.args:\n- msg = record.msg % record.args\n- else:\n- msg = record.msg\n+ msg = record.getMessage()\n \n # Need to colour each line in case coloring is applied in the message\n msg = '\\n'.join([SimColourLogFormatter.loglevel2colour[record.levelno] % line for line in msg.split('\\n')])\n", "issue": "Logging non-string messages leads to AttributeError\nTest code:\r\n\r\n```\r\ndut._log.info(dut.empty.value)\r\n# dut.empty is a signal in the DUT, .value gets its BinaryValue object\r\n```\r\n\r\nThis worked fine in older versions of cocotb, haven't attempted to find exact regression range yet. Now I get this trace:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/lib64/python3.7/logging/__init__.py\", line 1034, in emit\r\n msg = self.format(record)\r\n File \"/usr/lib64/python3.7/logging/__init__.py\", line 880, in format\r\n return fmt.format(record)\r\n File \"/home/philipp/.local/lib/python3.7/site-packages/cocotb/log.py\", line 201, in format\r\n msg = '\\n'.join([SimColourLogFormatter.loglevel2colour[record.levelno] % line for line in msg.split('\\n')])\r\nAttributeError: 'BinaryValue' object has no attribute 'split'\r\nCall stack:\r\n File \"/home/philipp/.local/lib/python3.7/site-packages/cocotb/scheduler.py\", line 355, in react\r\n self._event_loop(trigger)\r\n File \"/home/philipp/.local/lib/python3.7/site-packages/cocotb/scheduler.py\", line 448, in _event_loop\r\n self.schedule(coro, trigger=trigger)\r\n File \"/home/philipp/.local/lib/python3.7/site-packages/cocotb/scheduler.py\", line 758, in schedule\r\n result = coroutine._advance(send_outcome)\r\n File \"/home/philipp/.local/lib/python3.7/site-packages/cocotb/decorators.py\", line 264, in _advance\r\n return super(RunningTest, self)._advance(outcome)\r\n File \"/home/philipp/.local/lib/python3.7/site-packages/cocotb/decorators.py\", line 146, in _advance\r\n return outcome.send(self._coro)\r\n File \"/home/philipp/.local/lib/python3.7/site-packages/cocotb/outcomes.py\", line 38, in send\r\n return gen.send(self.value)\r\n File \"path/to/test_fifo.py\", line 45, in test_fifo_manual\r\n dut._log.info(dut.empty.value)\r\n```\r\n\r\nI'll try to come up with a test and a fix soon-ish.\n", "before_files": [{"content": "# Copyright (c) 2013, 2018 Potential Ventures Ltd\n# Copyright (c) 2013 SolarFlare Communications Inc\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Potential Ventures Ltd,\n# SolarFlare Communications Inc nor the\n# names of its 
contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"\nEverything related to logging\n\"\"\"\n\nimport os\nimport sys\nimport logging\nimport warnings\n\nfrom cocotb.utils import get_sim_time, want_color_output\n\nimport cocotb.ANSI as ANSI\n\nif \"COCOTB_REDUCED_LOG_FMT\" in os.environ:\n _suppress = True\nelse:\n _suppress = False\n\n# Column alignment\n_LEVEL_CHARS = len(\"CRITICAL\") # noqa\n_RECORD_CHARS = 35 # noqa\n_FILENAME_CHARS = 20 # noqa\n_LINENO_CHARS = 4 # noqa\n_FUNCNAME_CHARS = 31 # noqa\n\n\ndef default_config():\n \"\"\" Apply the default cocotb log formatting to the root logger.\n\n This hooks up the logger to write to stdout, using either\n :class:`SimColourLogFormatter` or :class:`SimLogFormatter` depending\n on whether colored output is requested.\n\n The logging level for cocotb logs is set based on the\n :envvar:`COCOTB_LOG_LEVEL` environment variable, which defaults to ``INFO``.\n\n If desired, this logging configuration can be overwritten by calling\n ``logging.basicConfig(..., force=True)`` (in Python 3.8 onwards), or by\n manually resetting the root logger instance, for which examples can be\n found online.\n \"\"\"\n # construct an appropriate handler\n hdlr = logging.StreamHandler(sys.stdout)\n if want_color_output():\n hdlr.setFormatter(SimColourLogFormatter())\n else:\n hdlr.setFormatter(SimLogFormatter())\n\n logging.setLoggerClass(SimBaseLog) # For backwards compatibility\n logging.basicConfig()\n logging.getLogger().handlers = [hdlr] # overwrite default handlers\n\n # apply level settings for cocotb\n log = logging.getLogger('cocotb')\n level = os.getenv(\"COCOTB_LOG_LEVEL\", \"INFO\")\n try:\n _default_log = getattr(logging, level)\n except AttributeError:\n log.error(\"Unable to set logging level to %r\" % level)\n _default_log = logging.INFO\n log.setLevel(_default_log)\n\n # Notify GPI of log level, which it uses as an optimization to avoid\n # calling into Python.\n if \"COCOTB_SIM\" in os.environ:\n import simulator\n simulator.log_level(_default_log)\n\n\nclass SimBaseLog(logging.getLoggerClass()):\n \"\"\" This class only exists for backwards compatibility \"\"\"\n\n @property\n def logger(self):\n warnings.warn(\n \"the .logger attribute should not be used now that `SimLog` \"\n \"returns a native logger instance directly.\",\n DeprecationWarning, stacklevel=2)\n return self\n\n @property\n def colour(self):\n warnings.warn(\n \"the .colour attribute may be removed in future, use the \"\n \"equivalent `cocotb.utils.want_color_output()` instead\",\n DeprecationWarning, stacklevel=2)\n return want_color_output()\n\n\n# this used to be a class, hence the unusual capitalization\ndef SimLog(name, 
ident=None):\n \"\"\" Like logging.getLogger, but append a numeric identifier to the name \"\"\"\n if ident is not None:\n name = \"%s.0x%x\" % (name, ident)\n return logging.getLogger(name)\n\n\nclass SimLogFormatter(logging.Formatter):\n \"\"\"Log formatter to provide consistent log message handling.\"\"\"\n\n # Removes the arguments from the base class. Docstring needed to make\n # sphinx happy.\n def __init__(self):\n \"\"\" Takes no arguments. \"\"\"\n super().__init__()\n\n # Justify and truncate\n @staticmethod\n def ljust(string, chars):\n if len(string) > chars:\n return \"..\" + string[(chars - 2) * -1:]\n return string.ljust(chars)\n\n @staticmethod\n def rjust(string, chars):\n if len(string) > chars:\n return \"..\" + string[(chars - 2) * -1:]\n return string.rjust(chars)\n\n def _format(self, level, record, msg, coloured=False):\n time_ns = get_sim_time('ns')\n simtime = \"%6.2fns\" % (time_ns)\n prefix = simtime.rjust(11) + ' ' + level + ' '\n if not _suppress:\n prefix += self.ljust(record.name, _RECORD_CHARS) + \\\n self.rjust(os.path.split(record.filename)[1], _FILENAME_CHARS) + \\\n ':' + self.ljust(str(record.lineno), _LINENO_CHARS) + \\\n ' in ' + self.ljust(str(record.funcName), _FUNCNAME_CHARS) + ' '\n\n # these lines are copied from the builtin logger\n if record.exc_info:\n # Cache the traceback text to avoid converting it multiple times\n # (it's constant anyway)\n if not record.exc_text:\n record.exc_text = self.formatException(record.exc_info)\n if record.exc_text:\n if msg[-1:] != \"\\n\":\n msg = msg + \"\\n\"\n msg = msg + record.exc_text\n\n prefix_len = len(prefix)\n if coloured:\n prefix_len -= (len(level) - _LEVEL_CHARS)\n pad = \"\\n\" + \" \" * (prefix_len)\n return prefix + pad.join(msg.split('\\n'))\n\n def format(self, record):\n \"\"\"Prettify the log output, annotate with simulation time\"\"\"\n if record.args:\n msg = record.msg % record.args\n else:\n msg = record.msg\n\n msg = str(msg)\n level = record.levelname.ljust(_LEVEL_CHARS)\n\n return self._format(level, record, msg)\n\n\nclass SimColourLogFormatter(SimLogFormatter):\n \"\"\"Log formatter to provide consistent log message handling.\"\"\"\n\n loglevel2colour = {\n logging.DEBUG : \"%s\",\n logging.INFO : ANSI.COLOR_INFO + \"%s\" + ANSI.COLOR_DEFAULT,\n logging.WARNING : ANSI.COLOR_WARNING + \"%s\" + ANSI.COLOR_DEFAULT,\n logging.ERROR : ANSI.COLOR_ERROR + \"%s\" + ANSI.COLOR_DEFAULT,\n logging.CRITICAL: ANSI.COLOR_CRITICAL + \"%s\" + ANSI.COLOR_DEFAULT,\n }\n\n def format(self, record):\n \"\"\"Prettify the log output, annotate with simulation time\"\"\"\n\n if record.args:\n msg = record.msg % record.args\n else:\n msg = record.msg\n\n # Need to colour each line in case coloring is applied in the message\n msg = '\\n'.join([SimColourLogFormatter.loglevel2colour[record.levelno] % line for line in msg.split('\\n')])\n level = (SimColourLogFormatter.loglevel2colour[record.levelno] %\n record.levelname.ljust(_LEVEL_CHARS))\n\n return self._format(level, record, msg, coloured=True)\n\n\ndef _filter_from_c(logger_name, level):\n return logging.getLogger(logger_name).isEnabledFor(level)\n\n\ndef _log_from_c(logger_name, level, filename, lineno, msg, function_name):\n \"\"\"\n This is for use from the C world, and allows us to insert C stack\n information.\n \"\"\"\n logger = logging.getLogger(logger_name)\n if logger.isEnabledFor(level):\n record = logger.makeRecord(\n logger.name,\n level,\n filename,\n lineno,\n msg,\n None,\n None,\n function_name\n )\n logger.handle(record)\n", "path": 
"cocotb/log.py"}], "after_files": [{"content": "# Copyright (c) 2013, 2018 Potential Ventures Ltd\n# Copyright (c) 2013 SolarFlare Communications Inc\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Potential Ventures Ltd,\n# SolarFlare Communications Inc nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"\nEverything related to logging\n\"\"\"\n\nimport os\nimport sys\nimport logging\nimport warnings\n\nfrom cocotb.utils import get_sim_time, want_color_output\n\nimport cocotb.ANSI as ANSI\n\nif \"COCOTB_REDUCED_LOG_FMT\" in os.environ:\n _suppress = True\nelse:\n _suppress = False\n\n# Column alignment\n_LEVEL_CHARS = len(\"CRITICAL\") # noqa\n_RECORD_CHARS = 35 # noqa\n_FILENAME_CHARS = 20 # noqa\n_LINENO_CHARS = 4 # noqa\n_FUNCNAME_CHARS = 31 # noqa\n\n\ndef default_config():\n \"\"\" Apply the default cocotb log formatting to the root logger.\n\n This hooks up the logger to write to stdout, using either\n :class:`SimColourLogFormatter` or :class:`SimLogFormatter` depending\n on whether colored output is requested.\n\n The logging level for cocotb logs is set based on the\n :envvar:`COCOTB_LOG_LEVEL` environment variable, which defaults to ``INFO``.\n\n If desired, this logging configuration can be overwritten by calling\n ``logging.basicConfig(..., force=True)`` (in Python 3.8 onwards), or by\n manually resetting the root logger instance, for which examples can be\n found online.\n \"\"\"\n # construct an appropriate handler\n hdlr = logging.StreamHandler(sys.stdout)\n if want_color_output():\n hdlr.setFormatter(SimColourLogFormatter())\n else:\n hdlr.setFormatter(SimLogFormatter())\n\n logging.setLoggerClass(SimBaseLog) # For backwards compatibility\n logging.basicConfig()\n logging.getLogger().handlers = [hdlr] # overwrite default handlers\n\n # apply level settings for cocotb\n log = logging.getLogger('cocotb')\n level = os.getenv(\"COCOTB_LOG_LEVEL\", \"INFO\")\n try:\n _default_log = getattr(logging, level)\n except AttributeError:\n log.error(\"Unable to set logging level to %r\" % level)\n _default_log = logging.INFO\n log.setLevel(_default_log)\n\n # Notify GPI of log level, which it uses as an optimization to avoid\n 
# calling into Python.\n if \"COCOTB_SIM\" in os.environ:\n import simulator\n simulator.log_level(_default_log)\n\n\nclass SimBaseLog(logging.getLoggerClass()):\n \"\"\" This class only exists for backwards compatibility \"\"\"\n\n @property\n def logger(self):\n warnings.warn(\n \"the .logger attribute should not be used now that `SimLog` \"\n \"returns a native logger instance directly.\",\n DeprecationWarning, stacklevel=2)\n return self\n\n @property\n def colour(self):\n warnings.warn(\n \"the .colour attribute may be removed in future, use the \"\n \"equivalent `cocotb.utils.want_color_output()` instead\",\n DeprecationWarning, stacklevel=2)\n return want_color_output()\n\n\n# this used to be a class, hence the unusual capitalization\ndef SimLog(name, ident=None):\n \"\"\" Like logging.getLogger, but append a numeric identifier to the name \"\"\"\n if ident is not None:\n name = \"%s.0x%x\" % (name, ident)\n return logging.getLogger(name)\n\n\nclass SimLogFormatter(logging.Formatter):\n \"\"\"Log formatter to provide consistent log message handling.\"\"\"\n\n # Justify and truncate\n @staticmethod\n def ljust(string, chars):\n if len(string) > chars:\n return \"..\" + string[(chars - 2) * -1:]\n return string.ljust(chars)\n\n @staticmethod\n def rjust(string, chars):\n if len(string) > chars:\n return \"..\" + string[(chars - 2) * -1:]\n return string.rjust(chars)\n\n def _format(self, level, record, msg, coloured=False):\n time_ns = get_sim_time('ns')\n simtime = \"%6.2fns\" % (time_ns)\n prefix = simtime.rjust(11) + ' ' + level + ' '\n if not _suppress:\n prefix += self.ljust(record.name, _RECORD_CHARS) + \\\n self.rjust(os.path.split(record.filename)[1], _FILENAME_CHARS) + \\\n ':' + self.ljust(str(record.lineno), _LINENO_CHARS) + \\\n ' in ' + self.ljust(str(record.funcName), _FUNCNAME_CHARS) + ' '\n\n # these lines are copied from the builtin logger\n if record.exc_info:\n # Cache the traceback text to avoid converting it multiple times\n # (it's constant anyway)\n if not record.exc_text:\n record.exc_text = self.formatException(record.exc_info)\n if record.exc_text:\n if msg[-1:] != \"\\n\":\n msg = msg + \"\\n\"\n msg = msg + record.exc_text\n\n prefix_len = len(prefix)\n if coloured:\n prefix_len -= (len(level) - _LEVEL_CHARS)\n pad = \"\\n\" + \" \" * (prefix_len)\n return prefix + pad.join(msg.split('\\n'))\n\n def format(self, record):\n \"\"\"Prettify the log output, annotate with simulation time\"\"\"\n\n msg = record.getMessage()\n level = record.levelname.ljust(_LEVEL_CHARS)\n\n return self._format(level, record, msg)\n\n\nclass SimColourLogFormatter(SimLogFormatter):\n \"\"\"Log formatter to provide consistent log message handling.\"\"\"\n\n loglevel2colour = {\n logging.DEBUG : \"%s\",\n logging.INFO : ANSI.COLOR_INFO + \"%s\" + ANSI.COLOR_DEFAULT,\n logging.WARNING : ANSI.COLOR_WARNING + \"%s\" + ANSI.COLOR_DEFAULT,\n logging.ERROR : ANSI.COLOR_ERROR + \"%s\" + ANSI.COLOR_DEFAULT,\n logging.CRITICAL: ANSI.COLOR_CRITICAL + \"%s\" + ANSI.COLOR_DEFAULT,\n }\n\n def format(self, record):\n \"\"\"Prettify the log output, annotate with simulation time\"\"\"\n\n msg = record.getMessage()\n\n # Need to colour each line in case coloring is applied in the message\n msg = '\\n'.join([SimColourLogFormatter.loglevel2colour[record.levelno] % line for line in msg.split('\\n')])\n level = (SimColourLogFormatter.loglevel2colour[record.levelno] %\n record.levelname.ljust(_LEVEL_CHARS))\n\n return self._format(level, record, msg, coloured=True)\n\n\ndef _filter_from_c(logger_name, 
level):\n return logging.getLogger(logger_name).isEnabledFor(level)\n\n\ndef _log_from_c(logger_name, level, filename, lineno, msg, function_name):\n \"\"\"\n This is for use from the C world, and allows us to insert C stack\n information.\n \"\"\"\n logger = logging.getLogger(logger_name)\n if logger.isEnabledFor(level):\n record = logger.makeRecord(\n logger.name,\n level,\n filename,\n lineno,\n msg,\n None,\n None,\n function_name\n )\n logger.handle(record)\n", "path": "cocotb/log.py"}]} | 3,352 | 257 |
gh_patches_debug_7100 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-1665 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug]: Dudley Council, UK - Not Loading
### I Have A Problem With:
A specific source
### What's Your Problem
Now that the Xmas alternative dates have been removed from the council website, it's no longer loading my bin dates into my calendar and is showing the below error messages in the HA log file
### Source (if relevant)
dudley_gov_uk
### Logs
```Shell
2024-01-10 01:19:15.591 ERROR (SyncWorker_4) [waste_collection_schedule.source_shell] fetch failed for source Dudley Metropolitan Borough Council:
Traceback (most recent call last):
File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py", line 134, in fetch
entries = self._source.fetch()
^^^^^^^^^^^^^^^^^^^^
File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/dudley_gov_uk.py", line 107, in fetch
xmas_map = self.get_xmas_map(footer_panel)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/dudley_gov_uk.py", line 71, in get_xmas_map
footer_panel.find("table").find("tr"),
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
AttributeError: 'NoneType' object has no attribute 'find'
```
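For reference, a small BeautifulSoup sketch (using a made-up footer snippet, not the real council page) of why the guard condition passes quietly while the debug `print` on line 71 of the listing still crashes:

```python
from bs4 import BeautifulSoup

# Outside the Xmas period the footer panel has no <table>, as in the report.
footer_panel = BeautifulSoup(
    "<div class='atPanelFooter'><p>no holiday changes</p></div>", "html.parser"
).div

# The guard short-circuits safely: footer_panel.find("table") is None,
# so the trailing .find("tr") is never evaluated.
ok = footer_panel and footer_panel.find("table") and footer_panel.find("table").find("tr")
print(bool(ok))  # False

# The debug print evaluates every argument, so None.find("tr") raises the
# AttributeError shown in the log above.
try:
    print(footer_panel, footer_panel.find("table"), footer_panel.find("table").find("tr"))
except AttributeError as exc:
    print(exc)  # 'NoneType' object has no attribute 'find'
```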
### Relevant Configuration
_No response_
### Checklist Source Error
- [x] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)
- [X] Checked that the website of your service provider is still working
- [x] Tested my attributes on the service provider website (if possible)
- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on "Redownload" and choose master as version)
### Checklist Sensor Error
- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)
### Required
- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.
- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `custom_components/waste_collection_schedule/waste_collection_schedule/source/dudley_gov_uk.py`
Content:
```
1 import re
2 from datetime import date, datetime, timedelta
3
4 import requests
5 from bs4 import BeautifulSoup
6 from waste_collection_schedule import Collection # type: ignore[attr-defined]
7
8 TITLE = "Dudley Metropolitan Borough Council"
9 DESCRIPTION = "Source for Dudley Metropolitan Borough Council, UK."
10 URL = "https://dudley.gov.uk"
11 TEST_CASES = {
12 "Test_001": {"uprn": "90090715"},
13 "Test_002": {"uprn": 90104555},
14 "Test_003": {"uprn": "90164803"},
15 "Test_004": {"uprn": 90092621},
16 }
17 ICON_MAP = {"RECYCLING": "mdi:recycle", "GARDEN": "mdi:leaf", "REFUSE": "mdi:trash-can"}
18 REGEX = {
19 "DATES": r"(\d+ \w{3})",
20 "DAYS": r"every: (Monday|Tuesday|Wednesday|Thursday|Friday)",
21 }
22 DAYS = {
23 "Monday": 0,
24 "Tuesday": 1,
25 "Wednesday": 2,
26 "Thursday": 3,
27 "Friday": 4,
28 "Saturday": 5,
29 "Sunday": 6,
30 }
31
32
33 class Source:
34 def __init__(self, uprn: str | int):
35 self._uprn = str(uprn)
36
37 def check_date(self, d: str, t: datetime, y: int):
38 """
39 Get date, append year, and increment year if date is >1 month in the past.
40
41 This tries to deal year-end dates when the YEAR is missing
42 """
43 d += " " + str(y)
44 try:
45 date = datetime.strptime(d, "%d %b %Y")
46 except ValueError:
47 date = datetime.strptime(d, "%A %d %b %Y")
48 if (date - t) < timedelta(days=-31):
49 date = date.replace(year=date.year + 1)
50 return date.date()
51
52 def append_entries(self, d: datetime, w: str, e: list) -> list:
53 e.append(
54 Collection(
55 date=d,
56 t=w,
57 icon=ICON_MAP.get(w.upper()),
58 )
59 )
60 return e
61
62 def get_xmas_map(self, footer_panel) -> dict[date, date]:
63 if not (
64 footer_panel
65 and footer_panel.find("table")
66 and footer_panel.find("table").find("tr")
67 ):
68 print(
69 footer_panel,
70 footer_panel.find("table"),
71 footer_panel.find("table").find("tr"),
72 )
73 return {}
74 xmas_map: dict = {}
75 today = datetime.now()
76 yr = int(today.year)
77 for tr in footer_panel.find("table").findAll("tr")[1:]:
78 try:
79 moved, moved_to = tr.findAll("td")
80 moved = self.check_date(moved.text, today, yr)
81 moved_to = self.check_date(moved_to.text, today, yr)
82 xmas_map[moved] = moved_to
83 except Exception as e:
84 print(e)
85 continue
86 return xmas_map
87
88 def fetch(self):
89 today = datetime.now()
90 today = today.replace(hour=0, minute=0, second=0, microsecond=0)
91 yr = int(today.year)
92
93 s = requests.Session()
94 r = s.get(
95 f"https://maps.dudley.gov.uk/?action=SetAddress&UniqueId={self._uprn}"
96 )
97 soup = BeautifulSoup(r.text, "html.parser")
98
99 panel = soup.find("div", {"aria-label": "Refuse and Recycling Collection"})
100 panel_data = panel.find("div", {"class": "atPanelData"})
101 waste_data = panel_data.text.split("Next")[
102 1:
103 ] # remove first element it just contains general info
104
105 # get table of holiday moved dates (only around xmas)
106 footer_panel = panel.find("div", {"class": "atPanelFooter"})
107 xmas_map = self.get_xmas_map(footer_panel)
108
109 entries = []
110 # Deal with Recycling and Garden collections
111 for item in waste_data:
112 text = item.replace("\r\n", "").strip()
113 if "recycling" in text:
114 dates = re.findall(REGEX["DATES"], text)
115 for dt in dates:
116 dt = self.check_date(dt, today, yr)
117 dt = xmas_map.get(dt, dt)
118 self.append_entries(dt, "Recycling", entries)
119 elif "garden" in text:
120 dates = re.findall(REGEX["DATES"], text)
121 for dt in dates:
122 dt = self.check_date(dt, today, yr)
123 dt = xmas_map.get(dt, dt)
124 self.append_entries(dt, "Garden", entries)
125
126 # Refuse collections only have a DAY not a date, so work out dates for the next few collections
127 refuse_day = re.findall(REGEX["DAYS"], panel_data.text)[0]
128 refuse_date = today + timedelta((int(DAYS[refuse_day]) - today.weekday()) % 7)
129 for i in range(0, 4):
130 temp_date = (refuse_date + timedelta(days=7 * i)).date()
131 temp_date = xmas_map.get(temp_date, temp_date)
132 self.append_entries(temp_date, "Refuse", entries)
133
134 return entries
135
```
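A condensed, standalone sketch of the `check_date` year-rollover rule from the listing above (simplified: the weekday-format fallback and the xmas remapping are dropped):

```python
from datetime import datetime, timedelta

def check_date(d: str, today: datetime, year: int):
    """Append the year, then roll forward if the date is >1 month in the past."""
    parsed = datetime.strptime(f"{d} {year}", "%d %b %Y")
    if (parsed - today) < timedelta(days=-31):
        parsed = parsed.replace(year=parsed.year + 1)
    return parsed.date()

today = datetime(2023, 12, 28)
print(check_date("29 Dec", today, today.year))  # 2023-12-29, upcoming date stays put
print(check_date("4 Jan", today, today.year))   # 2024-01-04, wrapped into the next year
```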
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/dudley_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/dudley_gov_uk.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/dudley_gov_uk.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/dudley_gov_uk.py
@@ -65,11 +65,6 @@
and footer_panel.find("table")
and footer_panel.find("table").find("tr")
):
- print(
- footer_panel,
- footer_panel.find("table"),
- footer_panel.find("table").find("tr"),
- )
return {}
xmas_map: dict = {}
today = datetime.now()
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/dudley_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/dudley_gov_uk.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/dudley_gov_uk.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/dudley_gov_uk.py\n@@ -65,11 +65,6 @@\n and footer_panel.find(\"table\")\n and footer_panel.find(\"table\").find(\"tr\")\n ):\n- print(\n- footer_panel,\n- footer_panel.find(\"table\"),\n- footer_panel.find(\"table\").find(\"tr\"),\n- )\n return {}\n xmas_map: dict = {}\n today = datetime.now()\n", "issue": "[Bug]: Dudley Council, UK - Not Loading\n### I Have A Problem With:\r\n\r\nA specific source\r\n\r\n### What's Your Problem\r\n\r\nNow the Xmas alternative dates have been removed from the council website it's no longer loading my bin dates into my calendar and showing the below error messages in the HA log file\r\n\r\n### Source (if relevant)\r\n\r\ndudley_gov_uk\r\n\r\n### Logs\r\n\r\n```Shell\r\n2024-01-10 01:19:15.591 ERROR (SyncWorker_4) [waste_collection_schedule.source_shell] fetch failed for source Dudley Metropolitan Borough Council:\r\nTraceback (most recent call last):\r\n File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py\", line 134, in fetch\r\n entries = self._source.fetch()\r\n ^^^^^^^^^^^^^^^^^^^^\r\n File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/dudley_gov_uk.py\", line 107, in fetch\r\n xmas_map = self.get_xmas_map(footer_panel)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/dudley_gov_uk.py\", line 71, in get_xmas_map\r\n footer_panel.find(\"table\").find(\"tr\"),\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\nAttributeError: 'NoneType' object has no attribute 'find'\r\n\r\n```\r\n\r\n\r\n### Relevant Configuration\r\n\r\n_No response_\r\n\r\n### Checklist Source Error\r\n\r\n- [x] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)\r\n- [X] Checked that the website of your service provider is still working\r\n- [x] Tested my attributes on the service provider website (if possible)\r\n- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on \"Redownload\" and choose master as version)\r\n\r\n### Checklist Sensor Error\r\n\r\n- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)\r\n\r\n### Required\r\n\r\n- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.\r\n- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.\n", "before_files": [{"content": "import re\nfrom datetime import date, datetime, timedelta\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\nTITLE = \"Dudley Metropolitan Borough Council\"\nDESCRIPTION = \"Source for Dudley Metropolitan Borough Council, UK.\"\nURL = \"https://dudley.gov.uk\"\nTEST_CASES = {\n \"Test_001\": {\"uprn\": \"90090715\"},\n \"Test_002\": {\"uprn\": 90104555},\n \"Test_003\": 
{\"uprn\": \"90164803\"},\n \"Test_004\": {\"uprn\": 90092621},\n}\nICON_MAP = {\"RECYCLING\": \"mdi:recycle\", \"GARDEN\": \"mdi:leaf\", \"REFUSE\": \"mdi:trash-can\"}\nREGEX = {\n \"DATES\": r\"(\\d+ \\w{3})\",\n \"DAYS\": r\"every: (Monday|Tuesday|Wednesday|Thursday|Friday)\",\n}\nDAYS = {\n \"Monday\": 0,\n \"Tuesday\": 1,\n \"Wednesday\": 2,\n \"Thursday\": 3,\n \"Friday\": 4,\n \"Saturday\": 5,\n \"Sunday\": 6,\n}\n\n\nclass Source:\n def __init__(self, uprn: str | int):\n self._uprn = str(uprn)\n\n def check_date(self, d: str, t: datetime, y: int):\n \"\"\"\n Get date, append year, and increment year if date is >1 month in the past.\n\n This tries to deal year-end dates when the YEAR is missing\n \"\"\"\n d += \" \" + str(y)\n try:\n date = datetime.strptime(d, \"%d %b %Y\")\n except ValueError:\n date = datetime.strptime(d, \"%A %d %b %Y\")\n if (date - t) < timedelta(days=-31):\n date = date.replace(year=date.year + 1)\n return date.date()\n\n def append_entries(self, d: datetime, w: str, e: list) -> list:\n e.append(\n Collection(\n date=d,\n t=w,\n icon=ICON_MAP.get(w.upper()),\n )\n )\n return e\n\n def get_xmas_map(self, footer_panel) -> dict[date, date]:\n if not (\n footer_panel\n and footer_panel.find(\"table\")\n and footer_panel.find(\"table\").find(\"tr\")\n ):\n print(\n footer_panel,\n footer_panel.find(\"table\"),\n footer_panel.find(\"table\").find(\"tr\"),\n )\n return {}\n xmas_map: dict = {}\n today = datetime.now()\n yr = int(today.year)\n for tr in footer_panel.find(\"table\").findAll(\"tr\")[1:]:\n try:\n moved, moved_to = tr.findAll(\"td\")\n moved = self.check_date(moved.text, today, yr)\n moved_to = self.check_date(moved_to.text, today, yr)\n xmas_map[moved] = moved_to\n except Exception as e:\n print(e)\n continue\n return xmas_map\n\n def fetch(self):\n today = datetime.now()\n today = today.replace(hour=0, minute=0, second=0, microsecond=0)\n yr = int(today.year)\n\n s = requests.Session()\n r = s.get(\n f\"https://maps.dudley.gov.uk/?action=SetAddress&UniqueId={self._uprn}\"\n )\n soup = BeautifulSoup(r.text, \"html.parser\")\n\n panel = soup.find(\"div\", {\"aria-label\": \"Refuse and Recycling Collection\"})\n panel_data = panel.find(\"div\", {\"class\": \"atPanelData\"})\n waste_data = panel_data.text.split(\"Next\")[\n 1:\n ] # remove first element it just contains general info\n\n # get table of holiday moved dates (only around xmas)\n footer_panel = panel.find(\"div\", {\"class\": \"atPanelFooter\"})\n xmas_map = self.get_xmas_map(footer_panel)\n\n entries = []\n # Deal with Recycling and Garden collections\n for item in waste_data:\n text = item.replace(\"\\r\\n\", \"\").strip()\n if \"recycling\" in text:\n dates = re.findall(REGEX[\"DATES\"], text)\n for dt in dates:\n dt = self.check_date(dt, today, yr)\n dt = xmas_map.get(dt, dt)\n self.append_entries(dt, \"Recycling\", entries)\n elif \"garden\" in text:\n dates = re.findall(REGEX[\"DATES\"], text)\n for dt in dates:\n dt = self.check_date(dt, today, yr)\n dt = xmas_map.get(dt, dt)\n self.append_entries(dt, \"Garden\", entries)\n\n # Refuse collections only have a DAY not a date, so work out dates for the next few collections\n refuse_day = re.findall(REGEX[\"DAYS\"], panel_data.text)[0]\n refuse_date = today + timedelta((int(DAYS[refuse_day]) - today.weekday()) % 7)\n for i in range(0, 4):\n temp_date = (refuse_date + timedelta(days=7 * i)).date()\n temp_date = xmas_map.get(temp_date, temp_date)\n self.append_entries(temp_date, \"Refuse\", entries)\n\n return entries\n", "path": 
"custom_components/waste_collection_schedule/waste_collection_schedule/source/dudley_gov_uk.py"}], "after_files": [{"content": "import re\nfrom datetime import date, datetime, timedelta\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\nTITLE = \"Dudley Metropolitan Borough Council\"\nDESCRIPTION = \"Source for Dudley Metropolitan Borough Council, UK.\"\nURL = \"https://dudley.gov.uk\"\nTEST_CASES = {\n \"Test_001\": {\"uprn\": \"90090715\"},\n \"Test_002\": {\"uprn\": 90104555},\n \"Test_003\": {\"uprn\": \"90164803\"},\n \"Test_004\": {\"uprn\": 90092621},\n}\nICON_MAP = {\"RECYCLING\": \"mdi:recycle\", \"GARDEN\": \"mdi:leaf\", \"REFUSE\": \"mdi:trash-can\"}\nREGEX = {\n \"DATES\": r\"(\\d+ \\w{3})\",\n \"DAYS\": r\"every: (Monday|Tuesday|Wednesday|Thursday|Friday)\",\n}\nDAYS = {\n \"Monday\": 0,\n \"Tuesday\": 1,\n \"Wednesday\": 2,\n \"Thursday\": 3,\n \"Friday\": 4,\n \"Saturday\": 5,\n \"Sunday\": 6,\n}\n\n\nclass Source:\n def __init__(self, uprn: str | int):\n self._uprn = str(uprn)\n\n def check_date(self, d: str, t: datetime, y: int):\n \"\"\"\n Get date, append year, and increment year if date is >1 month in the past.\n\n This tries to deal year-end dates when the YEAR is missing\n \"\"\"\n d += \" \" + str(y)\n try:\n date = datetime.strptime(d, \"%d %b %Y\")\n except ValueError:\n date = datetime.strptime(d, \"%A %d %b %Y\")\n if (date - t) < timedelta(days=-31):\n date = date.replace(year=date.year + 1)\n return date.date()\n\n def append_entries(self, d: datetime, w: str, e: list) -> list:\n e.append(\n Collection(\n date=d,\n t=w,\n icon=ICON_MAP.get(w.upper()),\n )\n )\n return e\n\n def get_xmas_map(self, footer_panel) -> dict[date, date]:\n if not (\n footer_panel\n and footer_panel.find(\"table\")\n and footer_panel.find(\"table\").find(\"tr\")\n ):\n return {}\n xmas_map: dict = {}\n today = datetime.now()\n yr = int(today.year)\n for tr in footer_panel.find(\"table\").findAll(\"tr\")[1:]:\n try:\n moved, moved_to = tr.findAll(\"td\")\n moved = self.check_date(moved.text, today, yr)\n moved_to = self.check_date(moved_to.text, today, yr)\n xmas_map[moved] = moved_to\n except Exception as e:\n print(e)\n continue\n return xmas_map\n\n def fetch(self):\n today = datetime.now()\n today = today.replace(hour=0, minute=0, second=0, microsecond=0)\n yr = int(today.year)\n\n s = requests.Session()\n r = s.get(\n f\"https://maps.dudley.gov.uk/?action=SetAddress&UniqueId={self._uprn}\"\n )\n soup = BeautifulSoup(r.text, \"html.parser\")\n\n panel = soup.find(\"div\", {\"aria-label\": \"Refuse and Recycling Collection\"})\n panel_data = panel.find(\"div\", {\"class\": \"atPanelData\"})\n waste_data = panel_data.text.split(\"Next\")[\n 1:\n ] # remove first element it just contains general info\n\n # get table of holiday moved dates (only around xmas)\n footer_panel = panel.find(\"div\", {\"class\": \"atPanelFooter\"})\n xmas_map = self.get_xmas_map(footer_panel)\n\n entries = []\n # Deal with Recycling and Garden collections\n for item in waste_data:\n text = item.replace(\"\\r\\n\", \"\").strip()\n if \"recycling\" in text:\n dates = re.findall(REGEX[\"DATES\"], text)\n for dt in dates:\n dt = self.check_date(dt, today, yr)\n dt = xmas_map.get(dt, dt)\n self.append_entries(dt, \"Recycling\", entries)\n elif \"garden\" in text:\n dates = re.findall(REGEX[\"DATES\"], text)\n for dt in dates:\n dt = self.check_date(dt, today, yr)\n dt = xmas_map.get(dt, dt)\n self.append_entries(dt, \"Garden\", 
entries)\n\n # Refuse collections only have a DAY not a date, so work out dates for the next few collections\n refuse_day = re.findall(REGEX[\"DAYS\"], panel_data.text)[0]\n refuse_date = today + timedelta((int(DAYS[refuse_day]) - today.weekday()) % 7)\n for i in range(0, 4):\n temp_date = (refuse_date + timedelta(days=7 * i)).date()\n temp_date = xmas_map.get(temp_date, temp_date)\n self.append_entries(temp_date, \"Refuse\", entries)\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/dudley_gov_uk.py"}]} | 2,285 | 172 |
gh_patches_debug_6896 | rasdani/github-patches | git_diff | pantsbuild__pants-20719 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
"export" goal docs have unclosed call out
**Describe the bug**
The warning callout about "exporting tools requires ..." seems to be unclosed:
- https://www.pantsbuild.org/2.18/reference/goals/export
- https://www.pantsbuild.org/2.19/reference/goals/export
- https://www.pantsbuild.org/2.20/reference/goals/export

**Pants version**
2.18 onwards
**OS**
macOS
**Additional info**
Introduced in #20604
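A rough sketch of the likely mechanism, assuming `pants.util.strutil.softwrap` collapses single newlines within a paragraph (the collapse below is only an approximation of it, not the real implementation):

```python
from textwrap import dedent

help_text = dedent(
    """
    Export Pants data for use in other tools, such as IDEs.

    :::caution Exporting tools requires creating a custom lockfile for them
    Follow [the instructions for creating tool lockfiles](../../docs/python/overview/lockfiles#lockfiles-for-tools)
    :::
    """
)

# Approximate softwrap: single newlines inside a paragraph become spaces,
# blank lines keep separating paragraphs.
paragraphs = [" ".join(p.split("\n")) for p in help_text.strip().split("\n\n")]
print("\n\n".join(paragraphs))
# The whole callout collapses onto one line, so the closing `:::` no longer
# sits on its own line and the rendered admonition is never terminated.
```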
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/python/pants/core/goals/export.py`
Content:
```
1 # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 from __future__ import annotations
5
6 import itertools
7 import os
8 from dataclasses import dataclass
9 from typing import Iterable, Mapping, Sequence, cast
10
11 from pants.base.build_root import BuildRoot
12 from pants.core.goals.generate_lockfiles import (
13 GenerateToolLockfileSentinel,
14 KnownUserResolveNames,
15 KnownUserResolveNamesRequest,
16 UnrecognizedResolveNamesError,
17 )
18 from pants.core.util_rules.distdir import DistDir
19 from pants.core.util_rules.environments import _warn_on_non_local_environments
20 from pants.engine.collection import Collection
21 from pants.engine.console import Console
22 from pants.engine.env_vars import EnvironmentVars, EnvironmentVarsRequest
23 from pants.engine.environment import EnvironmentName
24 from pants.engine.fs import EMPTY_DIGEST, AddPrefix, Digest, MergeDigests, Workspace
25 from pants.engine.goal import Goal, GoalSubsystem
26 from pants.engine.internals.selectors import Effect, Get, MultiGet
27 from pants.engine.process import InteractiveProcess, InteractiveProcessResult
28 from pants.engine.rules import collect_rules, goal_rule
29 from pants.engine.target import FilteredTargets, Target
30 from pants.engine.unions import UnionMembership, union
31 from pants.option.option_types import StrListOption
32 from pants.util.dirutil import safe_rmtree
33 from pants.util.frozendict import FrozenDict
34 from pants.util.strutil import softwrap
35
36
37 class ExportError(Exception):
38 pass
39
40
41 @union(in_scope_types=[EnvironmentName])
42 @dataclass(frozen=True)
43 class ExportRequest:
44 """A union for exportable data provided by a backend.
45
46 Subclass and install a member of this type to export data.
47 """
48
49 targets: Sequence[Target]
50
51
52 @dataclass(frozen=True)
53 class PostProcessingCommand:
54 """A command to run as a local process after an exported digest is materialized."""
55
56 # Values in the argv tuple can contain the format specifier "{digest_root}", which will be
57 # substituted with the (absolute) path to the location under distdir in which the
58 # digest is materialized.
59 argv: tuple[str, ...]
60 # The command will be run with an environment consisting of just PATH, set to the Pants
61 # process's own PATH env var, plus these extra env vars.
62 extra_env: FrozenDict[str, str]
63
64 def __init__(
65 self,
66 argv: Iterable[str],
67 extra_env: Mapping[str, str] = FrozenDict(),
68 ):
69 object.__setattr__(self, "argv", tuple(argv))
70 object.__setattr__(self, "extra_env", FrozenDict(extra_env))
71
72
73 @dataclass(frozen=True)
74 class ExportResult:
75 description: str
76 # Materialize digests under this reldir.
77 reldir: str
78 # Materialize this digest.
79 digest: Digest
80 # Run these commands as local processes after the digest is materialized.
81 post_processing_cmds: tuple[PostProcessingCommand, ...]
82 # Set for the common special case of exporting a resolve, and names that resolve.
83 # Set to None for other export results.
84 resolve: str | None
85
86 def __init__(
87 self,
88 description: str,
89 reldir: str,
90 *,
91 digest: Digest = EMPTY_DIGEST,
92 post_processing_cmds: Iterable[PostProcessingCommand] = tuple(),
93 resolve: str | None = None,
94 ):
95 object.__setattr__(self, "description", description)
96 object.__setattr__(self, "reldir", reldir)
97 object.__setattr__(self, "digest", digest)
98 object.__setattr__(self, "post_processing_cmds", tuple(post_processing_cmds))
99 object.__setattr__(self, "resolve", resolve)
100
101
102 class ExportResults(Collection[ExportResult]):
103 pass
104
105
106 class ExportSubsystem(GoalSubsystem):
107 name = "export"
108 help = softwrap(
109 """
110 Export Pants data for use in other tools, such as IDEs.
111
112 :::caution Exporting tools requires creating a custom lockfile for them
113 Follow [the instructions for creating tool lockfiles](../../docs/python/overview/lockfiles#lockfiles-for-tools)
114 :::
115 """
116 )
117
118 # NB: Only options that are relevant across many/most backends and languages
119 # should be defined here. Backend-specific options should be defined in that backend
120 # as plugin options on this subsystem.
121
122 # Exporting resolves is a common use-case for `export`, often the primary one, so we
123 # add affordances for it at the core goal level.
124 resolve = StrListOption(
125 default=[],
126 help="Export the specified resolve(s). The export format is backend-specific, "
127 "e.g., Python resolves are exported as virtualenvs.",
128 )
129
130
131 class Export(Goal):
132 subsystem_cls = ExportSubsystem
133 environment_behavior = Goal.EnvironmentBehavior.LOCAL_ONLY
134
135
136 @goal_rule
137 async def export(
138 console: Console,
139 targets: FilteredTargets,
140 workspace: Workspace,
141 union_membership: UnionMembership,
142 build_root: BuildRoot,
143 dist_dir: DistDir,
144 export_subsys: ExportSubsystem,
145 ) -> Export:
146 request_types = cast("Iterable[type[ExportRequest]]", union_membership.get(ExportRequest))
147 requests = tuple(request_type(targets) for request_type in request_types)
148 all_results = await MultiGet(Get(ExportResults, ExportRequest, request) for request in requests)
149 flattened_results = [res for results in all_results for res in results]
150
151 await _warn_on_non_local_environments(targets, "the `export` goal")
152
153 prefixed_digests = await MultiGet(
154 Get(Digest, AddPrefix(result.digest, result.reldir)) for result in flattened_results
155 )
156 output_dir = os.path.join(str(dist_dir.relpath), "export")
157 for result in flattened_results:
158 digest_root = os.path.join(build_root.path, output_dir, result.reldir)
159 safe_rmtree(digest_root)
160 merged_digest = await Get(Digest, MergeDigests(prefixed_digests))
161 dist_digest = await Get(Digest, AddPrefix(merged_digest, output_dir))
162 workspace.write_digest(dist_digest)
163 environment = await Get(EnvironmentVars, EnvironmentVarsRequest(["PATH"]))
164 resolves_exported = set()
165 for result in flattened_results:
166 result_dir = os.path.join(output_dir, result.reldir)
167 digest_root = os.path.join(build_root.path, result_dir)
168 for cmd in result.post_processing_cmds:
169 argv = tuple(arg.format(digest_root=digest_root) for arg in cmd.argv)
170 ip = InteractiveProcess(
171 argv=argv,
172 env={"PATH": environment.get("PATH", ""), **cmd.extra_env},
173 run_in_workspace=True,
174 )
175 ipr = await Effect(InteractiveProcessResult, InteractiveProcess, ip)
176 if ipr.exit_code:
177 raise ExportError(f"Failed to write {result.description} to {result_dir}")
178 if result.resolve:
179 resolves_exported.add(result.resolve)
180 console.print_stdout(f"Wrote {result.description} to {result_dir}")
181
182 unexported_resolves = sorted((set(export_subsys.resolve) - resolves_exported))
183 if unexported_resolves:
184 all_known_user_resolve_names = await MultiGet(
185 Get(KnownUserResolveNames, KnownUserResolveNamesRequest, request())
186 for request in union_membership.get(KnownUserResolveNamesRequest)
187 )
188 all_valid_resolve_names = sorted(
189 {
190 *itertools.chain.from_iterable(kurn.names for kurn in all_known_user_resolve_names),
191 *(
192 sentinel.resolve_name
193 for sentinel in union_membership.get(GenerateToolLockfileSentinel)
194 ),
195 }
196 )
197 raise UnrecognizedResolveNamesError(
198 unexported_resolves,
199 all_valid_resolve_names,
200 description_of_origin="the option --export-resolve",
201 )
202
203 return Export(exit_code=0)
204
205
206 def rules():
207 return collect_rules()
208
```
--- END FILES ---
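As a side note on the API shown in the file above: backends hand the `export` goal an `ExportResult`, optionally with `PostProcessingCommand`s that use the `{digest_root}` placeholder described in the docstrings. The snippet below is only an illustrative sketch; the tool name, directory, and resolve are invented, and this is not real Pants backend code.
```python
# Illustrative only: how a backend might construct an ExportResult.
from pants.core.goals.export import ExportResult, PostProcessingCommand
from pants.engine.fs import EMPTY_DIGEST

result = ExportResult(
    "virtualenv for the resolve 'python-default'",
    "python/virtualenvs/python-default",
    digest=EMPTY_DIGEST,  # a real backend passes the digest it built
    post_processing_cmds=[
        # "{digest_root}" is substituted with the absolute path where the digest
        # is materialized, per the PostProcessingCommand comments above.
        PostProcessingCommand(["post-process-tool", "{digest_root}"]),
    ],
    resolve="python-default",
)
```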
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/python/pants/core/goals/export.py b/src/python/pants/core/goals/export.py
--- a/src/python/pants/core/goals/export.py
+++ b/src/python/pants/core/goals/export.py
@@ -110,7 +110,9 @@
Export Pants data for use in other tools, such as IDEs.
:::caution Exporting tools requires creating a custom lockfile for them
+
Follow [the instructions for creating tool lockfiles](../../docs/python/overview/lockfiles#lockfiles-for-tools)
+
:::
"""
)
| {"golden_diff": "diff --git a/src/python/pants/core/goals/export.py b/src/python/pants/core/goals/export.py\n--- a/src/python/pants/core/goals/export.py\n+++ b/src/python/pants/core/goals/export.py\n@@ -110,7 +110,9 @@\n Export Pants data for use in other tools, such as IDEs.\n \n :::caution Exporting tools requires creating a custom lockfile for them\n+\n Follow [the instructions for creating tool lockfiles](../../docs/python/overview/lockfiles#lockfiles-for-tools)\n+\n :::\n \"\"\"\n )\n", "issue": "\"export\" goal docs have unclosed call out\n**Describe the bug**\r\n\r\nThe warning callout about \"exporting tools requires ...\" seems to be unclosed:\r\n\r\n- https://www.pantsbuild.org/2.18/reference/goals/export\r\n- https://www.pantsbuild.org/2.19/reference/goals/export\r\n- https://www.pantsbuild.org/2.20/reference/goals/export\r\n\r\n\r\n\r\n**Pants version**\r\n2.18 onwards\r\n\r\n**OS**\r\nmacOS\r\n\r\n**Additional info**\r\nIntroduced in #20604 \n", "before_files": [{"content": "# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nimport itertools\nimport os\nfrom dataclasses import dataclass\nfrom typing import Iterable, Mapping, Sequence, cast\n\nfrom pants.base.build_root import BuildRoot\nfrom pants.core.goals.generate_lockfiles import (\n GenerateToolLockfileSentinel,\n KnownUserResolveNames,\n KnownUserResolveNamesRequest,\n UnrecognizedResolveNamesError,\n)\nfrom pants.core.util_rules.distdir import DistDir\nfrom pants.core.util_rules.environments import _warn_on_non_local_environments\nfrom pants.engine.collection import Collection\nfrom pants.engine.console import Console\nfrom pants.engine.env_vars import EnvironmentVars, EnvironmentVarsRequest\nfrom pants.engine.environment import EnvironmentName\nfrom pants.engine.fs import EMPTY_DIGEST, AddPrefix, Digest, MergeDigests, Workspace\nfrom pants.engine.goal import Goal, GoalSubsystem\nfrom pants.engine.internals.selectors import Effect, Get, MultiGet\nfrom pants.engine.process import InteractiveProcess, InteractiveProcessResult\nfrom pants.engine.rules import collect_rules, goal_rule\nfrom pants.engine.target import FilteredTargets, Target\nfrom pants.engine.unions import UnionMembership, union\nfrom pants.option.option_types import StrListOption\nfrom pants.util.dirutil import safe_rmtree\nfrom pants.util.frozendict import FrozenDict\nfrom pants.util.strutil import softwrap\n\n\nclass ExportError(Exception):\n pass\n\n\n@union(in_scope_types=[EnvironmentName])\n@dataclass(frozen=True)\nclass ExportRequest:\n \"\"\"A union for exportable data provided by a backend.\n\n Subclass and install a member of this type to export data.\n \"\"\"\n\n targets: Sequence[Target]\n\n\n@dataclass(frozen=True)\nclass PostProcessingCommand:\n \"\"\"A command to run as a local process after an exported digest is materialized.\"\"\"\n\n # Values in the argv tuple can contain the format specifier \"{digest_root}\", which will be\n # substituted with the (absolute) path to the location under distdir in which the\n # digest is materialized.\n argv: tuple[str, ...]\n # The command will be run with an environment consisting of just PATH, set to the Pants\n # process's own PATH env var, plus these extra env vars.\n extra_env: FrozenDict[str, str]\n\n def __init__(\n self,\n argv: Iterable[str],\n extra_env: Mapping[str, str] = FrozenDict(),\n ):\n object.__setattr__(self, \"argv\", tuple(argv))\n object.__setattr__(self, 
\"extra_env\", FrozenDict(extra_env))\n\n\n@dataclass(frozen=True)\nclass ExportResult:\n description: str\n # Materialize digests under this reldir.\n reldir: str\n # Materialize this digest.\n digest: Digest\n # Run these commands as local processes after the digest is materialized.\n post_processing_cmds: tuple[PostProcessingCommand, ...]\n # Set for the common special case of exporting a resolve, and names that resolve.\n # Set to None for other export results.\n resolve: str | None\n\n def __init__(\n self,\n description: str,\n reldir: str,\n *,\n digest: Digest = EMPTY_DIGEST,\n post_processing_cmds: Iterable[PostProcessingCommand] = tuple(),\n resolve: str | None = None,\n ):\n object.__setattr__(self, \"description\", description)\n object.__setattr__(self, \"reldir\", reldir)\n object.__setattr__(self, \"digest\", digest)\n object.__setattr__(self, \"post_processing_cmds\", tuple(post_processing_cmds))\n object.__setattr__(self, \"resolve\", resolve)\n\n\nclass ExportResults(Collection[ExportResult]):\n pass\n\n\nclass ExportSubsystem(GoalSubsystem):\n name = \"export\"\n help = softwrap(\n \"\"\"\n Export Pants data for use in other tools, such as IDEs.\n\n :::caution Exporting tools requires creating a custom lockfile for them\n Follow [the instructions for creating tool lockfiles](../../docs/python/overview/lockfiles#lockfiles-for-tools)\n :::\n \"\"\"\n )\n\n # NB: Only options that are relevant across many/most backends and languages\n # should be defined here. Backend-specific options should be defined in that backend\n # as plugin options on this subsystem.\n\n # Exporting resolves is a common use-case for `export`, often the primary one, so we\n # add affordances for it at the core goal level.\n resolve = StrListOption(\n default=[],\n help=\"Export the specified resolve(s). 
The export format is backend-specific, \"\n \"e.g., Python resolves are exported as virtualenvs.\",\n )\n\n\nclass Export(Goal):\n subsystem_cls = ExportSubsystem\n environment_behavior = Goal.EnvironmentBehavior.LOCAL_ONLY\n\n\n@goal_rule\nasync def export(\n console: Console,\n targets: FilteredTargets,\n workspace: Workspace,\n union_membership: UnionMembership,\n build_root: BuildRoot,\n dist_dir: DistDir,\n export_subsys: ExportSubsystem,\n) -> Export:\n request_types = cast(\"Iterable[type[ExportRequest]]\", union_membership.get(ExportRequest))\n requests = tuple(request_type(targets) for request_type in request_types)\n all_results = await MultiGet(Get(ExportResults, ExportRequest, request) for request in requests)\n flattened_results = [res for results in all_results for res in results]\n\n await _warn_on_non_local_environments(targets, \"the `export` goal\")\n\n prefixed_digests = await MultiGet(\n Get(Digest, AddPrefix(result.digest, result.reldir)) for result in flattened_results\n )\n output_dir = os.path.join(str(dist_dir.relpath), \"export\")\n for result in flattened_results:\n digest_root = os.path.join(build_root.path, output_dir, result.reldir)\n safe_rmtree(digest_root)\n merged_digest = await Get(Digest, MergeDigests(prefixed_digests))\n dist_digest = await Get(Digest, AddPrefix(merged_digest, output_dir))\n workspace.write_digest(dist_digest)\n environment = await Get(EnvironmentVars, EnvironmentVarsRequest([\"PATH\"]))\n resolves_exported = set()\n for result in flattened_results:\n result_dir = os.path.join(output_dir, result.reldir)\n digest_root = os.path.join(build_root.path, result_dir)\n for cmd in result.post_processing_cmds:\n argv = tuple(arg.format(digest_root=digest_root) for arg in cmd.argv)\n ip = InteractiveProcess(\n argv=argv,\n env={\"PATH\": environment.get(\"PATH\", \"\"), **cmd.extra_env},\n run_in_workspace=True,\n )\n ipr = await Effect(InteractiveProcessResult, InteractiveProcess, ip)\n if ipr.exit_code:\n raise ExportError(f\"Failed to write {result.description} to {result_dir}\")\n if result.resolve:\n resolves_exported.add(result.resolve)\n console.print_stdout(f\"Wrote {result.description} to {result_dir}\")\n\n unexported_resolves = sorted((set(export_subsys.resolve) - resolves_exported))\n if unexported_resolves:\n all_known_user_resolve_names = await MultiGet(\n Get(KnownUserResolveNames, KnownUserResolveNamesRequest, request())\n for request in union_membership.get(KnownUserResolveNamesRequest)\n )\n all_valid_resolve_names = sorted(\n {\n *itertools.chain.from_iterable(kurn.names for kurn in all_known_user_resolve_names),\n *(\n sentinel.resolve_name\n for sentinel in union_membership.get(GenerateToolLockfileSentinel)\n ),\n }\n )\n raise UnrecognizedResolveNamesError(\n unexported_resolves,\n all_valid_resolve_names,\n description_of_origin=\"the option --export-resolve\",\n )\n\n return Export(exit_code=0)\n\n\ndef rules():\n return collect_rules()\n", "path": "src/python/pants/core/goals/export.py"}], "after_files": [{"content": "# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nimport itertools\nimport os\nfrom dataclasses import dataclass\nfrom typing import Iterable, Mapping, Sequence, cast\n\nfrom pants.base.build_root import BuildRoot\nfrom pants.core.goals.generate_lockfiles import (\n GenerateToolLockfileSentinel,\n KnownUserResolveNames,\n KnownUserResolveNamesRequest,\n 
UnrecognizedResolveNamesError,\n)\nfrom pants.core.util_rules.distdir import DistDir\nfrom pants.core.util_rules.environments import _warn_on_non_local_environments\nfrom pants.engine.collection import Collection\nfrom pants.engine.console import Console\nfrom pants.engine.env_vars import EnvironmentVars, EnvironmentVarsRequest\nfrom pants.engine.environment import EnvironmentName\nfrom pants.engine.fs import EMPTY_DIGEST, AddPrefix, Digest, MergeDigests, Workspace\nfrom pants.engine.goal import Goal, GoalSubsystem\nfrom pants.engine.internals.selectors import Effect, Get, MultiGet\nfrom pants.engine.process import InteractiveProcess, InteractiveProcessResult\nfrom pants.engine.rules import collect_rules, goal_rule\nfrom pants.engine.target import FilteredTargets, Target\nfrom pants.engine.unions import UnionMembership, union\nfrom pants.option.option_types import StrListOption\nfrom pants.util.dirutil import safe_rmtree\nfrom pants.util.frozendict import FrozenDict\nfrom pants.util.strutil import softwrap\n\n\nclass ExportError(Exception):\n pass\n\n\n@union(in_scope_types=[EnvironmentName])\n@dataclass(frozen=True)\nclass ExportRequest:\n \"\"\"A union for exportable data provided by a backend.\n\n Subclass and install a member of this type to export data.\n \"\"\"\n\n targets: Sequence[Target]\n\n\n@dataclass(frozen=True)\nclass PostProcessingCommand:\n \"\"\"A command to run as a local process after an exported digest is materialized.\"\"\"\n\n # Values in the argv tuple can contain the format specifier \"{digest_root}\", which will be\n # substituted with the (absolute) path to the location under distdir in which the\n # digest is materialized.\n argv: tuple[str, ...]\n # The command will be run with an environment consisting of just PATH, set to the Pants\n # process's own PATH env var, plus these extra env vars.\n extra_env: FrozenDict[str, str]\n\n def __init__(\n self,\n argv: Iterable[str],\n extra_env: Mapping[str, str] = FrozenDict(),\n ):\n object.__setattr__(self, \"argv\", tuple(argv))\n object.__setattr__(self, \"extra_env\", FrozenDict(extra_env))\n\n\n@dataclass(frozen=True)\nclass ExportResult:\n description: str\n # Materialize digests under this reldir.\n reldir: str\n # Materialize this digest.\n digest: Digest\n # Run these commands as local processes after the digest is materialized.\n post_processing_cmds: tuple[PostProcessingCommand, ...]\n # Set for the common special case of exporting a resolve, and names that resolve.\n # Set to None for other export results.\n resolve: str | None\n\n def __init__(\n self,\n description: str,\n reldir: str,\n *,\n digest: Digest = EMPTY_DIGEST,\n post_processing_cmds: Iterable[PostProcessingCommand] = tuple(),\n resolve: str | None = None,\n ):\n object.__setattr__(self, \"description\", description)\n object.__setattr__(self, \"reldir\", reldir)\n object.__setattr__(self, \"digest\", digest)\n object.__setattr__(self, \"post_processing_cmds\", tuple(post_processing_cmds))\n object.__setattr__(self, \"resolve\", resolve)\n\n\nclass ExportResults(Collection[ExportResult]):\n pass\n\n\nclass ExportSubsystem(GoalSubsystem):\n name = \"export\"\n help = softwrap(\n \"\"\"\n Export Pants data for use in other tools, such as IDEs.\n\n :::caution Exporting tools requires creating a custom lockfile for them\n\n Follow [the instructions for creating tool lockfiles](../../docs/python/overview/lockfiles#lockfiles-for-tools)\n\n :::\n \"\"\"\n )\n\n # NB: Only options that are relevant across many/most backends and languages\n # should be 
defined here. Backend-specific options should be defined in that backend\n # as plugin options on this subsystem.\n\n # Exporting resolves is a common use-case for `export`, often the primary one, so we\n # add affordances for it at the core goal level.\n resolve = StrListOption(\n default=[],\n help=\"Export the specified resolve(s). The export format is backend-specific, \"\n \"e.g., Python resolves are exported as virtualenvs.\",\n )\n\n\nclass Export(Goal):\n subsystem_cls = ExportSubsystem\n environment_behavior = Goal.EnvironmentBehavior.LOCAL_ONLY\n\n\n@goal_rule\nasync def export(\n console: Console,\n targets: FilteredTargets,\n workspace: Workspace,\n union_membership: UnionMembership,\n build_root: BuildRoot,\n dist_dir: DistDir,\n export_subsys: ExportSubsystem,\n) -> Export:\n request_types = cast(\"Iterable[type[ExportRequest]]\", union_membership.get(ExportRequest))\n requests = tuple(request_type(targets) for request_type in request_types)\n all_results = await MultiGet(Get(ExportResults, ExportRequest, request) for request in requests)\n flattened_results = [res for results in all_results for res in results]\n\n await _warn_on_non_local_environments(targets, \"the `export` goal\")\n\n prefixed_digests = await MultiGet(\n Get(Digest, AddPrefix(result.digest, result.reldir)) for result in flattened_results\n )\n output_dir = os.path.join(str(dist_dir.relpath), \"export\")\n for result in flattened_results:\n digest_root = os.path.join(build_root.path, output_dir, result.reldir)\n safe_rmtree(digest_root)\n merged_digest = await Get(Digest, MergeDigests(prefixed_digests))\n dist_digest = await Get(Digest, AddPrefix(merged_digest, output_dir))\n workspace.write_digest(dist_digest)\n environment = await Get(EnvironmentVars, EnvironmentVarsRequest([\"PATH\"]))\n resolves_exported = set()\n for result in flattened_results:\n result_dir = os.path.join(output_dir, result.reldir)\n digest_root = os.path.join(build_root.path, result_dir)\n for cmd in result.post_processing_cmds:\n argv = tuple(arg.format(digest_root=digest_root) for arg in cmd.argv)\n ip = InteractiveProcess(\n argv=argv,\n env={\"PATH\": environment.get(\"PATH\", \"\"), **cmd.extra_env},\n run_in_workspace=True,\n )\n ipr = await Effect(InteractiveProcessResult, InteractiveProcess, ip)\n if ipr.exit_code:\n raise ExportError(f\"Failed to write {result.description} to {result_dir}\")\n if result.resolve:\n resolves_exported.add(result.resolve)\n console.print_stdout(f\"Wrote {result.description} to {result_dir}\")\n\n unexported_resolves = sorted((set(export_subsys.resolve) - resolves_exported))\n if unexported_resolves:\n all_known_user_resolve_names = await MultiGet(\n Get(KnownUserResolveNames, KnownUserResolveNamesRequest, request())\n for request in union_membership.get(KnownUserResolveNamesRequest)\n )\n all_valid_resolve_names = sorted(\n {\n *itertools.chain.from_iterable(kurn.names for kurn in all_known_user_resolve_names),\n *(\n sentinel.resolve_name\n for sentinel in union_membership.get(GenerateToolLockfileSentinel)\n ),\n }\n )\n raise UnrecognizedResolveNamesError(\n unexported_resolves,\n all_valid_resolve_names,\n description_of_origin=\"the option --export-resolve\",\n )\n\n return Export(exit_code=0)\n\n\ndef rules():\n return collect_rules()\n", "path": "src/python/pants/core/goals/export.py"}]} | 2,660 | 128 |
gh_patches_debug_25864 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-1089 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Implement filtering options for date & time types
## Problem
- We need to ensure that records that include columns of all Date & Time types support the following filters via API:
- between {x} and {y}
- is {x}
- is not {x}
- before {x}
- after {x}
- on or before {x}
  - on or after {x}
- is empty
- is not empty
- We also need to ensure that filter values don't have to be an exact date. We should accept natural language like "next month" or "tomorrow".
  - We could use https://dateparser.readthedocs.io/ (a minimal sketch appears after this issue's context list below)
This involves:
- Implementing the filters in the backend
- Updating the `/api/v0/databases/<id>/types/` endpoint to store available filters on this type
- Filter information should include the number of parameters needing to be passed in (e.g. `between` needs 2 parameters, `is empty` needs 0)
## Additional context
- We're using our fork of `sqlalchemy-filters` to provide filtering. See: https://github.com/centerofci/sqlalchemy-filters.
- This issue is blocked by implementation of Date & Time types:
- #424
- #425
- #426
- #557 provides some context on storing filters.
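To make the dateparser suggestion in the issue above concrete, here is a minimal sketch, assuming the `dateparser` package; the `build_date_filter` helper and the filter-spec shape are illustrative assumptions, not Mathesar's actual API.
```python
# Minimal sketch: normalise a natural-language value before building a filter.
import dateparser


def build_date_filter(field, op, raw_value):
    """Turn values like 'tomorrow' or '2021-05-01' into an ISO date for a filter spec."""
    parsed = dateparser.parse(raw_value)
    if parsed is None:
        raise ValueError(f"Could not interpret {raw_value!r} as a date")
    return {"field": field, "op": op, "value": parsed.date().isoformat()}


# e.g. {'field': 'due_date', 'op': 'ge', 'value': <tomorrow's ISO date>}
print(build_date_filter("due_date", "ge", "tomorrow"))
```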
Implement filtering options for duration type
## Problem
- We need to ensure that records that include columns of duration types support the following filters via API:
- between {x} and {y}
- equals {x}
- does not equal {x}
- greater than {x}
- less than {x}
- greater than or equals {x}
- less than or equals {x}
- is empty
- is not empty
- We also need to ensure that filter values don't have to be a number. We should accept natural language like "an hour" or "2 days".
  - We could use something like https://github.com/oleiade/durations or https://github.com/wroberts/pytimeparse (a minimal sketch appears at the end of this issue below)
This involves:
- Implementing the filters in the backend
- Updating the `/api/v0/databases/<id>/types/` endpoint to store available filters on this type
- Filter information should include the number of parameters needing to be passed in (e.g. `between` needs 2 parameters, `is empty` needs 0)
## Additional context
- These filters are the same as those for Number types. We should just reuse those.
- The only additional thing we need to implement is converting natural language to numbers.
- Relevant issue: #385
- We're using our fork of `sqlalchemy-filters` to provide filtering. See: https://github.com/centerofci/sqlalchemy-filters.
- This issue is blocked by implementation of #430
- #557 provides some context on storing filters.
Marking as blocked until #385 and #430 are complete.
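A similarly small sketch for the duration case, assuming pytimeparse's `timeparse` helper (which returns a number of seconds); the surrounding filter dict is again an illustrative assumption, not Mathesar's API.
```python
# Sketch only: convert natural-language durations to seconds, then reuse the numeric filters.
from pytimeparse.timeparse import timeparse


def build_duration_filter(field, op, raw_value):
    seconds = timeparse(raw_value)  # handles "1 hour" or "2 days"; a bare "an hour" would not parse
    if seconds is None:
        raise ValueError(f"Could not interpret {raw_value!r} as a duration")
    return {"field": field, "op": op, "value": seconds}


print(build_duration_filter("runtime", "lt", "2 days"))  # value == 172800
```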
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `db/types/base.py`
Content:
```
1 from enum import Enum
2
3 from sqlalchemy import create_engine
4
5 from db import constants
6
7 from db.functions import hints
8
9 from frozendict import frozendict
10
11
12 CHAR = 'char'
13 STRING = 'string'
14 VARCHAR = 'varchar'
15
16
17 class PostgresType(Enum):
18 """
19 This only includes built-in Postgres types that SQLAlchemy supports.
20 SQLAlchemy doesn't support XML. See zzzeek's comment on:
21 https://stackoverflow.com/questions/16153512/using-postgresql-xml-data-type-with-sqlalchemy
22 The values are keys returned by get_available_types.
23 """
24 _ARRAY = '_array'
25 BIGINT = 'bigint'
26 BIT_VARYING = 'bit varying'
27 BIT = 'bit'
28 BOOLEAN = 'boolean'
29 BYTEA = 'bytea'
30 CHAR = '"char"'
31 CHARACTER_VARYING = 'character varying'
32 CHARACTER = 'character'
33 CIDR = 'cidr'
34 DATE = 'date'
35 DATERANGE = 'daterange'
36 DECIMAL = 'decimal'
37 DOUBLE_PRECISION = 'double precision'
38 FLOAT = 'float'
39 HSTORE = 'hstore'
40 INET = 'inet'
41 INT4RANGE = 'int4range'
42 INT8RANGE = 'int8range'
43 INTEGER = 'integer'
44 INTERVAL = 'interval'
45 JSON = 'json'
46 JSONB = 'jsonb'
47 MACADDR = 'macaddr'
48 MONEY = 'money'
49 NAME = 'name'
50 NUMERIC = 'numeric'
51 NUMRANGE = 'numrange'
52 OID = 'oid'
53 REAL = 'real'
54 REGCLASS = 'regclass'
55 SMALLINT = 'smallint'
56 TEXT = 'text'
57 TIME = 'time'
58 TIME_WITH_TIME_ZONE = 'time with time zone'
59 TIME_WITHOUT_TIME_ZONE = 'time without time zone'
60 TIMESTAMP = 'timestamp'
61 TIMESTAMP_WITH_TIME_ZONE = 'timestamp with time zone'
62 TIMESTAMP_WITHOUT_TIME_ZONE = 'timestamp without time zone'
63 TSRANGE = 'tsrange'
64 TSTZRANGE = 'tstzrange'
65 TSVECTOR = 'tsvector'
66 UUID = 'uuid'
67
68
69 class MathesarCustomType(Enum):
70 """
71 This is a list of custom Mathesar DB types.
72 Keys returned by get_available_types are of the format 'mathesar_types.VALUE'
73 """
74 EMAIL = 'email'
75 URI = 'uri'
76 MATHESAR_MONEY = 'mathesar_money'
77
78
79 _known_vanilla_db_types = tuple(postgres_type for postgres_type in PostgresType)
80
81
82 _known_custom_db_types = tuple(mathesar_custom_type for mathesar_custom_type in MathesarCustomType)
83
84
85 # Known database types are those that are defined on our PostgresType and MathesarCustomType Enums.
86 known_db_types = _known_vanilla_db_types + _known_custom_db_types
87
88
89 # Origin: https://www.python.org/dev/peps/pep-0616/#id17
90 def _remove_prefix(self: str, prefix: str, /) -> str:
91 """
92 This will remove the passed prefix, if it's there.
93 Otherwise, it will return the string unchanged.
94 """
95 if self.startswith(prefix):
96 return self[len(prefix):]
97 else:
98 return self[:]
99
100
101 def get_db_type_enum_from_id(db_type_id):
102 """
103 Gets an instance of either the PostgresType enum or the MathesarCustomType enum corresponding
104 to the provided db_type_id. If the id doesn't correspond to any of the mentioned enums,
105 returns None.
106 """
107 try:
108 return PostgresType(db_type_id)
109 except ValueError:
110 try:
111 # Sometimes MA type identifiers are qualified like so: `mathesar_types.uri`.
112 # We want to remove that prefix, when it's there, because MathesarCustomType
113 # enum stores type ids without a qualifier (e.g. `uri`).
114 possible_prefix = _ma_type_qualifier_prefix + '.'
115 preprocessed_db_type_id = _remove_prefix(db_type_id, possible_prefix)
116 return MathesarCustomType(preprocessed_db_type_id)
117 except ValueError:
118 return None
119
120
121 def _build_db_types_hinted():
122 """
123 Builds up a map of db types to hintsets.
124 """
125 # Start out by defining some hints manually.
126 db_types_hinted = {
127 PostgresType.BOOLEAN: tuple([
128 hints.boolean
129 ]),
130 MathesarCustomType.URI: tuple([
131 hints.uri
132 ]),
133 MathesarCustomType.EMAIL: tuple([
134 hints.email
135 ]),
136 }
137
138 # Then, start adding hints automatically.
139 # This is for many-to-many relationships, i.e. adding multiple identical hintsets to the
140 # hintsets of multiple db types.
141 def _add_to_db_type_hintsets(db_types, hints):
142 """
143 Mutates db_types_hinted to map every hint in `hints` to every DB type in `db_types`.
144 """
145 for db_type in db_types:
146 if db_type in db_types_hinted:
147 updated_hintset = tuple(set(db_types_hinted[db_type] + tuple(hints)))
148 db_types_hinted[db_type] = updated_hintset
149 else:
150 db_types_hinted[db_type] = tuple(hints)
151
152 # all types get the "any" hint
153 all_db_types = known_db_types
154 hints_for_all_db_types = (hints.any,)
155 _add_to_db_type_hintsets(all_db_types, hints_for_all_db_types)
156
157 # string-like types get the "string_like" hint
158 string_like_db_types = (
159 PostgresType.CHARACTER_VARYING,
160 PostgresType.CHARACTER,
161 PostgresType.TEXT,
162 MathesarCustomType.URI,
163 MathesarCustomType.EMAIL,
164 )
165 hints_for_string_like_types = (hints.string_like,)
166 _add_to_db_type_hintsets(string_like_db_types, hints_for_string_like_types)
167
168 # numeric types get the "comparable" hint
169 numeric_db_types = (
170 PostgresType.BIGINT,
171 PostgresType.DECIMAL,
172 PostgresType.DOUBLE_PRECISION,
173 PostgresType.FLOAT,
174 PostgresType.INTEGER,
175 PostgresType.SMALLINT,
176 PostgresType.NUMERIC,
177 PostgresType.REAL,
178 )
179 hints_for_numeric_db_types = (hints.comparable,)
180 _add_to_db_type_hintsets(numeric_db_types, hints_for_numeric_db_types)
181
182 return frozendict(db_types_hinted)
183
184
185 db_types_hinted = _build_db_types_hinted()
186
187
188 SCHEMA = f"{constants.MATHESAR_PREFIX}types"
189 # Since we want to have our identifiers quoted appropriately for use in
190 # PostgreSQL, we want to use the postgres dialect preparer to set this up.
191 preparer = create_engine("postgresql://").dialect.identifier_preparer
192
193
194 # Should usually equal `mathesar_types`
195 _ma_type_qualifier_prefix = preparer.quote_schema(SCHEMA)
196
197
198 def get_qualified_name(name):
199 return ".".join([_ma_type_qualifier_prefix, name])
200
201
202 def get_available_types(engine):
203 """
204 Returns a dict where the keys are database type names defined on the database associated with
205 provided Engine, and the values are their SQLAlchemy classes.
206 """
207 return engine.dialect.ischema_names
208
209
210 def get_available_known_db_types(engine):
211 """
212 Returns database types that are both available on the database and known through our Enums
213 above.
214 """
215 available_db_types = get_available_types(engine)
216 return tuple(
217 known_db_type
218 for known_db_type in known_db_types
219 if known_db_type.value in available_db_types
220 )
221
222
223 def get_db_type_name(sa_type, engine):
224 try:
225 db_type = sa_type.compile(dialect=engine.dialect)
226 except TypeError:
227 db_type = sa_type().compile(dialect=engine.dialect)
228 return db_type
229
```
--- END FILES ---
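As an aside on the hint map defined above: callers look a database type up in `db_types_hinted` to decide which filters to offer. The lookup below is a hypothetical sketch (the import path mirrors the file path shown, but this is not an excerpt from Mathesar):
```python
# Hypothetical usage of the hint map built in db/types/base.py above.
from db.types.base import PostgresType, db_types_hinted

date_hints = db_types_hinted.get(PostgresType.DATE, ())
# Before the patch below, DATE carries only the generic "any" hint, so no
# comparison filters (before/after/between) are offered for date/time columns.
print(date_hints)
```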
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/db/types/base.py b/db/types/base.py
--- a/db/types/base.py
+++ b/db/types/base.py
@@ -156,8 +156,10 @@
# string-like types get the "string_like" hint
string_like_db_types = (
- PostgresType.CHARACTER_VARYING,
+ PostgresType.CHAR,
PostgresType.CHARACTER,
+ PostgresType.CHARACTER_VARYING,
+ PostgresType.NAME,
PostgresType.TEXT,
MathesarCustomType.URI,
MathesarCustomType.EMAIL,
@@ -175,10 +177,25 @@
PostgresType.SMALLINT,
PostgresType.NUMERIC,
PostgresType.REAL,
+ PostgresType.MONEY,
)
hints_for_numeric_db_types = (hints.comparable,)
_add_to_db_type_hintsets(numeric_db_types, hints_for_numeric_db_types)
+ # time related types get the "comparable" hint
+ time_related_db_types = (
+ PostgresType.DATE,
+ PostgresType.TIME,
+ PostgresType.TIME_WITH_TIME_ZONE,
+ PostgresType.TIME_WITHOUT_TIME_ZONE,
+ PostgresType.TIMESTAMP,
+ PostgresType.TIMESTAMP_WITH_TIME_ZONE,
+ PostgresType.TIMESTAMP_WITHOUT_TIME_ZONE,
+ PostgresType.INTERVAL,
+ )
+ hints_for_time_related_types = (hints.comparable,)
+ _add_to_db_type_hintsets(time_related_db_types, hints_for_time_related_types)
+
return frozendict(db_types_hinted)
| {"golden_diff": "diff --git a/db/types/base.py b/db/types/base.py\n--- a/db/types/base.py\n+++ b/db/types/base.py\n@@ -156,8 +156,10 @@\n \n # string-like types get the \"string_like\" hint\n string_like_db_types = (\n- PostgresType.CHARACTER_VARYING,\n+ PostgresType.CHAR,\n PostgresType.CHARACTER,\n+ PostgresType.CHARACTER_VARYING,\n+ PostgresType.NAME,\n PostgresType.TEXT,\n MathesarCustomType.URI,\n MathesarCustomType.EMAIL,\n@@ -175,10 +177,25 @@\n PostgresType.SMALLINT,\n PostgresType.NUMERIC,\n PostgresType.REAL,\n+ PostgresType.MONEY,\n )\n hints_for_numeric_db_types = (hints.comparable,)\n _add_to_db_type_hintsets(numeric_db_types, hints_for_numeric_db_types)\n \n+ # time related types get the \"comparable\" hint\n+ time_related_db_types = (\n+ PostgresType.DATE,\n+ PostgresType.TIME,\n+ PostgresType.TIME_WITH_TIME_ZONE,\n+ PostgresType.TIME_WITHOUT_TIME_ZONE,\n+ PostgresType.TIMESTAMP,\n+ PostgresType.TIMESTAMP_WITH_TIME_ZONE,\n+ PostgresType.TIMESTAMP_WITHOUT_TIME_ZONE,\n+ PostgresType.INTERVAL,\n+ )\n+ hints_for_time_related_types = (hints.comparable,)\n+ _add_to_db_type_hintsets(time_related_db_types, hints_for_time_related_types)\n+\n return frozendict(db_types_hinted)\n", "issue": "Implement filtering options for date & time types\n## Problem\r\n- We need to ensure that records that include columns of all Date & Time types support the following filters via API: \r\n - between {x} and {y}\r\n - is {x}\r\n - is not {x}\r\n - before {x}\r\n - after {x}\r\n - on or before {x}\r\n - or or after {x}\r\n - is empty\r\n - is not empty\r\n- We need to also ensure that values of the filters don't have to be an exact date. We should accept natural language like \"next month\", or \"tomorrow\".\r\n - We could use https://dateparser.readthedocs.io/\r\n\r\nThis involves:\r\n- Implementing the filters in the backend\r\n- Updating the `/api/v0/databases/<id>/types/` endpoint to store available filters on this type\r\n - Filter information should include the number of parameters needing to be passed in (e.g. `between` needs 2 parameters, `is empty` needs 0)\r\n\r\n## Additional context\r\n- We're using our fork of `sqlalchemy-filters` to provide filtering. See: https://github.com/centerofci/sqlalchemy-filters.\r\n- This issue is blocked by implementation of Date & Time types: \r\n - #424 \r\n - #425 \r\n - #426\r\n- #557 provides some context on storing filters.\nImplement filtering options for duration type\n## Problem\r\n- We need to ensure that records that include columns of duration types support the following filters via API: \r\n - between {x} and {y}\r\n - equals {x}\r\n - does not equal {x}\r\n - greater than {x}\r\n - less than {x}\r\n - greater than or equals {x}\r\n - less than or equals {x}\r\n - is empty\r\n - is not empty\r\n- We need to also ensure that values of the filters don't have to be a number. We should accept natural language like \"an hour\", or \"2 days\".\r\n - We could use something like https://github.com/oleiade/durations or https://github.com/wroberts/pytimeparse\r\n\r\nThis involves:\r\n- Implementing the filters in the backend\r\n- Updating the `/api/v0/databases/<id>/types/` endpoint to store available filters on this type\r\n - Filter information should include the number of parameters needing to be passed in (e.g. `between` needs 2 parameters, `is empty` needs 0)\r\n\r\n## Additional context\r\n- These filters are the same as those for Number types. 
We should just reuse those.\r\n - The only additional thing we need to implement is converting natural language to numbers.\r\n - Relevant issue: #385 \r\n- We're using our fork of `sqlalchemy-filters` to provide filtering. See: https://github.com/centerofci/sqlalchemy-filters.\r\n- This issue is blocked by implementation of #430 \r\n- #557 provides some context on storing filters.\r\n\r\nMarking as blocked until #385 and #430 are complete.\n", "before_files": [{"content": "from enum import Enum\n\nfrom sqlalchemy import create_engine\n\nfrom db import constants\n\nfrom db.functions import hints\n\nfrom frozendict import frozendict\n\n\nCHAR = 'char'\nSTRING = 'string'\nVARCHAR = 'varchar'\n\n\nclass PostgresType(Enum):\n \"\"\"\n This only includes built-in Postgres types that SQLAlchemy supports.\n SQLAlchemy doesn't support XML. See zzzeek's comment on:\n https://stackoverflow.com/questions/16153512/using-postgresql-xml-data-type-with-sqlalchemy\n The values are keys returned by get_available_types.\n \"\"\"\n _ARRAY = '_array'\n BIGINT = 'bigint'\n BIT_VARYING = 'bit varying'\n BIT = 'bit'\n BOOLEAN = 'boolean'\n BYTEA = 'bytea'\n CHAR = '\"char\"'\n CHARACTER_VARYING = 'character varying'\n CHARACTER = 'character'\n CIDR = 'cidr'\n DATE = 'date'\n DATERANGE = 'daterange'\n DECIMAL = 'decimal'\n DOUBLE_PRECISION = 'double precision'\n FLOAT = 'float'\n HSTORE = 'hstore'\n INET = 'inet'\n INT4RANGE = 'int4range'\n INT8RANGE = 'int8range'\n INTEGER = 'integer'\n INTERVAL = 'interval'\n JSON = 'json'\n JSONB = 'jsonb'\n MACADDR = 'macaddr'\n MONEY = 'money'\n NAME = 'name'\n NUMERIC = 'numeric'\n NUMRANGE = 'numrange'\n OID = 'oid'\n REAL = 'real'\n REGCLASS = 'regclass'\n SMALLINT = 'smallint'\n TEXT = 'text'\n TIME = 'time'\n TIME_WITH_TIME_ZONE = 'time with time zone'\n TIME_WITHOUT_TIME_ZONE = 'time without time zone'\n TIMESTAMP = 'timestamp'\n TIMESTAMP_WITH_TIME_ZONE = 'timestamp with time zone'\n TIMESTAMP_WITHOUT_TIME_ZONE = 'timestamp without time zone'\n TSRANGE = 'tsrange'\n TSTZRANGE = 'tstzrange'\n TSVECTOR = 'tsvector'\n UUID = 'uuid'\n\n\nclass MathesarCustomType(Enum):\n \"\"\"\n This is a list of custom Mathesar DB types.\n Keys returned by get_available_types are of the format 'mathesar_types.VALUE'\n \"\"\"\n EMAIL = 'email'\n URI = 'uri'\n MATHESAR_MONEY = 'mathesar_money'\n\n\n_known_vanilla_db_types = tuple(postgres_type for postgres_type in PostgresType)\n\n\n_known_custom_db_types = tuple(mathesar_custom_type for mathesar_custom_type in MathesarCustomType)\n\n\n# Known database types are those that are defined on our PostgresType and MathesarCustomType Enums.\nknown_db_types = _known_vanilla_db_types + _known_custom_db_types\n\n\n# Origin: https://www.python.org/dev/peps/pep-0616/#id17\ndef _remove_prefix(self: str, prefix: str, /) -> str:\n \"\"\"\n This will remove the passed prefix, if it's there.\n Otherwise, it will return the string unchanged.\n \"\"\"\n if self.startswith(prefix):\n return self[len(prefix):]\n else:\n return self[:]\n\n\ndef get_db_type_enum_from_id(db_type_id):\n \"\"\"\n Gets an instance of either the PostgresType enum or the MathesarCustomType enum corresponding\n to the provided db_type_id. 
If the id doesn't correspond to any of the mentioned enums,\n returns None.\n \"\"\"\n try:\n return PostgresType(db_type_id)\n except ValueError:\n try:\n # Sometimes MA type identifiers are qualified like so: `mathesar_types.uri`.\n # We want to remove that prefix, when it's there, because MathesarCustomType\n # enum stores type ids without a qualifier (e.g. `uri`).\n possible_prefix = _ma_type_qualifier_prefix + '.'\n preprocessed_db_type_id = _remove_prefix(db_type_id, possible_prefix)\n return MathesarCustomType(preprocessed_db_type_id)\n except ValueError:\n return None\n\n\ndef _build_db_types_hinted():\n \"\"\"\n Builds up a map of db types to hintsets.\n \"\"\"\n # Start out by defining some hints manually.\n db_types_hinted = {\n PostgresType.BOOLEAN: tuple([\n hints.boolean\n ]),\n MathesarCustomType.URI: tuple([\n hints.uri\n ]),\n MathesarCustomType.EMAIL: tuple([\n hints.email\n ]),\n }\n\n # Then, start adding hints automatically.\n # This is for many-to-many relationships, i.e. adding multiple identical hintsets to the\n # hintsets of multiple db types.\n def _add_to_db_type_hintsets(db_types, hints):\n \"\"\"\n Mutates db_types_hinted to map every hint in `hints` to every DB type in `db_types`.\n \"\"\"\n for db_type in db_types:\n if db_type in db_types_hinted:\n updated_hintset = tuple(set(db_types_hinted[db_type] + tuple(hints)))\n db_types_hinted[db_type] = updated_hintset\n else:\n db_types_hinted[db_type] = tuple(hints)\n\n # all types get the \"any\" hint\n all_db_types = known_db_types\n hints_for_all_db_types = (hints.any,)\n _add_to_db_type_hintsets(all_db_types, hints_for_all_db_types)\n\n # string-like types get the \"string_like\" hint\n string_like_db_types = (\n PostgresType.CHARACTER_VARYING,\n PostgresType.CHARACTER,\n PostgresType.TEXT,\n MathesarCustomType.URI,\n MathesarCustomType.EMAIL,\n )\n hints_for_string_like_types = (hints.string_like,)\n _add_to_db_type_hintsets(string_like_db_types, hints_for_string_like_types)\n\n # numeric types get the \"comparable\" hint\n numeric_db_types = (\n PostgresType.BIGINT,\n PostgresType.DECIMAL,\n PostgresType.DOUBLE_PRECISION,\n PostgresType.FLOAT,\n PostgresType.INTEGER,\n PostgresType.SMALLINT,\n PostgresType.NUMERIC,\n PostgresType.REAL,\n )\n hints_for_numeric_db_types = (hints.comparable,)\n _add_to_db_type_hintsets(numeric_db_types, hints_for_numeric_db_types)\n\n return frozendict(db_types_hinted)\n\n\ndb_types_hinted = _build_db_types_hinted()\n\n\nSCHEMA = f\"{constants.MATHESAR_PREFIX}types\"\n# Since we want to have our identifiers quoted appropriately for use in\n# PostgreSQL, we want to use the postgres dialect preparer to set this up.\npreparer = create_engine(\"postgresql://\").dialect.identifier_preparer\n\n\n# Should usually equal `mathesar_types`\n_ma_type_qualifier_prefix = preparer.quote_schema(SCHEMA)\n\n\ndef get_qualified_name(name):\n return \".\".join([_ma_type_qualifier_prefix, name])\n\n\ndef get_available_types(engine):\n \"\"\"\n Returns a dict where the keys are database type names defined on the database associated with\n provided Engine, and the values are their SQLAlchemy classes.\n \"\"\"\n return engine.dialect.ischema_names\n\n\ndef get_available_known_db_types(engine):\n \"\"\"\n Returns database types that are both available on the database and known through our Enums\n above.\n \"\"\"\n available_db_types = get_available_types(engine)\n return tuple(\n known_db_type\n for known_db_type in known_db_types\n if known_db_type.value in available_db_types\n )\n\n\ndef 
get_db_type_name(sa_type, engine):\n try:\n db_type = sa_type.compile(dialect=engine.dialect)\n except TypeError:\n db_type = sa_type().compile(dialect=engine.dialect)\n return db_type\n", "path": "db/types/base.py"}], "after_files": [{"content": "from enum import Enum\n\nfrom sqlalchemy import create_engine\n\nfrom db import constants\n\nfrom db.functions import hints\n\nfrom frozendict import frozendict\n\n\nCHAR = 'char'\nSTRING = 'string'\nVARCHAR = 'varchar'\n\n\nclass PostgresType(Enum):\n \"\"\"\n This only includes built-in Postgres types that SQLAlchemy supports.\n SQLAlchemy doesn't support XML. See zzzeek's comment on:\n https://stackoverflow.com/questions/16153512/using-postgresql-xml-data-type-with-sqlalchemy\n The values are keys returned by get_available_types.\n \"\"\"\n _ARRAY = '_array'\n BIGINT = 'bigint'\n BIT_VARYING = 'bit varying'\n BIT = 'bit'\n BOOLEAN = 'boolean'\n BYTEA = 'bytea'\n CHAR = '\"char\"'\n CHARACTER_VARYING = 'character varying'\n CHARACTER = 'character'\n CIDR = 'cidr'\n DATE = 'date'\n DATERANGE = 'daterange'\n DECIMAL = 'decimal'\n DOUBLE_PRECISION = 'double precision'\n FLOAT = 'float'\n HSTORE = 'hstore'\n INET = 'inet'\n INT4RANGE = 'int4range'\n INT8RANGE = 'int8range'\n INTEGER = 'integer'\n INTERVAL = 'interval'\n JSON = 'json'\n JSONB = 'jsonb'\n MACADDR = 'macaddr'\n MONEY = 'money'\n NAME = 'name'\n NUMERIC = 'numeric'\n NUMRANGE = 'numrange'\n OID = 'oid'\n REAL = 'real'\n REGCLASS = 'regclass'\n SMALLINT = 'smallint'\n TEXT = 'text'\n TIME = 'time'\n TIME_WITH_TIME_ZONE = 'time with time zone'\n TIME_WITHOUT_TIME_ZONE = 'time without time zone'\n TIMESTAMP = 'timestamp'\n TIMESTAMP_WITH_TIME_ZONE = 'timestamp with time zone'\n TIMESTAMP_WITHOUT_TIME_ZONE = 'timestamp without time zone'\n TSRANGE = 'tsrange'\n TSTZRANGE = 'tstzrange'\n TSVECTOR = 'tsvector'\n UUID = 'uuid'\n\n\nclass MathesarCustomType(Enum):\n \"\"\"\n This is a list of custom Mathesar DB types.\n Keys returned by get_available_types are of the format 'mathesar_types.VALUE'\n \"\"\"\n EMAIL = 'email'\n URI = 'uri'\n MATHESAR_MONEY = 'mathesar_money'\n\n\n_known_vanilla_db_types = tuple(postgres_type for postgres_type in PostgresType)\n\n\n_known_custom_db_types = tuple(mathesar_custom_type for mathesar_custom_type in MathesarCustomType)\n\n\n# Known database types are those that are defined on our PostgresType and MathesarCustomType Enums.\nknown_db_types = _known_vanilla_db_types + _known_custom_db_types\n\n\n# Origin: https://www.python.org/dev/peps/pep-0616/#id17\ndef _remove_prefix(self: str, prefix: str, /) -> str:\n \"\"\"\n This will remove the passed prefix, if it's there.\n Otherwise, it will return the string unchanged.\n \"\"\"\n if self.startswith(prefix):\n return self[len(prefix):]\n else:\n return self[:]\n\n\ndef get_db_type_enum_from_id(db_type_id):\n \"\"\"\n Gets an instance of either the PostgresType enum or the MathesarCustomType enum corresponding\n to the provided db_type_id. If the id doesn't correspond to any of the mentioned enums,\n returns None.\n \"\"\"\n try:\n return PostgresType(db_type_id)\n except ValueError:\n try:\n # Sometimes MA type identifiers are qualified like so: `mathesar_types.uri`.\n # We want to remove that prefix, when it's there, because MathesarCustomType\n # enum stores type ids without a qualifier (e.g. 
`uri`).\n possible_prefix = _ma_type_qualifier_prefix + '.'\n preprocessed_db_type_id = _remove_prefix(db_type_id, possible_prefix)\n return MathesarCustomType(preprocessed_db_type_id)\n except ValueError:\n return None\n\n\ndef _build_db_types_hinted():\n \"\"\"\n Builds up a map of db types to hintsets.\n \"\"\"\n # Start out by defining some hints manually.\n db_types_hinted = {\n PostgresType.BOOLEAN: tuple([\n hints.boolean\n ]),\n MathesarCustomType.URI: tuple([\n hints.uri\n ]),\n MathesarCustomType.EMAIL: tuple([\n hints.email\n ]),\n }\n\n # Then, start adding hints automatically.\n # This is for many-to-many relationships, i.e. adding multiple identical hintsets to the\n # hintsets of multiple db types.\n def _add_to_db_type_hintsets(db_types, hints):\n \"\"\"\n Mutates db_types_hinted to map every hint in `hints` to every DB type in `db_types`.\n \"\"\"\n for db_type in db_types:\n if db_type in db_types_hinted:\n updated_hintset = tuple(set(db_types_hinted[db_type] + tuple(hints)))\n db_types_hinted[db_type] = updated_hintset\n else:\n db_types_hinted[db_type] = tuple(hints)\n\n # all types get the \"any\" hint\n all_db_types = known_db_types\n hints_for_all_db_types = (hints.any,)\n _add_to_db_type_hintsets(all_db_types, hints_for_all_db_types)\n\n # string-like types get the \"string_like\" hint\n string_like_db_types = (\n PostgresType.CHAR,\n PostgresType.CHARACTER,\n PostgresType.CHARACTER_VARYING,\n PostgresType.NAME,\n PostgresType.TEXT,\n MathesarCustomType.URI,\n MathesarCustomType.EMAIL,\n )\n hints_for_string_like_types = (hints.string_like,)\n _add_to_db_type_hintsets(string_like_db_types, hints_for_string_like_types)\n\n # numeric types get the \"comparable\" hint\n numeric_db_types = (\n PostgresType.BIGINT,\n PostgresType.DECIMAL,\n PostgresType.DOUBLE_PRECISION,\n PostgresType.FLOAT,\n PostgresType.INTEGER,\n PostgresType.SMALLINT,\n PostgresType.NUMERIC,\n PostgresType.REAL,\n PostgresType.MONEY,\n )\n hints_for_numeric_db_types = (hints.comparable,)\n _add_to_db_type_hintsets(numeric_db_types, hints_for_numeric_db_types)\n\n # time related types get the \"comparable\" hint\n time_related_db_types = (\n PostgresType.DATE,\n PostgresType.TIME,\n PostgresType.TIME_WITH_TIME_ZONE,\n PostgresType.TIME_WITHOUT_TIME_ZONE,\n PostgresType.TIMESTAMP,\n PostgresType.TIMESTAMP_WITH_TIME_ZONE,\n PostgresType.TIMESTAMP_WITHOUT_TIME_ZONE,\n PostgresType.INTERVAL,\n )\n hints_for_time_related_types = (hints.comparable,)\n _add_to_db_type_hintsets(time_related_db_types, hints_for_time_related_types)\n\n return frozendict(db_types_hinted)\n\n\ndb_types_hinted = _build_db_types_hinted()\n\n\nSCHEMA = f\"{constants.MATHESAR_PREFIX}types\"\n# Since we want to have our identifiers quoted appropriately for use in\n# PostgreSQL, we want to use the postgres dialect preparer to set this up.\npreparer = create_engine(\"postgresql://\").dialect.identifier_preparer\n\n\n# Should usually equal `mathesar_types`\n_ma_type_qualifier_prefix = preparer.quote_schema(SCHEMA)\n\n\ndef get_qualified_name(name):\n return \".\".join([_ma_type_qualifier_prefix, name])\n\n\ndef get_available_types(engine):\n \"\"\"\n Returns a dict where the keys are database type names defined on the database associated with\n provided Engine, and the values are their SQLAlchemy classes.\n \"\"\"\n return engine.dialect.ischema_names\n\n\ndef get_available_known_db_types(engine):\n \"\"\"\n Returns database types that are both available on the database and known through our Enums\n above.\n \"\"\"\n available_db_types 
= get_available_types(engine)\n return tuple(\n known_db_type\n for known_db_type in known_db_types\n if known_db_type.value in available_db_types\n )\n\n\ndef get_db_type_name(sa_type, engine):\n try:\n db_type = sa_type.compile(dialect=engine.dialect)\n except TypeError:\n db_type = sa_type().compile(dialect=engine.dialect)\n return db_type\n", "path": "db/types/base.py"}]} | 3,209 | 354 |
gh_patches_debug_7990 | rasdani/github-patches | git_diff | CiviWiki__OpenCiviWiki-1380 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
enable Rich terminal output
## Task
- [ ] add Rich to this project with the command `poetry add rich --group dev`
- [ ] follow the Rich [configuration instructions](https://rich.readthedocs.io/en/stable/introduction.html)
- [ ] add the following code to the `LOGGING = {...}` configuration in the project settings
```py
"formatters": {
"rich": {"datefmt": "[%X]"},
},
```
```py
"console": {
"class": "rich.logging.RichHandler",
"filters": ["require_debug_true"],
"formatter": "rich",
"level": "DEBUG",
"rich_tracebacks": True,
"tracebacks_show_locals": True,
},
```
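Pieced together, the two fragments above would slot into a complete `LOGGING` dict roughly as sketched below. The `require_debug_true` filter is not defined in the fragments, so a standard Django `RequireDebugTrue` filter is assumed here; note that the configuration actually merged (see the diff further down) drops the filter and logs at `WARNING` instead.
```py
# Sketch of the requested config; exact levels and filters may differ from what was merged.
LOGGING = {
    "version": 1,
    "disable_existing_loggers": False,
    "filters": {
        # Assumed definition backing the "require_debug_true" reference above.
        "require_debug_true": {"()": "django.utils.log.RequireDebugTrue"},
    },
    "formatters": {
        "rich": {"datefmt": "[%X]"},
    },
    "handlers": {
        "console": {
            "class": "rich.logging.RichHandler",
            "filters": ["require_debug_true"],
            "formatter": "rich",
            "level": "DEBUG",
            "rich_tracebacks": True,
            "tracebacks_show_locals": True,
        },
    },
    "loggers": {
        "django": {"handlers": ["console"]},
    },
}
```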
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `project/core/settings.py`
Content:
```
1 """
2 Django settings for civiwiki project.
3 Darius Calliet May 12, 2016
4
5 Production settings file to select proper environment variables.
6 """
7 import os
8
9 # False if not in os.environ
10 DEBUG = os.getenv("DEBUG", False)
11
12 # defaults to second value if not found in os.environ
13 DJANGO_HOST = os.getenv("DJANGO_HOST", "LOCALHOST")
14
15 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
16 SECRET_KEY = os.getenv("DJANGO_SECRET_KEY", "TEST_KEY_FOR_DEVELOPMENT")
17 ALLOWED_HOSTS = [".herokuapp.com", ".civiwiki.org", "127.0.0.1", "localhost", "0.0.0.0"]
18
19 INSTALLED_APPS = (
20 "django.contrib.admin",
21 "django.contrib.auth",
22 "django.contrib.contenttypes",
23 "django.contrib.sessions",
24 "django.contrib.messages",
25 "django.contrib.staticfiles",
26 "django_extensions",
27 "storages",
28 "core",
29 "rest_framework",
30 "accounts.apps.AccountsConfig",
31 "threads",
32 "notifications",
33 "corsheaders",
34 "taggit",
35 "categories",
36 "notification",
37 )
38
39 MIDDLEWARE = [
40 "corsheaders.middleware.CorsMiddleware",
41 "django.middleware.security.SecurityMiddleware",
42 "whitenoise.middleware.WhiteNoiseMiddleware",
43 "django.contrib.sessions.middleware.SessionMiddleware",
44 "django.middleware.common.CommonMiddleware",
45 "django.middleware.csrf.CsrfViewMiddleware",
46 "django.contrib.auth.middleware.AuthenticationMiddleware",
47 # 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
48 "django.contrib.messages.middleware.MessageMiddleware",
49 "django.middleware.clickjacking.XFrameOptionsMiddleware",
50 ]
51
52 CSRF_USE_SESSIONS = (
53 True # Store the CSRF token in the users session instead of in a cookie
54 )
55
56 CORS_ORIGIN_ALLOW_ALL = True
57 ROOT_URLCONF = "core.urls"
58
59 # SSL Setup
60 if DJANGO_HOST != "LOCALHOST":
61 SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
62 SECURE_SSL_REDIRECT = True
63 SESSION_COOKIE_SECURE = True
64 CSRF_COOKIE_SECURE = True
65
66 # Internationalization & Localization
67 LANGUAGE_CODE = "en-us"
68 TIME_ZONE = "UTC"
69 USE_I18N = True
70 USE_L10N = True
71 USE_TZ = True
72
73 TEMPLATES = [
74 {
75 "BACKEND": "django.template.backends.django.DjangoTemplates",
76 "DIRS": [
77 os.path.join(BASE_DIR, "threads/templates/threads"),
78 os.path.join(BASE_DIR, "accounts/templates/accounts"),
79 ], # TODO: Add non-webapp template directory
80 "APP_DIRS": True,
81 "OPTIONS": {
82 "context_processors": [
83 "django.template.context_processors.debug",
84 "django.template.context_processors.request",
85 "django.contrib.auth.context_processors.auth",
86 "django.contrib.messages.context_processors.messages",
87 ],
88 },
89 },
90 ]
91
92 WSGI_APPLICATION = "core.wsgi.application"
93
94 # Apex Contact for Production Errors
95 ADMINS = [("Development Team", "[email protected]")]
96
97 STATIC_URL = "/static/"
98 STATICFILES_DIRS = (os.path.join(BASE_DIR, "core/templates/static"),)
99 STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
100
101 MEDIA_ROOT = os.path.join(BASE_DIR, "media")
102 MEDIA_URL = "/media/"
103
104 # TODO: re-organize and simplify staticfiles settings
105 if "CIVIWIKI_LOCAL_NAME" not in os.environ:
106 STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
107
108 # Use DATABASE_URL in production
109 DATABASE_URL = os.getenv("DATABASE_URL")
110
111 if DATABASE_URL is not None:
112 DATABASES = {"default": DATABASE_URL}
113 else:
114 # Default to sqlite for simplicity in development
115 DATABASES = {
116 "default": {
117 "ENGINE": "django.db.backends.sqlite3",
118 "NAME": BASE_DIR + "/" + "db.sqlite3",
119 }
120 }
121
122 # Email Backend Setup
123 if "EMAIL_HOST" not in os.environ:
124 EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
125 EMAIL_HOST_USER = "[email protected]"
126 else:
127 EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
128 EMAIL_HOST = os.getenv("EMAIL_HOST")
129 EMAIL_PORT = os.getenv("EMAIL_PORT")
130 EMAIL_HOST_USER = os.getenv("EMAIL_HOST_USER")
131 EMAIL_HOST_PASSWORD = os.getenv("EMAIL_HOST_PASSWORD")
132 EMAIL_USE_SSL = True
133 DEFAULT_FROM_EMAIL = EMAIL_HOST
134
135 # Notification API Settings
136 NOTIFICATIONS_SOFT_DELETE = True
137 NOTIFICATIONS_USE_JSONFIELD = True
138
139 # Django REST API Settings
140 DEFAULT_RENDERER_CLASSES = ("rest_framework.renderers.JSONRenderer",)
141
142 if DEBUG:
143 # Browsable HTML - Enabled only in Debug mode (dev)
144 DEFAULT_RENDERER_CLASSES = DEFAULT_RENDERER_CLASSES + (
145 "rest_framework.renderers.BrowsableAPIRenderer",
146 )
147
148 REST_FRAMEWORK = {
149 "DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.IsAuthenticated",),
150 "DEFAULT_RENDERER_CLASSES": DEFAULT_RENDERER_CLASSES,
151 "DEFAULT_AUTHENTICATION_CLASSES": (
152 "rest_framework.authentication.BasicAuthentication",
153 "rest_framework.authentication.SessionAuthentication",
154 ),
155 }
156
157 # CORS Settings
158 CORS_ORIGIN_ALLOW_ALL = True
159
160 # Custom User model
161 AUTH_USER_MODEL = "accounts.User"
162
163 DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
164
165 # Login Logout URLS
166 LOGIN_URL = "login/"
167 LOGIN_REDIRECT_URL = "/"
168 LOGOUT_REDIRECT_URL = "/"
169
170 AUTH_PASSWORD_VALIDATORS = [
171 {
172 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator", # noqa: E501
173 },
174 {
175 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
176 "OPTIONS": {
177 "min_length": 4,
178 },
179 },
180 {
181 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
182 },
183 {
184 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
185 },
186 ]
187
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/project/core/settings.py b/project/core/settings.py
--- a/project/core/settings.py
+++ b/project/core/settings.py
@@ -184,3 +184,20 @@
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
+
+LOGGING = {
+ "version": 1,
+ "disable_existing_loggers": False,
+ "formatters": {"rich": {"datefmt": "[%X]"}},
+ "handlers": {
+ "console": {
+ "class": "rich.logging.RichHandler",
+ "formatter": "rich",
+ "level": "WARNING",
+ # "filters": ["require_debug_true"],
+ "rich_tracebacks": True,
+ "tracebacks_show_locals": True,
+ }
+ },
+ "loggers": {"django": {"handlers": ["console"]}},
+}
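A minimal smoke test for the handler configured above — assuming `rich` is installed and this LOGGING dict is active in the Django settings — is to log a warning through the `django` logger from a Django shell:

```python
# Run inside `python manage.py shell` with the LOGGING config above loaded.
# This is a hypothetical check, not part of the patch itself.
import logging

log = logging.getLogger("django")
log.warning("This warning should be rendered by rich.logging.RichHandler")
```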
| {"golden_diff": "diff --git a/project/core/settings.py b/project/core/settings.py\n--- a/project/core/settings.py\n+++ b/project/core/settings.py\n@@ -184,3 +184,20 @@\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n ]\n+\n+LOGGING = {\n+ \"version\": 1,\n+ \"disable_existing_loggers\": False,\n+ \"formatters\": {\"rich\": {\"datefmt\": \"[%X]\"}},\n+ \"handlers\": {\n+ \"console\": {\n+ \"class\": \"rich.logging.RichHandler\",\n+ \"formatter\": \"rich\",\n+ \"level\": \"WARNING\",\n+ # \"filters\": [\"require_debug_true\"],\n+ \"rich_tracebacks\": True,\n+ \"tracebacks_show_locals\": True,\n+ }\n+ },\n+ \"loggers\": {\"django\": {\"handlers\": [\"console\"]}},\n+}\n", "issue": "enable Rich terminal output\n## Task\r\n\r\n- [ ] add Rich to this project with the command `poetry add rich --group dev`\r\n- [ ] follow the Rich [configuration instructions](https://rich.readthedocs.io/en/stable/introduction.html)\r\n- [ ] add the following code to the `LOGGING = {...}` configuration in the project settings\r\n\r\n```py\r\n\"formatters\": {\r\n \"rich\": {\"datefmt\": \"[%X]\"},\r\n},\r\n```\r\n\r\n```py\r\n\"console\": {\r\n \"class\": \"rich.logging.RichHandler\",\r\n \"filters\": [\"require_debug_true\"],\r\n \"formatter\": \"rich\",\r\n \"level\": \"DEBUG\",\r\n \"rich_tracebacks\": True,\r\n \"tracebacks_show_locals\": True,\r\n},\r\n```\n", "before_files": [{"content": "\"\"\"\nDjango settings for civiwiki project.\nDarius Calliet May 12, 2016\n\nProduction settings file to select proper environment variables.\n\"\"\"\nimport os\n\n# False if not in os.environ\nDEBUG = os.getenv(\"DEBUG\", False)\n\n# defaults to second value if not found in os.environ\nDJANGO_HOST = os.getenv(\"DJANGO_HOST\", \"LOCALHOST\")\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nSECRET_KEY = os.getenv(\"DJANGO_SECRET_KEY\", \"TEST_KEY_FOR_DEVELOPMENT\")\nALLOWED_HOSTS = [\".herokuapp.com\", \".civiwiki.org\", \"127.0.0.1\", \"localhost\", \"0.0.0.0\"]\n\nINSTALLED_APPS = (\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django_extensions\",\n \"storages\",\n \"core\",\n \"rest_framework\",\n \"accounts.apps.AccountsConfig\",\n \"threads\",\n \"notifications\",\n \"corsheaders\",\n \"taggit\",\n \"categories\",\n \"notification\",\n)\n\nMIDDLEWARE = [\n \"corsheaders.middleware.CorsMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n # 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nCSRF_USE_SESSIONS = (\n True # Store the CSRF token in the users session instead of in a cookie\n)\n\nCORS_ORIGIN_ALLOW_ALL = True\nROOT_URLCONF = \"core.urls\"\n\n# SSL Setup\nif DJANGO_HOST != \"LOCALHOST\":\n SECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n SECURE_SSL_REDIRECT = True\n SESSION_COOKIE_SECURE = True\n CSRF_COOKIE_SECURE = True\n\n# Internationalization & Localization\nLANGUAGE_CODE = \"en-us\"\nTIME_ZONE = \"UTC\"\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = 
True\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\n os.path.join(BASE_DIR, \"threads/templates/threads\"),\n os.path.join(BASE_DIR, \"accounts/templates/accounts\"),\n ], # TODO: Add non-webapp template directory\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"core.wsgi.application\"\n\n# Apex Contact for Production Errors\nADMINS = [(\"Development Team\", \"[email protected]\")]\n\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = (os.path.join(BASE_DIR, \"core/templates/static\"),)\nSTATIC_ROOT = os.path.join(BASE_DIR, \"staticfiles\")\n\nMEDIA_ROOT = os.path.join(BASE_DIR, \"media\")\nMEDIA_URL = \"/media/\"\n\n# TODO: re-organize and simplify staticfiles settings\nif \"CIVIWIKI_LOCAL_NAME\" not in os.environ:\n STATICFILES_STORAGE = \"whitenoise.storage.CompressedManifestStaticFilesStorage\"\n\n# Use DATABASE_URL in production\nDATABASE_URL = os.getenv(\"DATABASE_URL\")\n\nif DATABASE_URL is not None:\n DATABASES = {\"default\": DATABASE_URL}\nelse:\n # Default to sqlite for simplicity in development\n DATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": BASE_DIR + \"/\" + \"db.sqlite3\",\n }\n }\n\n# Email Backend Setup\nif \"EMAIL_HOST\" not in os.environ:\n EMAIL_BACKEND = \"django.core.mail.backends.console.EmailBackend\"\n EMAIL_HOST_USER = \"[email protected]\"\nelse:\n EMAIL_BACKEND = \"django.core.mail.backends.smtp.EmailBackend\"\n EMAIL_HOST = os.getenv(\"EMAIL_HOST\")\n EMAIL_PORT = os.getenv(\"EMAIL_PORT\")\n EMAIL_HOST_USER = os.getenv(\"EMAIL_HOST_USER\")\n EMAIL_HOST_PASSWORD = os.getenv(\"EMAIL_HOST_PASSWORD\")\n EMAIL_USE_SSL = True\n DEFAULT_FROM_EMAIL = EMAIL_HOST\n\n# Notification API Settings\nNOTIFICATIONS_SOFT_DELETE = True\nNOTIFICATIONS_USE_JSONFIELD = True\n\n# Django REST API Settings\nDEFAULT_RENDERER_CLASSES = (\"rest_framework.renderers.JSONRenderer\",)\n\nif DEBUG:\n # Browsable HTML - Enabled only in Debug mode (dev)\n DEFAULT_RENDERER_CLASSES = DEFAULT_RENDERER_CLASSES + (\n \"rest_framework.renderers.BrowsableAPIRenderer\",\n )\n\nREST_FRAMEWORK = {\n \"DEFAULT_PERMISSION_CLASSES\": (\"rest_framework.permissions.IsAuthenticated\",),\n \"DEFAULT_RENDERER_CLASSES\": DEFAULT_RENDERER_CLASSES,\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"rest_framework.authentication.BasicAuthentication\",\n \"rest_framework.authentication.SessionAuthentication\",\n ),\n}\n\n# CORS Settings\nCORS_ORIGIN_ALLOW_ALL = True\n\n# Custom User model\nAUTH_USER_MODEL = \"accounts.User\"\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\n# Login Logout URLS\nLOGIN_URL = \"login/\"\nLOGIN_REDIRECT_URL = \"/\"\nLOGOUT_REDIRECT_URL = \"/\"\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\", # noqa: E501\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n \"OPTIONS\": {\n \"min_length\": 4,\n },\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n", "path": "project/core/settings.py"}], "after_files": [{"content": "\"\"\"\nDjango settings for civiwiki project.\nDarius Calliet May 
12, 2016\n\nProduction settings file to select proper environment variables.\n\"\"\"\nimport os\n\n# False if not in os.environ\nDEBUG = os.getenv(\"DEBUG\", False)\n\n# defaults to second value if not found in os.environ\nDJANGO_HOST = os.getenv(\"DJANGO_HOST\", \"LOCALHOST\")\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nSECRET_KEY = os.getenv(\"DJANGO_SECRET_KEY\", \"TEST_KEY_FOR_DEVELOPMENT\")\nALLOWED_HOSTS = [\".herokuapp.com\", \".civiwiki.org\", \"127.0.0.1\", \"localhost\", \"0.0.0.0\"]\n\nINSTALLED_APPS = (\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django_extensions\",\n \"storages\",\n \"core\",\n \"rest_framework\",\n \"accounts.apps.AccountsConfig\",\n \"threads\",\n \"notifications\",\n \"corsheaders\",\n \"taggit\",\n \"categories\",\n \"notification\",\n)\n\nMIDDLEWARE = [\n \"corsheaders.middleware.CorsMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n # 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nCSRF_USE_SESSIONS = (\n True # Store the CSRF token in the users session instead of in a cookie\n)\n\nCORS_ORIGIN_ALLOW_ALL = True\nROOT_URLCONF = \"core.urls\"\n\n# SSL Setup\nif DJANGO_HOST != \"LOCALHOST\":\n SECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n SECURE_SSL_REDIRECT = True\n SESSION_COOKIE_SECURE = True\n CSRF_COOKIE_SECURE = True\n\n# Internationalization & Localization\nLANGUAGE_CODE = \"en-us\"\nTIME_ZONE = \"UTC\"\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\n os.path.join(BASE_DIR, \"threads/templates/threads\"),\n os.path.join(BASE_DIR, \"accounts/templates/accounts\"),\n ], # TODO: Add non-webapp template directory\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"core.wsgi.application\"\n\n# Apex Contact for Production Errors\nADMINS = [(\"Development Team\", \"[email protected]\")]\n\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = (os.path.join(BASE_DIR, \"core/templates/static\"),)\nSTATIC_ROOT = os.path.join(BASE_DIR, \"staticfiles\")\n\nMEDIA_ROOT = os.path.join(BASE_DIR, \"media\")\nMEDIA_URL = \"/media/\"\n\n# TODO: re-organize and simplify staticfiles settings\nif \"CIVIWIKI_LOCAL_NAME\" not in os.environ:\n STATICFILES_STORAGE = \"whitenoise.storage.CompressedManifestStaticFilesStorage\"\n\n# Use DATABASE_URL in production\nDATABASE_URL = os.getenv(\"DATABASE_URL\")\n\nif DATABASE_URL is not None:\n DATABASES = {\"default\": DATABASE_URL}\nelse:\n # Default to sqlite for simplicity in development\n DATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": BASE_DIR + \"/\" + \"db.sqlite3\",\n }\n }\n\n# Email Backend Setup\nif 
\"EMAIL_HOST\" not in os.environ:\n EMAIL_BACKEND = \"django.core.mail.backends.console.EmailBackend\"\n EMAIL_HOST_USER = \"[email protected]\"\nelse:\n EMAIL_BACKEND = \"django.core.mail.backends.smtp.EmailBackend\"\n EMAIL_HOST = os.getenv(\"EMAIL_HOST\")\n EMAIL_PORT = os.getenv(\"EMAIL_PORT\")\n EMAIL_HOST_USER = os.getenv(\"EMAIL_HOST_USER\")\n EMAIL_HOST_PASSWORD = os.getenv(\"EMAIL_HOST_PASSWORD\")\n EMAIL_USE_SSL = True\n DEFAULT_FROM_EMAIL = EMAIL_HOST\n\n# Notification API Settings\nNOTIFICATIONS_SOFT_DELETE = True\nNOTIFICATIONS_USE_JSONFIELD = True\n\n# Django REST API Settings\nDEFAULT_RENDERER_CLASSES = (\"rest_framework.renderers.JSONRenderer\",)\n\nif DEBUG:\n # Browsable HTML - Enabled only in Debug mode (dev)\n DEFAULT_RENDERER_CLASSES = DEFAULT_RENDERER_CLASSES + (\n \"rest_framework.renderers.BrowsableAPIRenderer\",\n )\n\nREST_FRAMEWORK = {\n \"DEFAULT_PERMISSION_CLASSES\": (\"rest_framework.permissions.IsAuthenticated\",),\n \"DEFAULT_RENDERER_CLASSES\": DEFAULT_RENDERER_CLASSES,\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"rest_framework.authentication.BasicAuthentication\",\n \"rest_framework.authentication.SessionAuthentication\",\n ),\n}\n\n# CORS Settings\nCORS_ORIGIN_ALLOW_ALL = True\n\n# Custom User model\nAUTH_USER_MODEL = \"accounts.User\"\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\n# Login Logout URLS\nLOGIN_URL = \"login/\"\nLOGIN_REDIRECT_URL = \"/\"\nLOGOUT_REDIRECT_URL = \"/\"\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\", # noqa: E501\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n \"OPTIONS\": {\n \"min_length\": 4,\n },\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\"rich\": {\"datefmt\": \"[%X]\"}},\n \"handlers\": {\n \"console\": {\n \"class\": \"rich.logging.RichHandler\",\n \"formatter\": \"rich\",\n \"level\": \"WARNING\",\n # \"filters\": [\"require_debug_true\"],\n \"rich_tracebacks\": True,\n \"tracebacks_show_locals\": True,\n }\n },\n \"loggers\": {\"django\": {\"handlers\": [\"console\"]}},\n}\n", "path": "project/core/settings.py"}]} | 2,139 | 196 |
gh_patches_debug_54184 | rasdani/github-patches | git_diff | pyro-ppl__pyro-2846 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[bug] Runtime error during SVI inference when using poutine.do()
### Issue Description
Setting: a simple model with 2 latent Gaussians z1 and z2, giving rise to x ~ N(z1 + z2, I).
In this setting p(z2 | x, z1) should be the same as p(z2 | x, do(z1)).
I wanted to check whether the current Pyro interface reflects this and it seems it does not.
My initial thought is that there is a difference in how .do() and .condition() broadcast the constants across the plate context.
### Environment
- OS and python version: MacOS 10.14.6, Python: 3.8.6
- PyTorch version: 1.9.0.dev20210502 (nightly version)
- Pyro version: 1.6.0.
### Code Snippet
Replication code:
https://pastebin.com/Ki2PYX7z
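
The pastebin contents are not reproduced here; as a rough, hypothetical sketch (not the original replication code), the setup described above looks roughly like:

```python
# Hypothetical reconstruction of the described model, for illustration only.
import torch
import pyro
import pyro.distributions as dist
from pyro import poutine

def model():
    z1 = pyro.sample("z1", dist.Normal(0., 1.))
    z2 = pyro.sample("z2", dist.Normal(0., 1.))
    with pyro.plate("data", 3):
        pyro.sample("x", dist.Normal(z1 + z2, 1.), obs=torch.zeros(3))

# p(z2 | x, z1=1) should equal p(z2 | x, do(z1=1)) here, so SVI run against
# either handler would be expected to recover the same posterior over z2.
conditioned_model = poutine.condition(model, data={"z1": torch.tensor(1.)})
intervened_model = poutine.do(model, data={"z1": torch.tensor(1.)})
```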
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyro/poutine/do_messenger.py`
Content:
```
1 # Copyright (c) 2017-2019 Uber Technologies, Inc.
2 # SPDX-License-Identifier: Apache-2.0
3
4 import numbers
5 import warnings
6
7 import torch
8
9 from .messenger import Messenger
10 from .runtime import apply_stack
11
12
13 class DoMessenger(Messenger):
14 """
15 Given a stochastic function with some sample statements
16 and a dictionary of values at names,
17 set the return values of those sites equal to the values
18 as if they were hard-coded to those values
19 and introduce fresh sample sites with the same names
20 whose values do not propagate.
21
22 Composes freely with :func:`~pyro.poutine.handlers.condition`
23 to represent counterfactual distributions over potential outcomes.
24 See Single World Intervention Graphs [1] for additional details and theory.
25
26 Consider the following Pyro program:
27
28 >>> def model(x):
29 ... s = pyro.param("s", torch.tensor(0.5))
30 ... z = pyro.sample("z", dist.Normal(x, s))
31 ... return z ** 2
32
33 To intervene with a value for site `z`, we can write
34
35 >>> intervened_model = pyro.poutine.do(model, data={"z": torch.tensor(1.)})
36
37 This is equivalent to replacing `z = pyro.sample("z", ...)` with
38 `z = torch.tensor(1.)`
39 and introducing a fresh sample site pyro.sample("z", ...) whose value is not used elsewhere.
40
41 References
42
43 [1] `Single World Intervention Graphs: A Primer`,
44 Thomas Richardson, James Robins
45
46 :param fn: a stochastic function (callable containing Pyro primitive calls)
47 :param data: a ``dict`` mapping sample site names to interventions
48 :returns: stochastic function decorated with a :class:`~pyro.poutine.do_messenger.DoMessenger`
49 """
50 def __init__(self, data):
51 super().__init__()
52 self.data = data
53 self._intervener_id = str(id(self))
54
55 def _pyro_sample(self, msg):
56 if msg.get('_intervener_id', None) != self._intervener_id and \
57 self.data.get(msg['name']) is not None:
58
59 if msg.get('_intervener_id', None) is not None:
60 warnings.warn(
61 "Attempting to intervene on variable {} multiple times,"
62 "this is almost certainly incorrect behavior".format(msg['name']),
63 RuntimeWarning)
64
65 msg['_intervener_id'] = self._intervener_id
66
67 # split node, avoid reapplying self recursively to new node
68 new_msg = msg.copy()
69 apply_stack(new_msg)
70
71 # apply intervention
72 intervention = self.data[msg['name']]
73 msg['name'] = msg['name'] + "__CF" # mangle old name
74
75 if isinstance(intervention, (numbers.Number, torch.Tensor)):
76 msg['value'] = intervention
77 msg['is_observed'] = True
78 msg['stop'] = True
79 else:
80 raise NotImplementedError(
81 "Interventions of type {} not implemented (yet)".format(type(intervention)))
82
83 return None
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pyro/poutine/do_messenger.py b/pyro/poutine/do_messenger.py
--- a/pyro/poutine/do_messenger.py
+++ b/pyro/poutine/do_messenger.py
@@ -66,6 +66,7 @@
# split node, avoid reapplying self recursively to new node
new_msg = msg.copy()
+ new_msg["cond_indep_stack"] = () # avoid entering plates twice
apply_stack(new_msg)
# apply intervention
| {"golden_diff": "diff --git a/pyro/poutine/do_messenger.py b/pyro/poutine/do_messenger.py\n--- a/pyro/poutine/do_messenger.py\n+++ b/pyro/poutine/do_messenger.py\n@@ -66,6 +66,7 @@\n \n # split node, avoid reapplying self recursively to new node\n new_msg = msg.copy()\n+ new_msg[\"cond_indep_stack\"] = () # avoid entering plates twice\n apply_stack(new_msg)\n \n # apply intervention\n", "issue": "[bug] Runtime error during SVI inference when using poutine.do()\n### Issue Description\r\n\r\nSetting: a simple model with 2 latent Gaussians z1 and z2, giving rise to x ~ N( z1+z2, I).\r\n\r\nIn this setting p(z2 | x, z1) should be the same as p(z2 | x, do(z1)). \r\n\r\nI wanted to check whether the current Pyro interface reflects this and it seems it does not.\r\n\r\nMy initial thought is that there is a difference in how .do() and .condition() broadcast the constants across the plate context.\r\n\r\n### Environment\r\n\r\n - OS and python version: MacOS 10.14.6, Python: 3.8.6\r\n - PyTorch version: 1.9.0.dev20210502 (nightly version)\r\n - Pyro version: 1.6.0.\r\n\r\n### Code Snippet\r\n\r\nReplication code:\r\nhttps://pastebin.com/Ki2PYX7z\r\n\n", "before_files": [{"content": "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport numbers\nimport warnings\n\nimport torch\n\nfrom .messenger import Messenger\nfrom .runtime import apply_stack\n\n\nclass DoMessenger(Messenger):\n \"\"\"\n Given a stochastic function with some sample statements\n and a dictionary of values at names,\n set the return values of those sites equal to the values\n as if they were hard-coded to those values\n and introduce fresh sample sites with the same names\n whose values do not propagate.\n\n Composes freely with :func:`~pyro.poutine.handlers.condition`\n to represent counterfactual distributions over potential outcomes.\n See Single World Intervention Graphs [1] for additional details and theory.\n\n Consider the following Pyro program:\n\n >>> def model(x):\n ... s = pyro.param(\"s\", torch.tensor(0.5))\n ... z = pyro.sample(\"z\", dist.Normal(x, s))\n ... return z ** 2\n\n To intervene with a value for site `z`, we can write\n\n >>> intervened_model = pyro.poutine.do(model, data={\"z\": torch.tensor(1.)})\n\n This is equivalent to replacing `z = pyro.sample(\"z\", ...)` with\n `z = torch.tensor(1.)`\n and introducing a fresh sample site pyro.sample(\"z\", ...) 
whose value is not used elsewhere.\n\n References\n\n [1] `Single World Intervention Graphs: A Primer`,\n Thomas Richardson, James Robins\n\n :param fn: a stochastic function (callable containing Pyro primitive calls)\n :param data: a ``dict`` mapping sample site names to interventions\n :returns: stochastic function decorated with a :class:`~pyro.poutine.do_messenger.DoMessenger`\n \"\"\"\n def __init__(self, data):\n super().__init__()\n self.data = data\n self._intervener_id = str(id(self))\n\n def _pyro_sample(self, msg):\n if msg.get('_intervener_id', None) != self._intervener_id and \\\n self.data.get(msg['name']) is not None:\n\n if msg.get('_intervener_id', None) is not None:\n warnings.warn(\n \"Attempting to intervene on variable {} multiple times,\"\n \"this is almost certainly incorrect behavior\".format(msg['name']),\n RuntimeWarning)\n\n msg['_intervener_id'] = self._intervener_id\n\n # split node, avoid reapplying self recursively to new node\n new_msg = msg.copy()\n apply_stack(new_msg)\n\n # apply intervention\n intervention = self.data[msg['name']]\n msg['name'] = msg['name'] + \"__CF\" # mangle old name\n\n if isinstance(intervention, (numbers.Number, torch.Tensor)):\n msg['value'] = intervention\n msg['is_observed'] = True\n msg['stop'] = True\n else:\n raise NotImplementedError(\n \"Interventions of type {} not implemented (yet)\".format(type(intervention)))\n\n return None\n", "path": "pyro/poutine/do_messenger.py"}], "after_files": [{"content": "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport numbers\nimport warnings\n\nimport torch\n\nfrom .messenger import Messenger\nfrom .runtime import apply_stack\n\n\nclass DoMessenger(Messenger):\n \"\"\"\n Given a stochastic function with some sample statements\n and a dictionary of values at names,\n set the return values of those sites equal to the values\n as if they were hard-coded to those values\n and introduce fresh sample sites with the same names\n whose values do not propagate.\n\n Composes freely with :func:`~pyro.poutine.handlers.condition`\n to represent counterfactual distributions over potential outcomes.\n See Single World Intervention Graphs [1] for additional details and theory.\n\n Consider the following Pyro program:\n\n >>> def model(x):\n ... s = pyro.param(\"s\", torch.tensor(0.5))\n ... z = pyro.sample(\"z\", dist.Normal(x, s))\n ... return z ** 2\n\n To intervene with a value for site `z`, we can write\n\n >>> intervened_model = pyro.poutine.do(model, data={\"z\": torch.tensor(1.)})\n\n This is equivalent to replacing `z = pyro.sample(\"z\", ...)` with\n `z = torch.tensor(1.)`\n and introducing a fresh sample site pyro.sample(\"z\", ...) 
whose value is not used elsewhere.\n\n References\n\n [1] `Single World Intervention Graphs: A Primer`,\n Thomas Richardson, James Robins\n\n :param fn: a stochastic function (callable containing Pyro primitive calls)\n :param data: a ``dict`` mapping sample site names to interventions\n :returns: stochastic function decorated with a :class:`~pyro.poutine.do_messenger.DoMessenger`\n \"\"\"\n def __init__(self, data):\n super().__init__()\n self.data = data\n self._intervener_id = str(id(self))\n\n def _pyro_sample(self, msg):\n if msg.get('_intervener_id', None) != self._intervener_id and \\\n self.data.get(msg['name']) is not None:\n\n if msg.get('_intervener_id', None) is not None:\n warnings.warn(\n \"Attempting to intervene on variable {} multiple times,\"\n \"this is almost certainly incorrect behavior\".format(msg['name']),\n RuntimeWarning)\n\n msg['_intervener_id'] = self._intervener_id\n\n # split node, avoid reapplying self recursively to new node\n new_msg = msg.copy()\n new_msg[\"cond_indep_stack\"] = () # avoid entering plates twice\n apply_stack(new_msg)\n\n # apply intervention\n intervention = self.data[msg['name']]\n msg['name'] = msg['name'] + \"__CF\" # mangle old name\n\n if isinstance(intervention, (numbers.Number, torch.Tensor)):\n msg['value'] = intervention\n msg['is_observed'] = True\n msg['stop'] = True\n else:\n raise NotImplementedError(\n \"Interventions of type {} not implemented (yet)\".format(type(intervention)))\n\n return None\n", "path": "pyro/poutine/do_messenger.py"}]} | 1,317 | 110 |
gh_patches_debug_1515 | rasdani/github-patches | git_diff | docker__docker-py-832 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
decode_auth function does not handle utf-8 logins or password
Hi
I have found that the function **decode_auth** (line 96, [file](https://github.com/docker/docker-py/blob/master/docker/auth/auth.py)) fails when decoding UTF-8 passwords from the .dockercfg file, which leaves **load_config** returning an empty config.
I have checked that Docker Hub can handle UTF-8 passwords; this code proves it:
``` python
# coding=utf-8
from docker import Client
cred = { 'username': <user>, 'password': <utf-8 password> }
c = Client(base_url='unix://var/run/docker.sock')
res = c.pull(repository='<private container>', tag='latest', auth_config=cred)
print(res)
```
Thank you
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docker/auth/auth.py`
Content:
```
1 # Copyright 2013 dotCloud inc.
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import base64
16 import json
17 import logging
18 import os
19 import warnings
20
21 import six
22
23 from .. import constants
24 from .. import errors
25
26 INDEX_NAME = 'index.docker.io'
27 INDEX_URL = 'https://{0}/v1/'.format(INDEX_NAME)
28 DOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')
29 LEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'
30
31 log = logging.getLogger(__name__)
32
33
34 def resolve_repository_name(repo_name, insecure=False):
35 if insecure:
36 warnings.warn(
37 constants.INSECURE_REGISTRY_DEPRECATION_WARNING.format(
38 'resolve_repository_name()'
39 ), DeprecationWarning
40 )
41
42 if '://' in repo_name:
43 raise errors.InvalidRepository(
44 'Repository name cannot contain a scheme ({0})'.format(repo_name))
45 parts = repo_name.split('/', 1)
46 if '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost':
47 # This is a docker index repo (ex: foo/bar or ubuntu)
48 return INDEX_NAME, repo_name
49 if len(parts) < 2:
50 raise errors.InvalidRepository(
51 'Invalid repository name ({0})'.format(repo_name))
52
53 if 'index.docker.io' in parts[0]:
54 raise errors.InvalidRepository(
55 'Invalid repository name, try "{0}" instead'.format(parts[1])
56 )
57
58 return parts[0], parts[1]
59
60
61 def resolve_authconfig(authconfig, registry=None):
62 """
63 Returns the authentication data from the given auth configuration for a
64 specific registry. As with the Docker client, legacy entries in the config
65 with full URLs are stripped down to hostnames before checking for a match.
66 Returns None if no match was found.
67 """
68 # Default to the public index server
69 registry = convert_to_hostname(registry) if registry else INDEX_NAME
70 log.debug("Looking for auth entry for {0}".format(repr(registry)))
71
72 if registry in authconfig:
73 log.debug("Found {0}".format(repr(registry)))
74 return authconfig[registry]
75
76 for key, config in six.iteritems(authconfig):
77 if convert_to_hostname(key) == registry:
78 log.debug("Found {0}".format(repr(key)))
79 return config
80
81 log.debug("No entry found")
82 return None
83
84
85 def convert_to_hostname(url):
86 return url.replace('http://', '').replace('https://', '').split('/', 1)[0]
87
88
89 def encode_auth(auth_info):
90 return base64.b64encode(auth_info.get('username', '') + b':' +
91 auth_info.get('password', ''))
92
93
94 def decode_auth(auth):
95 if isinstance(auth, six.string_types):
96 auth = auth.encode('ascii')
97 s = base64.b64decode(auth)
98 login, pwd = s.split(b':', 1)
99 return login.decode('ascii'), pwd.decode('ascii')
100
101
102 def encode_header(auth):
103 auth_json = json.dumps(auth).encode('ascii')
104 return base64.urlsafe_b64encode(auth_json)
105
106
107 def parse_auth(entries):
108 """
109 Parses authentication entries
110
111 Args:
112 entries: Dict of authentication entries.
113
114 Returns:
115 Authentication registry.
116 """
117
118 conf = {}
119 for registry, entry in six.iteritems(entries):
120 username, password = decode_auth(entry['auth'])
121 log.debug(
122 'Found entry (registry={0}, username={1})'
123 .format(repr(registry), repr(username))
124 )
125 conf[registry] = {
126 'username': username,
127 'password': password,
128 'email': entry['email'],
129 'serveraddress': registry,
130 }
131 return conf
132
133
134 def find_config_file(config_path=None):
135 environment_path = os.path.join(
136 os.environ.get('DOCKER_CONFIG'),
137 os.path.basename(DOCKER_CONFIG_FILENAME)
138 ) if os.environ.get('DOCKER_CONFIG') else None
139
140 paths = [
141 config_path, # 1
142 environment_path, # 2
143 os.path.join(os.path.expanduser('~'), DOCKER_CONFIG_FILENAME), # 3
144 os.path.join(
145 os.path.expanduser('~'), LEGACY_DOCKER_CONFIG_FILENAME
146 ) # 4
147 ]
148
149 for path in paths:
150 if path and os.path.exists(path):
151 return path
152 return None
153
154
155 def load_config(config_path=None):
156 """
157 Loads authentication data from a Docker configuration file in the given
158 root directory or if config_path is passed use given path.
159 Lookup priority:
160 explicit config_path parameter > DOCKER_CONFIG environment variable >
161 ~/.docker/config.json > ~/.dockercfg
162 """
163
164 config_file = find_config_file(config_path)
165
166 if not config_file:
167 log.debug("File doesn't exist")
168 return {}
169
170 try:
171 with open(config_file) as f:
172 data = json.load(f)
173 if data.get('auths'):
174 log.debug("Found 'auths' section")
175 return parse_auth(data['auths'])
176 else:
177 log.debug("Couldn't find 'auths' section")
178 f.seek(0)
179 return parse_auth(json.load(f))
180 except (IOError, KeyError, ValueError) as e:
181 # Likely missing new Docker config file or it's in an
182 # unknown format, continue to attempt to read old location
183 # and format.
184 log.debug(e)
185
186 log.debug("Attempting to parse legacy auth file format")
187 try:
188 data = []
189 with open(config_file) as f:
190 for line in f.readlines():
191 data.append(line.strip().split(' = ')[1])
192 if len(data) < 2:
193 # Not enough data
194 raise errors.InvalidConfigFile(
195 'Invalid or empty configuration file!'
196 )
197
198 username, password = decode_auth(data[0])
199 return {
200 INDEX_NAME: {
201 'username': username,
202 'password': password,
203 'email': data[1],
204 'serveraddress': INDEX_URL,
205 }
206 }
207 except Exception as e:
208 log.debug(e)
209 pass
210
211 log.debug("All parsing attempts failed - returning empty config")
212 return {}
213
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docker/auth/auth.py b/docker/auth/auth.py
--- a/docker/auth/auth.py
+++ b/docker/auth/auth.py
@@ -96,7 +96,7 @@
auth = auth.encode('ascii')
s = base64.b64decode(auth)
login, pwd = s.split(b':', 1)
- return login.decode('ascii'), pwd.decode('ascii')
+ return login.decode('utf8'), pwd.decode('utf8')
def encode_header(auth):
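
The one-line change can be reasoned about independently of docker-py; a small standalone sketch of why the ASCII decode fails on UTF-8 credentials:

```python
import base64

# Round-trip a UTF-8 password the way a .dockercfg auth entry stores it.
auth = base64.b64encode("user:pässwörd".encode("utf8"))
s = base64.b64decode(auth)
login, pwd = s.split(b":", 1)
# pwd.decode("ascii") raises UnicodeDecodeError on these bytes,
# whereas decoding as UTF-8 recovers the original credentials.
print(login.decode("utf8"), pwd.decode("utf8"))
```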
| {"golden_diff": "diff --git a/docker/auth/auth.py b/docker/auth/auth.py\n--- a/docker/auth/auth.py\n+++ b/docker/auth/auth.py\n@@ -96,7 +96,7 @@\n auth = auth.encode('ascii')\n s = base64.b64decode(auth)\n login, pwd = s.split(b':', 1)\n- return login.decode('ascii'), pwd.decode('ascii')\n+ return login.decode('utf8'), pwd.decode('utf8')\n \n \n def encode_header(auth):\n", "issue": "decode_auth function does not handle utf-8 logins or password\nHI\n\nI have found that the function **decode_auth** (line 96, [file](https://github.com/docker/docker-py/blob/master/docker/auth/auth.py)) fails when decoding UTF-8 passwords from the .dockercfg file, and **load_config** returning an empty config.\n\nI have checked and docker hub can handle UTF-8 passwords, this code proves that:\n\n``` python\n# coding=utf-8\nfrom docker import Client\ncred = { 'username': <user>, 'password': <utf-8 password> }\nc = Client(base_url='unix://var/run/docker.sock')\nres = c.pull(repository='<private container>', tag='latest', auth_config=cred)\nprint(res)\n```\n\nThank you\n\n", "before_files": [{"content": "# Copyright 2013 dotCloud inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport base64\nimport json\nimport logging\nimport os\nimport warnings\n\nimport six\n\nfrom .. import constants\nfrom .. import errors\n\nINDEX_NAME = 'index.docker.io'\nINDEX_URL = 'https://{0}/v1/'.format(INDEX_NAME)\nDOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')\nLEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'\n\nlog = logging.getLogger(__name__)\n\n\ndef resolve_repository_name(repo_name, insecure=False):\n if insecure:\n warnings.warn(\n constants.INSECURE_REGISTRY_DEPRECATION_WARNING.format(\n 'resolve_repository_name()'\n ), DeprecationWarning\n )\n\n if '://' in repo_name:\n raise errors.InvalidRepository(\n 'Repository name cannot contain a scheme ({0})'.format(repo_name))\n parts = repo_name.split('/', 1)\n if '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost':\n # This is a docker index repo (ex: foo/bar or ubuntu)\n return INDEX_NAME, repo_name\n if len(parts) < 2:\n raise errors.InvalidRepository(\n 'Invalid repository name ({0})'.format(repo_name))\n\n if 'index.docker.io' in parts[0]:\n raise errors.InvalidRepository(\n 'Invalid repository name, try \"{0}\" instead'.format(parts[1])\n )\n\n return parts[0], parts[1]\n\n\ndef resolve_authconfig(authconfig, registry=None):\n \"\"\"\n Returns the authentication data from the given auth configuration for a\n specific registry. 
As with the Docker client, legacy entries in the config\n with full URLs are stripped down to hostnames before checking for a match.\n Returns None if no match was found.\n \"\"\"\n # Default to the public index server\n registry = convert_to_hostname(registry) if registry else INDEX_NAME\n log.debug(\"Looking for auth entry for {0}\".format(repr(registry)))\n\n if registry in authconfig:\n log.debug(\"Found {0}\".format(repr(registry)))\n return authconfig[registry]\n\n for key, config in six.iteritems(authconfig):\n if convert_to_hostname(key) == registry:\n log.debug(\"Found {0}\".format(repr(key)))\n return config\n\n log.debug(\"No entry found\")\n return None\n\n\ndef convert_to_hostname(url):\n return url.replace('http://', '').replace('https://', '').split('/', 1)[0]\n\n\ndef encode_auth(auth_info):\n return base64.b64encode(auth_info.get('username', '') + b':' +\n auth_info.get('password', ''))\n\n\ndef decode_auth(auth):\n if isinstance(auth, six.string_types):\n auth = auth.encode('ascii')\n s = base64.b64decode(auth)\n login, pwd = s.split(b':', 1)\n return login.decode('ascii'), pwd.decode('ascii')\n\n\ndef encode_header(auth):\n auth_json = json.dumps(auth).encode('ascii')\n return base64.urlsafe_b64encode(auth_json)\n\n\ndef parse_auth(entries):\n \"\"\"\n Parses authentication entries\n\n Args:\n entries: Dict of authentication entries.\n\n Returns:\n Authentication registry.\n \"\"\"\n\n conf = {}\n for registry, entry in six.iteritems(entries):\n username, password = decode_auth(entry['auth'])\n log.debug(\n 'Found entry (registry={0}, username={1})'\n .format(repr(registry), repr(username))\n )\n conf[registry] = {\n 'username': username,\n 'password': password,\n 'email': entry['email'],\n 'serveraddress': registry,\n }\n return conf\n\n\ndef find_config_file(config_path=None):\n environment_path = os.path.join(\n os.environ.get('DOCKER_CONFIG'),\n os.path.basename(DOCKER_CONFIG_FILENAME)\n ) if os.environ.get('DOCKER_CONFIG') else None\n\n paths = [\n config_path, # 1\n environment_path, # 2\n os.path.join(os.path.expanduser('~'), DOCKER_CONFIG_FILENAME), # 3\n os.path.join(\n os.path.expanduser('~'), LEGACY_DOCKER_CONFIG_FILENAME\n ) # 4\n ]\n\n for path in paths:\n if path and os.path.exists(path):\n return path\n return None\n\n\ndef load_config(config_path=None):\n \"\"\"\n Loads authentication data from a Docker configuration file in the given\n root directory or if config_path is passed use given path.\n Lookup priority:\n explicit config_path parameter > DOCKER_CONFIG environment variable >\n ~/.docker/config.json > ~/.dockercfg\n \"\"\"\n\n config_file = find_config_file(config_path)\n\n if not config_file:\n log.debug(\"File doesn't exist\")\n return {}\n\n try:\n with open(config_file) as f:\n data = json.load(f)\n if data.get('auths'):\n log.debug(\"Found 'auths' section\")\n return parse_auth(data['auths'])\n else:\n log.debug(\"Couldn't find 'auths' section\")\n f.seek(0)\n return parse_auth(json.load(f))\n except (IOError, KeyError, ValueError) as e:\n # Likely missing new Docker config file or it's in an\n # unknown format, continue to attempt to read old location\n # and format.\n log.debug(e)\n\n log.debug(\"Attempting to parse legacy auth file format\")\n try:\n data = []\n with open(config_file) as f:\n for line in f.readlines():\n data.append(line.strip().split(' = ')[1])\n if len(data) < 2:\n # Not enough data\n raise errors.InvalidConfigFile(\n 'Invalid or empty configuration file!'\n )\n\n username, password = decode_auth(data[0])\n return {\n 
INDEX_NAME: {\n 'username': username,\n 'password': password,\n 'email': data[1],\n 'serveraddress': INDEX_URL,\n }\n }\n except Exception as e:\n log.debug(e)\n pass\n\n log.debug(\"All parsing attempts failed - returning empty config\")\n return {}\n", "path": "docker/auth/auth.py"}], "after_files": [{"content": "# Copyright 2013 dotCloud inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport base64\nimport json\nimport logging\nimport os\nimport warnings\n\nimport six\n\nfrom .. import constants\nfrom .. import errors\n\nINDEX_NAME = 'index.docker.io'\nINDEX_URL = 'https://{0}/v1/'.format(INDEX_NAME)\nDOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')\nLEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'\n\nlog = logging.getLogger(__name__)\n\n\ndef resolve_repository_name(repo_name, insecure=False):\n if insecure:\n warnings.warn(\n constants.INSECURE_REGISTRY_DEPRECATION_WARNING.format(\n 'resolve_repository_name()'\n ), DeprecationWarning\n )\n\n if '://' in repo_name:\n raise errors.InvalidRepository(\n 'Repository name cannot contain a scheme ({0})'.format(repo_name))\n parts = repo_name.split('/', 1)\n if '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost':\n # This is a docker index repo (ex: foo/bar or ubuntu)\n return INDEX_NAME, repo_name\n if len(parts) < 2:\n raise errors.InvalidRepository(\n 'Invalid repository name ({0})'.format(repo_name))\n\n if 'index.docker.io' in parts[0]:\n raise errors.InvalidRepository(\n 'Invalid repository name, try \"{0}\" instead'.format(parts[1])\n )\n\n return parts[0], parts[1]\n\n\ndef resolve_authconfig(authconfig, registry=None):\n \"\"\"\n Returns the authentication data from the given auth configuration for a\n specific registry. 
As with the Docker client, legacy entries in the config\n with full URLs are stripped down to hostnames before checking for a match.\n Returns None if no match was found.\n \"\"\"\n # Default to the public index server\n registry = convert_to_hostname(registry) if registry else INDEX_NAME\n log.debug(\"Looking for auth entry for {0}\".format(repr(registry)))\n\n if registry in authconfig:\n log.debug(\"Found {0}\".format(repr(registry)))\n return authconfig[registry]\n\n for key, config in six.iteritems(authconfig):\n if convert_to_hostname(key) == registry:\n log.debug(\"Found {0}\".format(repr(key)))\n return config\n\n log.debug(\"No entry found\")\n return None\n\n\ndef convert_to_hostname(url):\n return url.replace('http://', '').replace('https://', '').split('/', 1)[0]\n\n\ndef encode_auth(auth_info):\n return base64.b64encode(auth_info.get('username', '') + b':' +\n auth_info.get('password', ''))\n\n\ndef decode_auth(auth):\n if isinstance(auth, six.string_types):\n auth = auth.encode('ascii')\n s = base64.b64decode(auth)\n login, pwd = s.split(b':', 1)\n return login.decode('utf8'), pwd.decode('utf8')\n\n\ndef encode_header(auth):\n auth_json = json.dumps(auth).encode('ascii')\n return base64.urlsafe_b64encode(auth_json)\n\n\ndef parse_auth(entries):\n \"\"\"\n Parses authentication entries\n\n Args:\n entries: Dict of authentication entries.\n\n Returns:\n Authentication registry.\n \"\"\"\n\n conf = {}\n for registry, entry in six.iteritems(entries):\n username, password = decode_auth(entry['auth'])\n log.debug(\n 'Found entry (registry={0}, username={1})'\n .format(repr(registry), repr(username))\n )\n conf[registry] = {\n 'username': username,\n 'password': password,\n 'email': entry['email'],\n 'serveraddress': registry,\n }\n return conf\n\n\ndef find_config_file(config_path=None):\n environment_path = os.path.join(\n os.environ.get('DOCKER_CONFIG'),\n os.path.basename(DOCKER_CONFIG_FILENAME)\n ) if os.environ.get('DOCKER_CONFIG') else None\n\n paths = [\n config_path, # 1\n environment_path, # 2\n os.path.join(os.path.expanduser('~'), DOCKER_CONFIG_FILENAME), # 3\n os.path.join(\n os.path.expanduser('~'), LEGACY_DOCKER_CONFIG_FILENAME\n ) # 4\n ]\n\n for path in paths:\n if path and os.path.exists(path):\n return path\n return None\n\n\ndef load_config(config_path=None):\n \"\"\"\n Loads authentication data from a Docker configuration file in the given\n root directory or if config_path is passed use given path.\n Lookup priority:\n explicit config_path parameter > DOCKER_CONFIG environment variable >\n ~/.docker/config.json > ~/.dockercfg\n \"\"\"\n\n config_file = find_config_file(config_path)\n\n if not config_file:\n log.debug(\"File doesn't exist\")\n return {}\n\n try:\n with open(config_file) as f:\n data = json.load(f)\n if data.get('auths'):\n log.debug(\"Found 'auths' section\")\n return parse_auth(data['auths'])\n else:\n log.debug(\"Couldn't find 'auths' section\")\n f.seek(0)\n return parse_auth(json.load(f))\n except (IOError, KeyError, ValueError) as e:\n # Likely missing new Docker config file or it's in an\n # unknown format, continue to attempt to read old location\n # and format.\n log.debug(e)\n\n log.debug(\"Attempting to parse legacy auth file format\")\n try:\n data = []\n with open(config_file) as f:\n for line in f.readlines():\n data.append(line.strip().split(' = ')[1])\n if len(data) < 2:\n # Not enough data\n raise errors.InvalidConfigFile(\n 'Invalid or empty configuration file!'\n )\n\n username, password = decode_auth(data[0])\n return {\n 
INDEX_NAME: {\n 'username': username,\n 'password': password,\n 'email': data[1],\n 'serveraddress': INDEX_URL,\n }\n }\n except Exception as e:\n log.debug(e)\n pass\n\n log.debug(\"All parsing attempts failed - returning empty config\")\n return {}\n", "path": "docker/auth/auth.py"}]} | 2,422 | 107 |
gh_patches_debug_2456 | rasdani/github-patches | git_diff | biolab__orange3-text-358 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Guardian: Fix failing tests on Travis
<!--
This is an issue template. Please fill in the relevant details in the
sections below.
-->
##### Text version
<!-- From menu _Options→Add-ons→Orange3-Text_ or code `orangecontrib.text.version.full_version` -->
0.3.0
##### Orange version
<!-- From menu _Help→About→Version_ or code `Orange.version.full_version` -->
3.15.dev
##### Expected behavior
Tests pass.
##### Actual behavior
Guardian tests are failing.
##### Steps to reproduce the behavior
##### Additional info (worksheets, data, screenshots, ...)
Fix tests.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `orangecontrib/text/guardian.py`
Content:
```
1 """ This module fetches data from The Guardian API.
2
3 To use first create :class:`TheGuardianCredentials`:
4
5 >>> from orangecontrib.text.guardian import TheGuardianCredentials
6 >>> credentials = TheGuardianCredentials('<your-api-key>')
7
8 Then create :class:`TheGuardianAPI` object and use it for searching:
9
10 >>> from orangecontrib.text.guardian import TheGuardianAPI
11 >>> api = TheGuardianAPI(credentials)
12 >>> corpus = api.search('Slovenia', max_documents=10)
13 >>> len(corpus)
14 10
15
16 """
17
18 import requests
19 import math
20 import json
21
22 from Orange import data
23
24 from orangecontrib.text.corpus import Corpus
25
26
27 BASE_URL = 'http://content.guardianapis.com/search'
28 ARTICLES_PER_PAGE = 10
29
30
31 class TheGuardianCredentials:
32 """ The Guardian API credentials. """
33 def __init__(self, key):
34 """
35 Args:
36 key (str): The Guardian API key. Use `test` for testing purposes.
37 """
38 self.key = key
39
40 @property
41 def valid(self):
42 """ Check if given API key is valid. """
43 response = requests.get(BASE_URL, {'api-key': self.key})
44 return response.status_code != 403 # 403 == Forbidden
45
46 def __eq__(self, other):
47 return self.key == other.key
48
49
50 class TheGuardianAPI:
51 attributes = []
52
53 class_vars = [
54 (data.DiscreteVariable('Section'), lambda doc: doc['sectionName']),
55 ]
56
57 tv = data.TimeVariable('Publication Date')
58 metas = [
59 (data.StringVariable('Headline'), lambda doc: doc['fields']['headline']),
60 (data.StringVariable('Content'), lambda doc: doc['fields']['bodyText']),
61 (data.StringVariable('Trail Text'), lambda doc: doc['fields']['trailText']),
62 (data.StringVariable('HTML'), lambda doc: doc['fields']['body']),
63 (tv, lambda doc: TheGuardianAPI.tv.parse(doc['webPublicationDate'])),
64 (data.DiscreteVariable('Type'), lambda doc: doc['type']),
65 (data.DiscreteVariable('Language'), lambda doc: doc['fields']['lang']),
66 (data.StringVariable('Tags'),
67 lambda doc: ', '.join(tag['webTitle'] for tag in doc['tags'])),
68 (data.StringVariable('URL'), lambda doc: doc['webUrl']),
69 (data.ContinuousVariable('Word Count', number_of_decimals=0),
70 lambda doc: doc['fields']['wordcount']),
71 ]
72
73 text_features = [metas[0][0], metas[1][0]] # Headline + Content
74 title_indices = [-1] # Headline
75
76 def __init__(self, credentials, on_progress=None, should_break=None):
77 """
78 Args:
79 credentials (:class:`TheGuardianCredentials`): The Guardian Creentials.
80 on_progress (callable): Function for progress reporting.
81 should_break (callable): Function for early stopping.
82 """
83 self.per_page = ARTICLES_PER_PAGE
84 self.pages = 0
85 self.credentials = credentials
86 self.on_progress = on_progress or (lambda x, y: None)
87 self.should_break = should_break or (lambda: False)
88
89 self.results = []
90
91 def _search(self, query, from_date, to_date, page=1):
92 data = self._build_query(query, from_date, to_date, page)
93
94 response = requests.get(BASE_URL, data)
95 parsed = json.loads(response.text)
96
97 if page == 1: # store number of pages
98 self.pages = parsed['response']['pages']
99
100 self.results.extend(parsed['response']['results'])
101
102 def _build_query(self, query, from_date=None, to_date=None, page=1):
103 data = {
104 'q': query,
105 'api-key': self.credentials.key,
106 'page': str(page),
107 'show-fields': 'headline,trailText,body,bodyText,lang,wordcount',
108 'show-tags': 'all',
109 }
110 if from_date is not None:
111 data['from-date'] = from_date
112 if to_date is not None:
113 data['to-date'] = to_date
114
115 return data
116
117 def search(self, query, from_date=None, to_date=None, max_documents=None,
118 accumulate=False):
119 """
120 Search The Guardian API for articles.
121
122 Args:
123 query (str): A query for searching the articles by
124 from_date (str): Search only articles newer than the date provided.
125 Date should be in ISO format; e.g. '2016-12-31'.
126 to_date (str): Search only articles older than the date provided.
127 Date should be in ISO format; e.g. '2016-12-31'.
128 max_documents (int): Maximum number of documents to retrieve.
129 When not given, retrieve all documents.
130 accumulate (bool): A flag indicating whether to accumulate results
131 of multiple consequent search calls.
132
133 Returns:
134 :ref:`Corpus`
135 """
136 if not accumulate:
137 self.results = []
138
139 self._search(query, from_date, to_date)
140
141 pages = math.ceil(max_documents/self.per_page) if max_documents else self.pages
142 self.on_progress(self.per_page, pages * self.per_page)
143
144 for p in range(2, pages+1): # to one based
145 if self.should_break():
146 break
147 self._search(query, from_date, to_date, p)
148 self.on_progress(p*self.per_page, pages * self.per_page)
149
150 c = Corpus.from_documents(
151 self.results, 'The Guardian', self.attributes, self.class_vars,
152 self.metas, title_indices=self.title_indices)
153 c.text_features = self.text_features
154 return c
155
156
157 if __name__ == '__main__':
158 credentials = TheGuardianCredentials('')
159 print(credentials.valid)
160 api = TheGuardianAPI(credentials=credentials)
161 c = api.search('refugees', max_documents=10)
162 print(c)
163
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/orangecontrib/text/guardian.py b/orangecontrib/text/guardian.py
--- a/orangecontrib/text/guardian.py
+++ b/orangecontrib/text/guardian.py
@@ -155,7 +155,7 @@
if __name__ == '__main__':
- credentials = TheGuardianCredentials('')
+ credentials = TheGuardianCredentials('test')
print(credentials.valid)
api = TheGuardianAPI(credentials=credentials)
c = api.search('refugees', max_documents=10)
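
Per the module's own docstring, the key `test` is intended for testing, so — assuming the add-on is installed and the Guardian API is reachable — a quick check of the patched snippet is:

```python
from orangecontrib.text.guardian import TheGuardianCredentials

credentials = TheGuardianCredentials('test')
# `valid` only verifies the API does not answer 403 Forbidden, so the
# documented 'test' key is expected to report True here.
print(credentials.valid)
```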
| {"golden_diff": "diff --git a/orangecontrib/text/guardian.py b/orangecontrib/text/guardian.py\n--- a/orangecontrib/text/guardian.py\n+++ b/orangecontrib/text/guardian.py\n@@ -155,7 +155,7 @@\n \n \n if __name__ == '__main__':\n- credentials = TheGuardianCredentials('')\n+ credentials = TheGuardianCredentials('test')\n print(credentials.valid)\n api = TheGuardianAPI(credentials=credentials)\n c = api.search('refugees', max_documents=10)\n", "issue": "Guardian: Fix failing tests on Travis\n<!--\r\nThis is an issue template. Please fill in the relevant details in the\r\nsections below.\r\n-->\r\n\r\n##### Text version\r\n<!-- From menu _Options\u2192Add-ons\u2192Orange3-Text_ or code `orangecontrib.text.version.full_version` -->\r\n0.3.0\r\n\r\n##### Orange version\r\n<!-- From menu _Help\u2192About\u2192Version_ or code `Orange.version.full_version` -->\r\n3.15.dev\r\n\r\n##### Expected behavior\r\nTests pass.\r\n\r\n\r\n##### Actual behavior\r\nGuardian tests is failing.\r\n\r\n\r\n##### Steps to reproduce the behavior\r\n\r\n\r\n\r\n##### Additional info (worksheets, data, screenshots, ...)\r\nFix tests.\r\n\r\n\n", "before_files": [{"content": "\"\"\" This module fetches data from The Guardian API.\n\nTo use first create :class:`TheGuardianCredentials`:\n\n >>> from orangecontrib.text.guardian import TheGuardianCredentials\n >>> credentials = TheGuardianCredentials('<your-api-key>')\n\nThen create :class:`TheGuardianAPI` object and use it for searching:\n\n >>> from orangecontrib.text.guardian import TheGuardianAPI\n >>> api = TheGuardianAPI(credentials)\n >>> corpus = api.search('Slovenia', max_documents=10)\n >>> len(corpus)\n 10\n\n\"\"\"\n\nimport requests\nimport math\nimport json\n\nfrom Orange import data\n\nfrom orangecontrib.text.corpus import Corpus\n\n\nBASE_URL = 'http://content.guardianapis.com/search'\nARTICLES_PER_PAGE = 10\n\n\nclass TheGuardianCredentials:\n \"\"\" The Guardian API credentials. \"\"\"\n def __init__(self, key):\n \"\"\"\n Args:\n key (str): The Guardian API key. Use `test` for testing purposes.\n \"\"\"\n self.key = key\n\n @property\n def valid(self):\n \"\"\" Check if given API key is valid. 
\"\"\"\n response = requests.get(BASE_URL, {'api-key': self.key})\n return response.status_code != 403 # 403 == Forbidden\n\n def __eq__(self, other):\n return self.key == other.key\n\n\nclass TheGuardianAPI:\n attributes = []\n\n class_vars = [\n (data.DiscreteVariable('Section'), lambda doc: doc['sectionName']),\n ]\n\n tv = data.TimeVariable('Publication Date')\n metas = [\n (data.StringVariable('Headline'), lambda doc: doc['fields']['headline']),\n (data.StringVariable('Content'), lambda doc: doc['fields']['bodyText']),\n (data.StringVariable('Trail Text'), lambda doc: doc['fields']['trailText']),\n (data.StringVariable('HTML'), lambda doc: doc['fields']['body']),\n (tv, lambda doc: TheGuardianAPI.tv.parse(doc['webPublicationDate'])),\n (data.DiscreteVariable('Type'), lambda doc: doc['type']),\n (data.DiscreteVariable('Language'), lambda doc: doc['fields']['lang']),\n (data.StringVariable('Tags'),\n lambda doc: ', '.join(tag['webTitle'] for tag in doc['tags'])),\n (data.StringVariable('URL'), lambda doc: doc['webUrl']),\n (data.ContinuousVariable('Word Count', number_of_decimals=0),\n lambda doc: doc['fields']['wordcount']),\n ]\n\n text_features = [metas[0][0], metas[1][0]] # Headline + Content\n title_indices = [-1] # Headline\n\n def __init__(self, credentials, on_progress=None, should_break=None):\n \"\"\"\n Args:\n credentials (:class:`TheGuardianCredentials`): The Guardian Creentials.\n on_progress (callable): Function for progress reporting.\n should_break (callable): Function for early stopping.\n \"\"\"\n self.per_page = ARTICLES_PER_PAGE\n self.pages = 0\n self.credentials = credentials\n self.on_progress = on_progress or (lambda x, y: None)\n self.should_break = should_break or (lambda: False)\n\n self.results = []\n\n def _search(self, query, from_date, to_date, page=1):\n data = self._build_query(query, from_date, to_date, page)\n\n response = requests.get(BASE_URL, data)\n parsed = json.loads(response.text)\n\n if page == 1: # store number of pages\n self.pages = parsed['response']['pages']\n\n self.results.extend(parsed['response']['results'])\n\n def _build_query(self, query, from_date=None, to_date=None, page=1):\n data = {\n 'q': query,\n 'api-key': self.credentials.key,\n 'page': str(page),\n 'show-fields': 'headline,trailText,body,bodyText,lang,wordcount',\n 'show-tags': 'all',\n }\n if from_date is not None:\n data['from-date'] = from_date\n if to_date is not None:\n data['to-date'] = to_date\n\n return data\n\n def search(self, query, from_date=None, to_date=None, max_documents=None,\n accumulate=False):\n \"\"\"\n Search The Guardian API for articles.\n\n Args:\n query (str): A query for searching the articles by\n from_date (str): Search only articles newer than the date provided.\n Date should be in ISO format; e.g. '2016-12-31'.\n to_date (str): Search only articles older than the date provided.\n Date should be in ISO format; e.g. 
'2016-12-31'.\n max_documents (int): Maximum number of documents to retrieve.\n When not given, retrieve all documents.\n accumulate (bool): A flag indicating whether to accumulate results\n of multiple consequent search calls.\n\n Returns:\n :ref:`Corpus`\n \"\"\"\n if not accumulate:\n self.results = []\n\n self._search(query, from_date, to_date)\n\n pages = math.ceil(max_documents/self.per_page) if max_documents else self.pages\n self.on_progress(self.per_page, pages * self.per_page)\n\n for p in range(2, pages+1): # to one based\n if self.should_break():\n break\n self._search(query, from_date, to_date, p)\n self.on_progress(p*self.per_page, pages * self.per_page)\n\n c = Corpus.from_documents(\n self.results, 'The Guardian', self.attributes, self.class_vars,\n self.metas, title_indices=self.title_indices)\n c.text_features = self.text_features\n return c\n\n\nif __name__ == '__main__':\n credentials = TheGuardianCredentials('')\n print(credentials.valid)\n api = TheGuardianAPI(credentials=credentials)\n c = api.search('refugees', max_documents=10)\n print(c)\n", "path": "orangecontrib/text/guardian.py"}], "after_files": [{"content": "\"\"\" This module fetches data from The Guardian API.\n\nTo use first create :class:`TheGuardianCredentials`:\n\n >>> from orangecontrib.text.guardian import TheGuardianCredentials\n >>> credentials = TheGuardianCredentials('<your-api-key>')\n\nThen create :class:`TheGuardianAPI` object and use it for searching:\n\n >>> from orangecontrib.text.guardian import TheGuardianAPI\n >>> api = TheGuardianAPI(credentials)\n >>> corpus = api.search('Slovenia', max_documents=10)\n >>> len(corpus)\n 10\n\n\"\"\"\n\nimport requests\nimport math\nimport json\n\nfrom Orange import data\n\nfrom orangecontrib.text.corpus import Corpus\n\n\nBASE_URL = 'http://content.guardianapis.com/search'\nARTICLES_PER_PAGE = 10\n\n\nclass TheGuardianCredentials:\n \"\"\" The Guardian API credentials. \"\"\"\n def __init__(self, key):\n \"\"\"\n Args:\n key (str): The Guardian API key. Use `test` for testing purposes.\n \"\"\"\n self.key = key\n\n @property\n def valid(self):\n \"\"\" Check if given API key is valid. 
\"\"\"\n response = requests.get(BASE_URL, {'api-key': self.key})\n return response.status_code != 403 # 403 == Forbidden\n\n def __eq__(self, other):\n return self.key == other.key\n\n\nclass TheGuardianAPI:\n attributes = []\n\n class_vars = [\n (data.DiscreteVariable('Section'), lambda doc: doc['sectionName']),\n ]\n\n tv = data.TimeVariable('Publication Date')\n metas = [\n (data.StringVariable('Headline'), lambda doc: doc['fields']['headline']),\n (data.StringVariable('Content'), lambda doc: doc['fields']['bodyText']),\n (data.StringVariable('Trail Text'), lambda doc: doc['fields']['trailText']),\n (data.StringVariable('HTML'), lambda doc: doc['fields']['body']),\n (tv, lambda doc: TheGuardianAPI.tv.parse(doc['webPublicationDate'])),\n (data.DiscreteVariable('Type'), lambda doc: doc['type']),\n (data.DiscreteVariable('Language'), lambda doc: doc['fields']['lang']),\n (data.StringVariable('Tags'),\n lambda doc: ', '.join(tag['webTitle'] for tag in doc['tags'])),\n (data.StringVariable('URL'), lambda doc: doc['webUrl']),\n (data.ContinuousVariable('Word Count', number_of_decimals=0),\n lambda doc: doc['fields']['wordcount']),\n ]\n\n text_features = [metas[0][0], metas[1][0]] # Headline + Content\n title_indices = [-1] # Headline\n\n def __init__(self, credentials, on_progress=None, should_break=None):\n \"\"\"\n Args:\n credentials (:class:`TheGuardianCredentials`): The Guardian Creentials.\n on_progress (callable): Function for progress reporting.\n should_break (callable): Function for early stopping.\n \"\"\"\n self.per_page = ARTICLES_PER_PAGE\n self.pages = 0\n self.credentials = credentials\n self.on_progress = on_progress or (lambda x, y: None)\n self.should_break = should_break or (lambda: False)\n\n self.results = []\n\n def _search(self, query, from_date, to_date, page=1):\n data = self._build_query(query, from_date, to_date, page)\n\n response = requests.get(BASE_URL, data)\n parsed = json.loads(response.text)\n\n if page == 1: # store number of pages\n self.pages = parsed['response']['pages']\n\n self.results.extend(parsed['response']['results'])\n\n def _build_query(self, query, from_date=None, to_date=None, page=1):\n data = {\n 'q': query,\n 'api-key': self.credentials.key,\n 'page': str(page),\n 'show-fields': 'headline,trailText,body,bodyText,lang,wordcount',\n 'show-tags': 'all',\n }\n if from_date is not None:\n data['from-date'] = from_date\n if to_date is not None:\n data['to-date'] = to_date\n\n return data\n\n def search(self, query, from_date=None, to_date=None, max_documents=None,\n accumulate=False):\n \"\"\"\n Search The Guardian API for articles.\n\n Args:\n query (str): A query for searching the articles by\n from_date (str): Search only articles newer than the date provided.\n Date should be in ISO format; e.g. '2016-12-31'.\n to_date (str): Search only articles older than the date provided.\n Date should be in ISO format; e.g. 
'2016-12-31'.\n max_documents (int): Maximum number of documents to retrieve.\n When not given, retrieve all documents.\n accumulate (bool): A flag indicating whether to accumulate results\n of multiple consequent search calls.\n\n Returns:\n :ref:`Corpus`\n \"\"\"\n if not accumulate:\n self.results = []\n\n self._search(query, from_date, to_date)\n\n pages = math.ceil(max_documents/self.per_page) if max_documents else self.pages\n self.on_progress(self.per_page, pages * self.per_page)\n\n for p in range(2, pages+1): # to one based\n if self.should_break():\n break\n self._search(query, from_date, to_date, p)\n self.on_progress(p*self.per_page, pages * self.per_page)\n\n c = Corpus.from_documents(\n self.results, 'The Guardian', self.attributes, self.class_vars,\n self.metas, title_indices=self.title_indices)\n c.text_features = self.text_features\n return c\n\n\nif __name__ == '__main__':\n credentials = TheGuardianCredentials('test')\n print(credentials.valid)\n api = TheGuardianAPI(credentials=credentials)\n c = api.search('refugees', max_documents=10)\n print(c)\n", "path": "orangecontrib/text/guardian.py"}]} | 2,107 | 119 |
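
The golden diff above only swaps the empty API key in the module's `__main__` example for the literal key `test`, which the record's own docstring describes as the key to use for testing. A minimal usage sketch under that assumption; it presumes the Orange3-Text add-on is installed and the Guardian API is reachable, and is not part of the dataset record itself.

```python
# Sketch only: mirrors the patched __main__ block of the record above.
from orangecontrib.text.guardian import TheGuardianCredentials, TheGuardianAPI

credentials = TheGuardianCredentials('test')   # 'test' is the documented testing key
print(credentials.valid)                       # False only if the server rejects the key (HTTP 403)
api = TheGuardianAPI(credentials=credentials)
corpus = api.search('refugees', max_documents=10)
print(corpus)
```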
gh_patches_debug_58564 | rasdani/github-patches | git_diff | codespell-project__codespell-2626 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`python setup.py check` → `twine check`
Because `setup.py ...` is deprecated, we need an alternative to `setup.py check` such as `twine`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #! /usr/bin/env python
2
3 from setuptools import setup
4
5 if __name__ == "__main__":
6 setup()
7
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
deleted file mode 100755
--- a/setup.py
+++ /dev/null
@@ -1,6 +0,0 @@
-#! /usr/bin/env python
-
-from setuptools import setup
-
-if __name__ == "__main__":
- setup()
| {"golden_diff": "diff --git a/setup.py b/setup.py\ndeleted file mode 100755\n--- a/setup.py\n+++ /dev/null\n@@ -1,6 +0,0 @@\n-#! /usr/bin/env python\n-\n-from setuptools import setup\n-\n-if __name__ == \"__main__\":\n- setup()\n", "issue": "`python setup.py check` \u2192 `twine check`\nBecause `setup.py ...` is deprecated, we need an alternative to `setup.py check` such as `twine`.\n", "before_files": [{"content": "#! /usr/bin/env python\n\nfrom setuptools import setup\n\nif __name__ == \"__main__\":\n setup()\n", "path": "setup.py"}], "after_files": [{"content": null, "path": "setup.py"}]} | 326 | 68 |
gh_patches_debug_5766 | rasdani/github-patches | git_diff | napari__napari-4259 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Previously selected point deleted when deleting layer
## 🐛 Bug
Recently selected points are erroneously removed when deleting new layers with the delete key (reproduced with both points and labels layers).
## To Reproduce
Steps to reproduce the behaviour:
1. Create a point on a points layer
2. Create a new points layer
3. Select the newly created points layer from the layer list (visually deselecting the point)
4. Delete newly created layer using the delete key, the last selected point will also be deleted
Please note that this issue does not occur when the layer is deleted using the bin icon, leading me to believe it is a keybinding issue (and the point must still be 'selected' in some capacity)
<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->
https://user-images.githubusercontent.com/95660545/156966137-b2a645a6-25ae-42b4-baf7-137e7506e20a.mp4
## Expected behaviour
It is expected that only the newly created points layer (with no points assigned to it) should be deleted, not the point as well.
<!-- A clear and concise description of what you expected to happen. -->
## Environment
napari: 0.4.15.dev68+gdd3a2afd
Platform: Windows-10-10.0.19044-SP0
Python: 3.9.7 (default, Sep 16 2021, 16:59:28) [MSC v.1916 64 bit (AMD64)]
Qt: 5.15.2
PyQt5: 5.15.6
NumPy: 1.21.5
SciPy: 1.7.3
Dask: 2022.01.0
VisPy: 0.9.6
OpenGL:
- GL version: 4.6.0 - Build 26.20.100.7372
- MAX_TEXTURE_SIZE: 16384
Screens:
- screen 1: resolution 1920x1080, scale 1.0
Plugins:
- console: 0.0.4
- scikit-image: 0.4.15.dev68+gdd3a2afd
- svg: 0.1.6
napari contributors (2019). napari: a multi-dimensional image viewer for python. doi:10.5281/zenodo.3555620
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `napari/_qt/containers/qt_layer_list.py`
Content:
```
1 from __future__ import annotations
2
3 from typing import TYPE_CHECKING
4
5 from qtpy.QtCore import QSortFilterProxyModel, Qt
6
7 from ...layers import Layer
8 from ...utils.translations import trans
9 from ._base_item_model import SortRole, _BaseEventedItemModel
10 from ._layer_delegate import LayerDelegate
11 from .qt_list_view import QtListView
12
13 if TYPE_CHECKING:
14 from qtpy.QtGui import QKeyEvent
15 from qtpy.QtWidgets import QWidget
16
17 from ...components.layerlist import LayerList
18
19
20 class ReverseProxyModel(QSortFilterProxyModel):
21 """Proxy Model that reverses the view order of a _BaseEventedItemModel."""
22
23 def __init__(self, model: _BaseEventedItemModel) -> None:
24 super().__init__()
25 self.setSourceModel(model)
26 self.setSortRole(SortRole)
27 self.sort(0, Qt.DescendingOrder)
28
29 def dropMimeData(self, data, action, destRow, col, parent):
30 """Handle destination row for dropping with reversed indices."""
31 row = 0 if destRow == -1 else self.sourceModel().rowCount() - destRow
32 return self.sourceModel().dropMimeData(data, action, row, col, parent)
33
34
35 class QtLayerList(QtListView[Layer]):
36 """QItemView subclass specialized for the LayerList.
37
38 This is as mostly for targetting with QSS, applying the delegate and
39 reversing the view with ReverseProxyModel.
40 """
41
42 def __init__(self, root: LayerList, parent: QWidget = None):
43 super().__init__(root, parent)
44 self.setItemDelegate(LayerDelegate())
45 self.setToolTip(trans._('Layer list'))
46 font = self.font()
47 font.setPointSize(12)
48 self.setFont(font)
49
50 # This reverses the order of the items in the view,
51 # so items at the end of the list are at the top.
52 self.setModel(ReverseProxyModel(self.model()))
53
54 def keyPressEvent(self, e: QKeyEvent) -> None:
55 """Override Qt event to pass events to the viewer."""
56 if e.key() != Qt.Key_Space:
57 super().keyPressEvent(e)
58
59 e.ignore() # pass key events up to viewer
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/napari/_qt/containers/qt_layer_list.py b/napari/_qt/containers/qt_layer_list.py
--- a/napari/_qt/containers/qt_layer_list.py
+++ b/napari/_qt/containers/qt_layer_list.py
@@ -53,7 +53,7 @@
def keyPressEvent(self, e: QKeyEvent) -> None:
"""Override Qt event to pass events to the viewer."""
- if e.key() != Qt.Key_Space:
+ if e.key() != Qt.Key.Key_Space:
super().keyPressEvent(e)
-
- e.ignore() # pass key events up to viewer
+ if e.key() not in (Qt.Key.Key_Backspace, Qt.Key.Key_Delete):
+ e.ignore() # pass key events up to viewer
| {"golden_diff": "diff --git a/napari/_qt/containers/qt_layer_list.py b/napari/_qt/containers/qt_layer_list.py\n--- a/napari/_qt/containers/qt_layer_list.py\n+++ b/napari/_qt/containers/qt_layer_list.py\n@@ -53,7 +53,7 @@\n \n def keyPressEvent(self, e: QKeyEvent) -> None:\n \"\"\"Override Qt event to pass events to the viewer.\"\"\"\n- if e.key() != Qt.Key_Space:\n+ if e.key() != Qt.Key.Key_Space:\n super().keyPressEvent(e)\n-\n- e.ignore() # pass key events up to viewer\n+ if e.key() not in (Qt.Key.Key_Backspace, Qt.Key.Key_Delete):\n+ e.ignore() # pass key events up to viewer\n", "issue": "Previously selected point deleted when deleting layer\n## \ud83d\udc1b Bug\r\n\r\nRecently selected points are erroneously removed when deleting new layers with the delete key. (reproduced with points and labels layer)\r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behaviour:\r\n\r\n1. Create a point on a points layer\r\n2. Create a new points layer\r\n3. Select the newly created points layer from the layer list (visually deselecting the point)\r\n4. Delete newly created layer using the delete key, the last selected point will also be deleted\r\n\r\nPlease note that this issue does not occur when the layer is deleted using the bin icon, leading me to believe it is a keybinding issue (and the point must still be 'selected' in come capacity)\r\n<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->\r\n\r\n\r\nhttps://user-images.githubusercontent.com/95660545/156966137-b2a645a6-25ae-42b4-baf7-137e7506e20a.mp4\r\n\r\n\r\n## Expected behaviour\r\nIt is expected that only the newly created points layer (with no points assigned to it) should be deleted, not the point as well.\r\n\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\n## Environment\r\n\r\nnapari: 0.4.15.dev68+gdd3a2afd\r\nPlatform: Windows-10-10.0.19044-SP0\r\nPython: 3.9.7 (default, Sep 16 2021, 16:59:28) [MSC v.1916 64 bit (AMD64)]\r\nQt: 5.15.2\r\nPyQt5: 5.15.6\r\nNumPy: 1.21.5\r\nSciPy: 1.7.3\r\nDask: 2022.01.0\r\nVisPy: 0.9.6\r\n\r\nOpenGL:\r\n- GL version: 4.6.0 - Build 26.20.100.7372\r\n- MAX_TEXTURE_SIZE: 16384\r\n\r\nScreens:\r\n- screen 1: resolution 1920x1080, scale 1.0\r\n\r\nPlugins:\r\n- console: 0.0.4\r\n- scikit-image: 0.4.15.dev68+gdd3a2afd\r\n- svg: 0.1.6\r\n\r\nnapari contributors (2019). napari: a multi-dimensional image viewer for python. 
doi:10.5281/zenodo.3555620\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom qtpy.QtCore import QSortFilterProxyModel, Qt\n\nfrom ...layers import Layer\nfrom ...utils.translations import trans\nfrom ._base_item_model import SortRole, _BaseEventedItemModel\nfrom ._layer_delegate import LayerDelegate\nfrom .qt_list_view import QtListView\n\nif TYPE_CHECKING:\n from qtpy.QtGui import QKeyEvent\n from qtpy.QtWidgets import QWidget\n\n from ...components.layerlist import LayerList\n\n\nclass ReverseProxyModel(QSortFilterProxyModel):\n \"\"\"Proxy Model that reverses the view order of a _BaseEventedItemModel.\"\"\"\n\n def __init__(self, model: _BaseEventedItemModel) -> None:\n super().__init__()\n self.setSourceModel(model)\n self.setSortRole(SortRole)\n self.sort(0, Qt.DescendingOrder)\n\n def dropMimeData(self, data, action, destRow, col, parent):\n \"\"\"Handle destination row for dropping with reversed indices.\"\"\"\n row = 0 if destRow == -1 else self.sourceModel().rowCount() - destRow\n return self.sourceModel().dropMimeData(data, action, row, col, parent)\n\n\nclass QtLayerList(QtListView[Layer]):\n \"\"\"QItemView subclass specialized for the LayerList.\n\n This is as mostly for targetting with QSS, applying the delegate and\n reversing the view with ReverseProxyModel.\n \"\"\"\n\n def __init__(self, root: LayerList, parent: QWidget = None):\n super().__init__(root, parent)\n self.setItemDelegate(LayerDelegate())\n self.setToolTip(trans._('Layer list'))\n font = self.font()\n font.setPointSize(12)\n self.setFont(font)\n\n # This reverses the order of the items in the view,\n # so items at the end of the list are at the top.\n self.setModel(ReverseProxyModel(self.model()))\n\n def keyPressEvent(self, e: QKeyEvent) -> None:\n \"\"\"Override Qt event to pass events to the viewer.\"\"\"\n if e.key() != Qt.Key_Space:\n super().keyPressEvent(e)\n\n e.ignore() # pass key events up to viewer\n", "path": "napari/_qt/containers/qt_layer_list.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom qtpy.QtCore import QSortFilterProxyModel, Qt\n\nfrom ...layers import Layer\nfrom ...utils.translations import trans\nfrom ._base_item_model import SortRole, _BaseEventedItemModel\nfrom ._layer_delegate import LayerDelegate\nfrom .qt_list_view import QtListView\n\nif TYPE_CHECKING:\n from qtpy.QtGui import QKeyEvent\n from qtpy.QtWidgets import QWidget\n\n from ...components.layerlist import LayerList\n\n\nclass ReverseProxyModel(QSortFilterProxyModel):\n \"\"\"Proxy Model that reverses the view order of a _BaseEventedItemModel.\"\"\"\n\n def __init__(self, model: _BaseEventedItemModel) -> None:\n super().__init__()\n self.setSourceModel(model)\n self.setSortRole(SortRole)\n self.sort(0, Qt.DescendingOrder)\n\n def dropMimeData(self, data, action, destRow, col, parent):\n \"\"\"Handle destination row for dropping with reversed indices.\"\"\"\n row = 0 if destRow == -1 else self.sourceModel().rowCount() - destRow\n return self.sourceModel().dropMimeData(data, action, row, col, parent)\n\n\nclass QtLayerList(QtListView[Layer]):\n \"\"\"QItemView subclass specialized for the LayerList.\n\n This is as mostly for targetting with QSS, applying the delegate and\n reversing the view with ReverseProxyModel.\n \"\"\"\n\n def __init__(self, root: LayerList, parent: QWidget = None):\n super().__init__(root, parent)\n self.setItemDelegate(LayerDelegate())\n self.setToolTip(trans._('Layer 
list'))\n font = self.font()\n font.setPointSize(12)\n self.setFont(font)\n\n # This reverses the order of the items in the view,\n # so items at the end of the list are at the top.\n self.setModel(ReverseProxyModel(self.model()))\n\n def keyPressEvent(self, e: QKeyEvent) -> None:\n \"\"\"Override Qt event to pass events to the viewer.\"\"\"\n if e.key() != Qt.Key.Key_Space:\n super().keyPressEvent(e)\n if e.key() not in (Qt.Key.Key_Backspace, Qt.Key.Key_Delete):\n e.ignore() # pass key events up to viewer\n", "path": "napari/_qt/containers/qt_layer_list.py"}]} | 1,456 | 175 |
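
The patch in this record keeps Delete/Backspace local to the layer list instead of forwarding them to the viewer, where they previously deleted the last-selected points. A condensed sketch of the patched handler on a plain `QListView`, not napari's actual class; the qtpy imports are assumed to be available.

```python
# Minimal, self-contained illustration of the patched keyPressEvent logic.
from qtpy.QtCore import Qt
from qtpy.QtGui import QKeyEvent
from qtpy.QtWidgets import QListView


class LayerListSketch(QListView):
    def keyPressEvent(self, e: QKeyEvent) -> None:
        if e.key() != Qt.Key.Key_Space:
            super().keyPressEvent(e)          # let the list handle most keys first
        if e.key() not in (Qt.Key.Key_Backspace, Qt.Key.Key_Delete):
            e.ignore()                        # forward to the viewer, but keep Delete/Backspace here
```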
gh_patches_debug_16311 | rasdani/github-patches | git_diff | spotify__luigi-368 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
LuigiConfigParser::add_config_path() raises if instance() hasn't been accessed
To add a path to the list of config paths, one currently has to do:
``` python
LuigiConfigParser.instance() # remove this and get an exception
LuigiConfigParser.add_config_path(my_path)
```
because `add_config_path` tries to reload `cls._instance` which is initialized with `None`. Wouldn't it be cleaner to do a check there and only reload a non-null instance?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `luigi/configuration.py`
Content:
```
1
2 import os
3 import logging
4 from ConfigParser import ConfigParser, NoOptionError, NoSectionError
5
6
7 class LuigiConfigParser(ConfigParser):
8 NO_DEFAULT = object()
9 _instance = None
10 _config_paths = ['/etc/luigi/client.cfg', 'client.cfg']
11 if 'LUIGI_CONFIG_PATH' in os.environ:
12 _config_paths.append(os.environ['LUIGI_CONFIG_PATH'])
13
14 @classmethod
15 def add_config_path(cls, path):
16 cls._config_paths.append(path)
17 cls._instance.reload()
18
19 @classmethod
20 def instance(cls, *args, **kwargs):
21 """ Singleton getter """
22 if cls._instance is None:
23 cls._instance = cls(*args, **kwargs)
24 loaded = cls._instance.reload()
25 logging.getLogger('luigi-interface').info('Loaded %r', loaded)
26
27 return cls._instance
28
29 def reload(self):
30 return self._instance.read(self._config_paths)
31
32 def _get_with_default(self, method, section, option, default, expected_type=None):
33 """ Gets the value of the section/option using method. Returns default if value
34 is not found. Raises an exception if the default value is not None and doesn't match
35 the expected_type.
36 """
37 try:
38 return method(self, section, option)
39 except (NoOptionError, NoSectionError):
40 if default is LuigiConfigParser.NO_DEFAULT:
41 raise
42 if expected_type is not None and default is not None and \
43 not isinstance(default, expected_type):
44 raise
45 return default
46
47 def get(self, section, option, default=NO_DEFAULT):
48 return self._get_with_default(ConfigParser.get, section, option, default)
49
50 def getboolean(self, section, option, default=NO_DEFAULT):
51 return self._get_with_default(ConfigParser.getboolean, section, option, default, bool)
52
53 def getint(self, section, option, default=NO_DEFAULT):
54 return self._get_with_default(ConfigParser.getint, section, option, default, int)
55
56 def getfloat(self, section, option, default=NO_DEFAULT):
57 return self._get_with_default(ConfigParser.getfloat, section, option, default, float)
58
59 def set(self, section, option, value):
60 if not ConfigParser.has_section(self, section):
61 ConfigParser.add_section(self, section)
62
63 return ConfigParser.set(self, section, option, value)
64
65 def get_config():
66 """ Convenience method (for backwards compatibility) for accessing config singleton """
67 return LuigiConfigParser.instance()
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/luigi/configuration.py b/luigi/configuration.py
--- a/luigi/configuration.py
+++ b/luigi/configuration.py
@@ -14,7 +14,7 @@
@classmethod
def add_config_path(cls, path):
cls._config_paths.append(path)
- cls._instance.reload()
+ cls.reload()
@classmethod
def instance(cls, *args, **kwargs):
@@ -26,8 +26,9 @@
return cls._instance
- def reload(self):
- return self._instance.read(self._config_paths)
+ @classmethod
+ def reload(cls):
+ return cls.instance().read(cls._config_paths)
def _get_with_default(self, method, section, option, default, expected_type=None):
""" Gets the value of the section/option using method. Returns default if value
| {"golden_diff": "diff --git a/luigi/configuration.py b/luigi/configuration.py\n--- a/luigi/configuration.py\n+++ b/luigi/configuration.py\n@@ -14,7 +14,7 @@\n @classmethod\n def add_config_path(cls, path):\n cls._config_paths.append(path)\n- cls._instance.reload()\n+ cls.reload()\n \n @classmethod\n def instance(cls, *args, **kwargs):\n@@ -26,8 +26,9 @@\n \n return cls._instance\n \n- def reload(self):\n- return self._instance.read(self._config_paths)\n+ @classmethod\n+ def reload(cls):\n+ return cls.instance().read(cls._config_paths)\n \n def _get_with_default(self, method, section, option, default, expected_type=None):\n \"\"\" Gets the value of the section/option using method. Returns default if value\n", "issue": "LuigiConfigParser::add_config_path() raises if instance() hasn't been accessed\nTo add a path to the list of config paths, one currently has to do:\n\n``` python\nLuigiConfigParser.instance() # remove this and get an exception\nLuigiConfigParser.add_config_path(my_path)\n```\n\nbecause `add_config_path` tries to reload `cls._instance` which is initialized with `None`. Wouldn't it be cleaner to do a check there and only reload a non-null instance?\n\n", "before_files": [{"content": "\nimport os\nimport logging\nfrom ConfigParser import ConfigParser, NoOptionError, NoSectionError\n\n\nclass LuigiConfigParser(ConfigParser):\n NO_DEFAULT = object()\n _instance = None\n _config_paths = ['/etc/luigi/client.cfg', 'client.cfg']\n if 'LUIGI_CONFIG_PATH' in os.environ:\n _config_paths.append(os.environ['LUIGI_CONFIG_PATH'])\n\n @classmethod\n def add_config_path(cls, path):\n cls._config_paths.append(path)\n cls._instance.reload()\n\n @classmethod\n def instance(cls, *args, **kwargs):\n \"\"\" Singleton getter \"\"\"\n if cls._instance is None:\n cls._instance = cls(*args, **kwargs)\n loaded = cls._instance.reload()\n logging.getLogger('luigi-interface').info('Loaded %r', loaded)\n\n return cls._instance\n\n def reload(self):\n return self._instance.read(self._config_paths)\n\n def _get_with_default(self, method, section, option, default, expected_type=None):\n \"\"\" Gets the value of the section/option using method. Returns default if value\n is not found. 
Raises an exception if the default value is not None and doesn't match\n the expected_type.\n \"\"\"\n try:\n return method(self, section, option)\n except (NoOptionError, NoSectionError):\n if default is LuigiConfigParser.NO_DEFAULT:\n raise\n if expected_type is not None and default is not None and \\\n not isinstance(default, expected_type):\n raise\n return default\n\n def get(self, section, option, default=NO_DEFAULT):\n return self._get_with_default(ConfigParser.get, section, option, default)\n\n def getboolean(self, section, option, default=NO_DEFAULT):\n return self._get_with_default(ConfigParser.getboolean, section, option, default, bool)\n\n def getint(self, section, option, default=NO_DEFAULT):\n return self._get_with_default(ConfigParser.getint, section, option, default, int)\n\n def getfloat(self, section, option, default=NO_DEFAULT):\n return self._get_with_default(ConfigParser.getfloat, section, option, default, float)\n\n def set(self, section, option, value):\n if not ConfigParser.has_section(self, section):\n ConfigParser.add_section(self, section)\n\n return ConfigParser.set(self, section, option, value)\n\ndef get_config():\n \"\"\" Convenience method (for backwards compatibility) for accessing config singleton \"\"\"\n return LuigiConfigParser.instance()\n", "path": "luigi/configuration.py"}], "after_files": [{"content": "\nimport os\nimport logging\nfrom ConfigParser import ConfigParser, NoOptionError, NoSectionError\n\n\nclass LuigiConfigParser(ConfigParser):\n NO_DEFAULT = object()\n _instance = None\n _config_paths = ['/etc/luigi/client.cfg', 'client.cfg']\n if 'LUIGI_CONFIG_PATH' in os.environ:\n _config_paths.append(os.environ['LUIGI_CONFIG_PATH'])\n\n @classmethod\n def add_config_path(cls, path):\n cls._config_paths.append(path)\n cls.reload()\n\n @classmethod\n def instance(cls, *args, **kwargs):\n \"\"\" Singleton getter \"\"\"\n if cls._instance is None:\n cls._instance = cls(*args, **kwargs)\n loaded = cls._instance.reload()\n logging.getLogger('luigi-interface').info('Loaded %r', loaded)\n\n return cls._instance\n\n @classmethod\n def reload(cls):\n return cls.instance().read(cls._config_paths)\n\n def _get_with_default(self, method, section, option, default, expected_type=None):\n \"\"\" Gets the value of the section/option using method. Returns default if value\n is not found. 
Raises an exception if the default value is not None and doesn't match\n the expected_type.\n \"\"\"\n try:\n return method(self, section, option)\n except (NoOptionError, NoSectionError):\n if default is LuigiConfigParser.NO_DEFAULT:\n raise\n if expected_type is not None and default is not None and \\\n not isinstance(default, expected_type):\n raise\n return default\n\n def get(self, section, option, default=NO_DEFAULT):\n return self._get_with_default(ConfigParser.get, section, option, default)\n\n def getboolean(self, section, option, default=NO_DEFAULT):\n return self._get_with_default(ConfigParser.getboolean, section, option, default, bool)\n\n def getint(self, section, option, default=NO_DEFAULT):\n return self._get_with_default(ConfigParser.getint, section, option, default, int)\n\n def getfloat(self, section, option, default=NO_DEFAULT):\n return self._get_with_default(ConfigParser.getfloat, section, option, default, float)\n\n def set(self, section, option, value):\n if not ConfigParser.has_section(self, section):\n ConfigParser.add_section(self, section)\n\n return ConfigParser.set(self, section, option, value)\n\ndef get_config():\n \"\"\" Convenience method (for backwards compatibility) for accessing config singleton \"\"\"\n return LuigiConfigParser.instance()\n", "path": "luigi/configuration.py"}]} | 1,036 | 192 |
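
The patch in this record turns `reload()` into a classmethod that routes through `instance()`, so `add_config_path()` no longer dereferences a still-`None` `_instance`. A stripped-down sketch of that singleton pattern follows; `ConfigSketch` and `_read()` are stand-ins, not the real `LuigiConfigParser`.

```python
# Illustrative stand-in: _read() replaces ConfigParser.read() so the sketch
# runs without luigi or a config file on disk.
class ConfigSketch(object):
    _instance = None
    _config_paths = []

    @classmethod
    def add_config_path(cls, path):
        cls._config_paths.append(path)
        cls.reload()                      # safe even before instance() was ever called

    @classmethod
    def instance(cls):
        if cls._instance is None:
            cls._instance = cls()
            cls._instance.reload()
        return cls._instance

    @classmethod
    def reload(cls):
        return cls.instance()._read(cls._config_paths)

    def _read(self, paths):
        return list(paths)


ConfigSketch.add_config_path('/tmp/extra.cfg')   # no AttributeError on a fresh class
```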
gh_patches_debug_8797 | rasdani/github-patches | git_diff | Kinto__kinto-1340 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`kinto create-user` doesn't override the password if the user already exists.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/plugins/accounts/scripts.py`
Content:
```
1 import logging
2 import getpass
3 from pyramid.settings import asbool
4
5 from .utils import hash_password
6 from .views import AccountIdGenerator
7
8
9 logger = logging.getLogger(__name__)
10
11
12 def create_user(env, username=None, password=None):
13 """Administrative command to create a new user."""
14 registry = env['registry']
15 settings = registry.settings
16 readonly_mode = asbool(settings.get('readonly', False))
17 if readonly_mode:
18 message = 'Cannot create a user with a readonly server.'
19 logger.error(message)
20 return 51
21
22 if 'kinto.plugins.accounts' not in settings['includes']:
23 message = 'Cannot create a user when the accounts plugin is not installed.'
24 logger.error(message)
25 return 52
26
27 try:
28 validator = AccountIdGenerator()
29 if username is None:
30 username = input('Username: ')
31 while not validator.match(username):
32 print('{} is not a valid username.')
33 print('Username should match {0!r}, please try again.'.format(validator.regexp))
34 username = input('Username: ')
35
36 if password is None:
37 while True: # The user didn't entered twice the same password
38 password = getpass.getpass('Please enter a password for {}: '.format(username))
39 confirm = getpass.getpass('Please confirm the password: '.format(username))
40
41 if password != confirm:
42 print('Sorry, passwords do not match, please try again.')
43 else:
44 break
45 except EOFError:
46 print('User creation aborted')
47 return 53
48
49 print("Creating user '{}'".format(username))
50 record = {'id': username, 'password': hash_password(password)}
51 registry.storage.create(collection_id='account',
52 parent_id=username,
53 record=record,
54 ignore_conflict=True)
55 registry.permission.add_principal_to_ace('/accounts/{}'.format(username),
56 'write',
57 'account:{}'.format(username))
58
59 return 0
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kinto/plugins/accounts/scripts.py b/kinto/plugins/accounts/scripts.py
--- a/kinto/plugins/accounts/scripts.py
+++ b/kinto/plugins/accounts/scripts.py
@@ -1,5 +1,7 @@
import logging
import getpass
+
+import transaction as current_transaction
from pyramid.settings import asbool
from .utils import hash_password
@@ -56,4 +58,6 @@
'write',
'account:{}'.format(username))
+ current_transaction.commit()
+
return 0
| {"golden_diff": "diff --git a/kinto/plugins/accounts/scripts.py b/kinto/plugins/accounts/scripts.py\n--- a/kinto/plugins/accounts/scripts.py\n+++ b/kinto/plugins/accounts/scripts.py\n@@ -1,5 +1,7 @@\n import logging\n import getpass\n+\n+import transaction as current_transaction\n from pyramid.settings import asbool\n \n from .utils import hash_password\n@@ -56,4 +58,6 @@\n 'write',\n 'account:{}'.format(username))\n \n+ current_transaction.commit()\n+\n return 0\n", "issue": "`kinto create-user` doesn't override the password if the user already exists.\n\n", "before_files": [{"content": "import logging\nimport getpass\nfrom pyramid.settings import asbool\n\nfrom .utils import hash_password\nfrom .views import AccountIdGenerator\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef create_user(env, username=None, password=None):\n \"\"\"Administrative command to create a new user.\"\"\"\n registry = env['registry']\n settings = registry.settings\n readonly_mode = asbool(settings.get('readonly', False))\n if readonly_mode:\n message = 'Cannot create a user with a readonly server.'\n logger.error(message)\n return 51\n\n if 'kinto.plugins.accounts' not in settings['includes']:\n message = 'Cannot create a user when the accounts plugin is not installed.'\n logger.error(message)\n return 52\n\n try:\n validator = AccountIdGenerator()\n if username is None:\n username = input('Username: ')\n while not validator.match(username):\n print('{} is not a valid username.')\n print('Username should match {0!r}, please try again.'.format(validator.regexp))\n username = input('Username: ')\n\n if password is None:\n while True: # The user didn't entered twice the same password\n password = getpass.getpass('Please enter a password for {}: '.format(username))\n confirm = getpass.getpass('Please confirm the password: '.format(username))\n\n if password != confirm:\n print('Sorry, passwords do not match, please try again.')\n else:\n break\n except EOFError:\n print('User creation aborted')\n return 53\n\n print(\"Creating user '{}'\".format(username))\n record = {'id': username, 'password': hash_password(password)}\n registry.storage.create(collection_id='account',\n parent_id=username,\n record=record,\n ignore_conflict=True)\n registry.permission.add_principal_to_ace('/accounts/{}'.format(username),\n 'write',\n 'account:{}'.format(username))\n\n return 0\n", "path": "kinto/plugins/accounts/scripts.py"}], "after_files": [{"content": "import logging\nimport getpass\n\nimport transaction as current_transaction\nfrom pyramid.settings import asbool\n\nfrom .utils import hash_password\nfrom .views import AccountIdGenerator\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef create_user(env, username=None, password=None):\n \"\"\"Administrative command to create a new user.\"\"\"\n registry = env['registry']\n settings = registry.settings\n readonly_mode = asbool(settings.get('readonly', False))\n if readonly_mode:\n message = 'Cannot create a user with a readonly server.'\n logger.error(message)\n return 51\n\n if 'kinto.plugins.accounts' not in settings['includes']:\n message = 'Cannot create a user when the accounts plugin is not installed.'\n logger.error(message)\n return 52\n\n try:\n validator = AccountIdGenerator()\n if username is None:\n username = input('Username: ')\n while not validator.match(username):\n print('{} is not a valid username.')\n print('Username should match {0!r}, please try again.'.format(validator.regexp))\n username = input('Username: ')\n\n if password is None:\n while True: # The 
user didn't entered twice the same password\n password = getpass.getpass('Please enter a password for {}: '.format(username))\n confirm = getpass.getpass('Please confirm the password: '.format(username))\n\n if password != confirm:\n print('Sorry, passwords do not match, please try again.')\n else:\n break\n except EOFError:\n print('User creation aborted')\n return 53\n\n print(\"Creating user '{}'\".format(username))\n record = {'id': username, 'password': hash_password(password)}\n registry.storage.create(collection_id='account',\n parent_id=username,\n record=record,\n ignore_conflict=True)\n registry.permission.add_principal_to_ace('/accounts/{}'.format(username),\n 'write',\n 'account:{}'.format(username))\n\n current_transaction.commit()\n\n return 0\n", "path": "kinto/plugins/accounts/scripts.py"}]} | 805 | 112 |
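
The fix in this record is an explicit `transaction.commit()` after the storage and permission writes: run from the command line there is no request-bound transaction manager to commit for you, so presumably the update to an existing account (the new password) was never persisted. A hedged sketch of that pattern; `storage` and `permission` stand for the backend objects the script receives from the registry.

```python
# Sketch only: assumes the `transaction` package used by Kinto's transactional
# backends; the backend objects are passed in rather than looked up here.
import transaction


def save_account(storage, permission, username, hashed_password):
    record = {'id': username, 'password': hashed_password}
    storage.create(collection_id='account', parent_id=username,
                   record=record, ignore_conflict=True)
    permission.add_principal_to_ace('/accounts/{}'.format(username),
                                    'write', 'account:{}'.format(username))
    transaction.commit()   # nothing else commits when running outside a request
```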
gh_patches_debug_15585 | rasdani/github-patches | git_diff | Parsl__parsl-1987 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
garbage collection vs usage tracking shutdown race condition
**Describe the bug**
I've seen this exception at least once in CI local tests. I think it comes from job-completion garbage collection racing with DFK shutdown usage reporting while examining the task dictionary.
```
parsl/tests/test_staging/test_elaborate_noop_file.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
parsl/dataflow/dflow.py:1051: in cleanup
self.usage_tracker.send_message()
parsl/dataflow/usage_tracking/usage.py:224: in send_message
message = self.construct_end_message()
parsl/dataflow/usage_tracking/usage.py:182: in construct_end_message
app_fails = len([t for t in self.dfk.tasks if
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
.0 = <dict_keyiterator object at 0x7f86107ca040>
> app_fails = len([t for t in self.dfk.tasks if
self.dfk.tasks[t]['status'] in FINAL_FAILURE_STATES])
E RuntimeError: dictionary changed size during iteration
```
**To Reproduce**
non-deterministic in CI
**Environment**
CI, near master 02d3b93ad4fd97b2411c000cf0b8820c76e5dfef
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsl/dataflow/usage_tracking/usage.py`
Content:
```
1 import uuid
2 import time
3 import hashlib
4 import os
5 import getpass
6 import json
7 import logging
8 import socket
9 import sys
10 import platform
11 import multiprocessing as mp
12
13 from parsl.dataflow.states import FINAL_FAILURE_STATES
14 from parsl.version import VERSION as PARSL_VERSION
15
16 logger = logging.getLogger(__name__)
17
18
19 def async_process(fn):
20 """ Decorator function to launch a function as a separate process """
21
22 def run(*args, **kwargs):
23 proc = mp.Process(target=fn, args=args, kwargs=kwargs, name="Usage-Tracking")
24 proc.start()
25 return proc
26
27 return run
28
29
30 @async_process
31 def udp_messenger(domain_name, UDP_IP, UDP_PORT, sock_timeout, message):
32 """Send UDP messages to usage tracker asynchronously
33
34 This multiprocessing based messenger was written to overcome the limitations
35 of signalling/terminating a thread that is blocked on a system call. This
36 messenger is created as a separate process, and initialized with 2 queues,
37 to_send to receive messages to be sent to the internet.
38
39 Args:
40 - domain_name (str) : Domain name string
41 - UDP_IP (str) : IP address YYY.YYY.YYY.YYY
42 - UDP_PORT (int) : UDP port to send out on
43 - sock_timeout (int) : Socket timeout
44 - to_send (multiprocessing.Queue) : Queue of outgoing messages to internet
45 """
46 try:
47 if message is None:
48 raise ValueError("message was none")
49
50 encoded_message = bytes(message, "utf-8")
51
52 if encoded_message is None:
53 raise ValueError("utf-8 encoding of message failed")
54
55 if domain_name:
56 try:
57 UDP_IP = socket.gethostbyname(domain_name)
58 except Exception:
59 # (False, "Domain lookup failed, defaulting to {0}".format(UDP_IP))
60 pass
61
62 if UDP_IP is None:
63 raise Exception("UDP_IP is None")
64
65 if UDP_PORT is None:
66 raise Exception("UDP_PORT is None")
67
68 sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
69 sock.settimeout(sock_timeout)
70 sock.sendto(bytes(message, "utf-8"), (UDP_IP, UDP_PORT))
71 sock.close()
72
73 except socket.timeout:
74 logger.debug("Failed to send usage tracking data: socket timeout")
75 except OSError as e:
76 logger.debug("Failed to send usage tracking data: OSError: {}".format(e))
77 except Exception as e:
78 logger.debug("Failed to send usage tracking data: Exception: {}".format(e))
79
80
81 class UsageTracker (object):
82 """Anonymized Usage Tracking for Parsl.
83
84 Client for this is here : https://github.com/Parsl/parsl_tracking
85 This issue captures the discussion that went into functionality
86 implemented here : https://github.com/Parsl/parsl/issues/34
87
88 """
89
90 def __init__(self, dfk, ip='52.3.111.203', port=50077,
91 domain_name='tracking.parsl-project.org'):
92 """Initialize usage tracking unless the user has opted-out.
93
94 We will try to resolve the hostname specified in kwarg:domain_name
95 and if that fails attempt to use the kwarg:ip. Determining the
96 IP and sending message is threaded to avoid slowing down DFK
97 initialization.
98
99 Tracks usage stats by inspecting the internal state of the dfk.
100
101 Args:
102 - dfk (DFK object) : Data Flow Kernel object
103
104 KWargs:
105 - ip (string) : IP address
106 - port (int) : Port number, Default:50077
107 - domain_name (string) : Domain name, will override IP
108 Default: tracking.parsl-project.org
109 """
110
111 self.domain_name = domain_name
112 self.ip = ip
113 # The sock timeout will only apply to UDP send and not domain resolution
114 self.sock_timeout = 5
115 self.UDP_PORT = port
116 self.UDP_IP = None
117 self.procs = []
118 self.dfk = dfk
119 self.config = self.dfk.config
120 self.uuid = str(uuid.uuid4())
121 self.parsl_version = PARSL_VERSION
122 self.python_version = "{}.{}.{}".format(sys.version_info.major,
123 sys.version_info.minor,
124 sys.version_info.micro)
125 self.tracking_enabled = self.check_tracking_enabled()
126 logger.debug("Tracking status: {}".format(self.tracking_enabled))
127 self.initialized = False # Once first message is sent this will be True
128
129 def check_tracking_enabled(self):
130 """By default tracking is enabled.
131
132 Tracking is disabled if :
133 1. config["globals"]["usageTracking"] is set to False (Bool)
134 2. Environment variable PARSL_TRACKING is set to false (case insensitive)
135
136 """
137 track = True # By default we track usage
138
139 if not self.config.usage_tracking:
140 track = False
141
142 envvar = str(os.environ.get("PARSL_TRACKING", True)).lower()
143 if envvar == "false":
144 track = False
145
146 return track
147
148 def construct_start_message(self):
149 """Collect preliminary run info at the start of the DFK.
150
151 Returns :
152 - Message dict dumped as json string, ready for UDP
153 """
154 uname = getpass.getuser().encode('latin1')
155 hashed_username = hashlib.sha256(uname).hexdigest()[0:10]
156 hname = socket.gethostname().encode('latin1')
157 hashed_hostname = hashlib.sha256(hname).hexdigest()[0:10]
158 message = {'uuid': self.uuid,
159 'uname': hashed_username,
160 'hname': hashed_hostname,
161 'test': False, # this field previously indicated if parsl
162 # was being run in test mode, and is
163 # retained for protocol compatibility
164 'parsl_v': self.parsl_version,
165 'python_v': self.python_version,
166 'os': platform.system(),
167 'os_v': platform.release(),
168 'start': time.time()}
169
170 return json.dumps(message)
171
172 def construct_end_message(self):
173 """Collect the final run information at the time of DFK cleanup.
174
175 Returns:
176 - Message dict dumped as json string, ready for UDP
177 """
178 app_count = self.dfk.task_count
179
180 site_count = len([x for x in self.dfk.config.executors if x.managed])
181
182 app_fails = len([t for t in self.dfk.tasks if
183 self.dfk.tasks[t]['status'] in FINAL_FAILURE_STATES])
184
185 message = {'uuid': self.uuid,
186 'end': time.time(),
187 't_apps': app_count,
188 'sites': site_count,
189 'c_time': None,
190 'failed': app_fails,
191 'test': False, # see comment in construct_start_message
192 }
193
194 return json.dumps(message)
195
196 def send_UDP_message(self, message):
197 """Send UDP message."""
198 x = 0
199 if self.tracking_enabled:
200 try:
201 proc = udp_messenger(self.domain_name, self.UDP_IP, self.UDP_PORT, self.sock_timeout, message)
202 self.procs.append(proc)
203 except Exception as e:
204 logger.debug("Usage tracking failed: {}".format(e))
205 else:
206 x = -1
207
208 return x
209
210 def send_message(self):
211 """Send message over UDP.
212
213 If tracking is disables, the bytes_sent will always be set to -1
214
215 Returns:
216 (bytes_sent, time_taken)
217 """
218 start = time.time()
219 message = None
220 if not self.initialized:
221 message = self.construct_start_message()
222 self.initialized = True
223 else:
224 message = self.construct_end_message()
225
226 self.send_UDP_message(message)
227 end = time.time()
228
229 return end - start
230
231 def __del__(self):
232 return self.close()
233
234 def close(self):
235 """We terminate (SIGTERM) the processes added to the self.procs list """
236 for proc in self.procs:
237 proc.terminate()
238
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parsl/dataflow/usage_tracking/usage.py b/parsl/dataflow/usage_tracking/usage.py
--- a/parsl/dataflow/usage_tracking/usage.py
+++ b/parsl/dataflow/usage_tracking/usage.py
@@ -10,7 +10,6 @@
import platform
import multiprocessing as mp
-from parsl.dataflow.states import FINAL_FAILURE_STATES
from parsl.version import VERSION as PARSL_VERSION
logger = logging.getLogger(__name__)
@@ -179,8 +178,7 @@
site_count = len([x for x in self.dfk.config.executors if x.managed])
- app_fails = len([t for t in self.dfk.tasks if
- self.dfk.tasks[t]['status'] in FINAL_FAILURE_STATES])
+ app_fails = self.dfk.tasks_failed_count + self.dfk.tasks_dep_fail_count
message = {'uuid': self.uuid,
'end': time.time(),
| {"golden_diff": "diff --git a/parsl/dataflow/usage_tracking/usage.py b/parsl/dataflow/usage_tracking/usage.py\n--- a/parsl/dataflow/usage_tracking/usage.py\n+++ b/parsl/dataflow/usage_tracking/usage.py\n@@ -10,7 +10,6 @@\n import platform\n import multiprocessing as mp\n \n-from parsl.dataflow.states import FINAL_FAILURE_STATES\n from parsl.version import VERSION as PARSL_VERSION\n \n logger = logging.getLogger(__name__)\n@@ -179,8 +178,7 @@\n \n site_count = len([x for x in self.dfk.config.executors if x.managed])\n \n- app_fails = len([t for t in self.dfk.tasks if\n- self.dfk.tasks[t]['status'] in FINAL_FAILURE_STATES])\n+ app_fails = self.dfk.tasks_failed_count + self.dfk.tasks_dep_fail_count\n \n message = {'uuid': self.uuid,\n 'end': time.time(),\n", "issue": "garbage collection vs usage tracking shutdown race condition\n**Describe the bug**\r\nI've seen this exception at least once in CI local tests. I think it comes from job completion garbage collection racing with DFK shutdown usage reporting, in examining the task dictionary.\r\n\r\n```\r\nparsl/tests/test_staging/test_elaborate_noop_file.py:60: \r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ \r\nparsl/dataflow/dflow.py:1051: in cleanup\r\n self.usage_tracker.send_message()\r\nparsl/dataflow/usage_tracking/usage.py:224: in send_message\r\n message = self.construct_end_message()\r\nparsl/dataflow/usage_tracking/usage.py:182: in construct_end_message\r\n app_fails = len([t for t in self.dfk.tasks if\r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ \r\n.0 = <dict_keyiterator object at 0x7f86107ca040>\r\n> app_fails = len([t for t in self.dfk.tasks if\r\n self.dfk.tasks[t]['status'] in FINAL_FAILURE_STATES])\r\nE RuntimeError: dictionary changed size during iteration\r\n\r\n```\r\n\r\n\r\n**To Reproduce**\r\nnon-deterministic in CI\r\n\r\n**Environment**\r\nCI, near master 02d3b93ad4fd97b2411c000cf0b8820c76e5dfef\r\n\n", "before_files": [{"content": "import uuid\nimport time\nimport hashlib\nimport os\nimport getpass\nimport json\nimport logging\nimport socket\nimport sys\nimport platform\nimport multiprocessing as mp\n\nfrom parsl.dataflow.states import FINAL_FAILURE_STATES\nfrom parsl.version import VERSION as PARSL_VERSION\n\nlogger = logging.getLogger(__name__)\n\n\ndef async_process(fn):\n \"\"\" Decorator function to launch a function as a separate process \"\"\"\n\n def run(*args, **kwargs):\n proc = mp.Process(target=fn, args=args, kwargs=kwargs, name=\"Usage-Tracking\")\n proc.start()\n return proc\n\n return run\n\n\n@async_process\ndef udp_messenger(domain_name, UDP_IP, UDP_PORT, sock_timeout, message):\n \"\"\"Send UDP messages to usage tracker asynchronously\n\n This multiprocessing based messenger was written to overcome the limitations\n of signalling/terminating a thread that is blocked on a system call. 
This\n messenger is created as a separate process, and initialized with 2 queues,\n to_send to receive messages to be sent to the internet.\n\n Args:\n - domain_name (str) : Domain name string\n - UDP_IP (str) : IP address YYY.YYY.YYY.YYY\n - UDP_PORT (int) : UDP port to send out on\n - sock_timeout (int) : Socket timeout\n - to_send (multiprocessing.Queue) : Queue of outgoing messages to internet\n \"\"\"\n try:\n if message is None:\n raise ValueError(\"message was none\")\n\n encoded_message = bytes(message, \"utf-8\")\n\n if encoded_message is None:\n raise ValueError(\"utf-8 encoding of message failed\")\n\n if domain_name:\n try:\n UDP_IP = socket.gethostbyname(domain_name)\n except Exception:\n # (False, \"Domain lookup failed, defaulting to {0}\".format(UDP_IP))\n pass\n\n if UDP_IP is None:\n raise Exception(\"UDP_IP is None\")\n\n if UDP_PORT is None:\n raise Exception(\"UDP_PORT is None\")\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP\n sock.settimeout(sock_timeout)\n sock.sendto(bytes(message, \"utf-8\"), (UDP_IP, UDP_PORT))\n sock.close()\n\n except socket.timeout:\n logger.debug(\"Failed to send usage tracking data: socket timeout\")\n except OSError as e:\n logger.debug(\"Failed to send usage tracking data: OSError: {}\".format(e))\n except Exception as e:\n logger.debug(\"Failed to send usage tracking data: Exception: {}\".format(e))\n\n\nclass UsageTracker (object):\n \"\"\"Anonymized Usage Tracking for Parsl.\n\n Client for this is here : https://github.com/Parsl/parsl_tracking\n This issue captures the discussion that went into functionality\n implemented here : https://github.com/Parsl/parsl/issues/34\n\n \"\"\"\n\n def __init__(self, dfk, ip='52.3.111.203', port=50077,\n domain_name='tracking.parsl-project.org'):\n \"\"\"Initialize usage tracking unless the user has opted-out.\n\n We will try to resolve the hostname specified in kwarg:domain_name\n and if that fails attempt to use the kwarg:ip. Determining the\n IP and sending message is threaded to avoid slowing down DFK\n initialization.\n\n Tracks usage stats by inspecting the internal state of the dfk.\n\n Args:\n - dfk (DFK object) : Data Flow Kernel object\n\n KWargs:\n - ip (string) : IP address\n - port (int) : Port number, Default:50077\n - domain_name (string) : Domain name, will override IP\n Default: tracking.parsl-project.org\n \"\"\"\n\n self.domain_name = domain_name\n self.ip = ip\n # The sock timeout will only apply to UDP send and not domain resolution\n self.sock_timeout = 5\n self.UDP_PORT = port\n self.UDP_IP = None\n self.procs = []\n self.dfk = dfk\n self.config = self.dfk.config\n self.uuid = str(uuid.uuid4())\n self.parsl_version = PARSL_VERSION\n self.python_version = \"{}.{}.{}\".format(sys.version_info.major,\n sys.version_info.minor,\n sys.version_info.micro)\n self.tracking_enabled = self.check_tracking_enabled()\n logger.debug(\"Tracking status: {}\".format(self.tracking_enabled))\n self.initialized = False # Once first message is sent this will be True\n\n def check_tracking_enabled(self):\n \"\"\"By default tracking is enabled.\n\n Tracking is disabled if :\n 1. config[\"globals\"][\"usageTracking\"] is set to False (Bool)\n 2. 
Environment variable PARSL_TRACKING is set to false (case insensitive)\n\n \"\"\"\n track = True # By default we track usage\n\n if not self.config.usage_tracking:\n track = False\n\n envvar = str(os.environ.get(\"PARSL_TRACKING\", True)).lower()\n if envvar == \"false\":\n track = False\n\n return track\n\n def construct_start_message(self):\n \"\"\"Collect preliminary run info at the start of the DFK.\n\n Returns :\n - Message dict dumped as json string, ready for UDP\n \"\"\"\n uname = getpass.getuser().encode('latin1')\n hashed_username = hashlib.sha256(uname).hexdigest()[0:10]\n hname = socket.gethostname().encode('latin1')\n hashed_hostname = hashlib.sha256(hname).hexdigest()[0:10]\n message = {'uuid': self.uuid,\n 'uname': hashed_username,\n 'hname': hashed_hostname,\n 'test': False, # this field previously indicated if parsl\n # was being run in test mode, and is\n # retained for protocol compatibility\n 'parsl_v': self.parsl_version,\n 'python_v': self.python_version,\n 'os': platform.system(),\n 'os_v': platform.release(),\n 'start': time.time()}\n\n return json.dumps(message)\n\n def construct_end_message(self):\n \"\"\"Collect the final run information at the time of DFK cleanup.\n\n Returns:\n - Message dict dumped as json string, ready for UDP\n \"\"\"\n app_count = self.dfk.task_count\n\n site_count = len([x for x in self.dfk.config.executors if x.managed])\n\n app_fails = len([t for t in self.dfk.tasks if\n self.dfk.tasks[t]['status'] in FINAL_FAILURE_STATES])\n\n message = {'uuid': self.uuid,\n 'end': time.time(),\n 't_apps': app_count,\n 'sites': site_count,\n 'c_time': None,\n 'failed': app_fails,\n 'test': False, # see comment in construct_start_message\n }\n\n return json.dumps(message)\n\n def send_UDP_message(self, message):\n \"\"\"Send UDP message.\"\"\"\n x = 0\n if self.tracking_enabled:\n try:\n proc = udp_messenger(self.domain_name, self.UDP_IP, self.UDP_PORT, self.sock_timeout, message)\n self.procs.append(proc)\n except Exception as e:\n logger.debug(\"Usage tracking failed: {}\".format(e))\n else:\n x = -1\n\n return x\n\n def send_message(self):\n \"\"\"Send message over UDP.\n\n If tracking is disables, the bytes_sent will always be set to -1\n\n Returns:\n (bytes_sent, time_taken)\n \"\"\"\n start = time.time()\n message = None\n if not self.initialized:\n message = self.construct_start_message()\n self.initialized = True\n else:\n message = self.construct_end_message()\n\n self.send_UDP_message(message)\n end = time.time()\n\n return end - start\n\n def __del__(self):\n return self.close()\n\n def close(self):\n \"\"\"We terminate (SIGTERM) the processes added to the self.procs list \"\"\"\n for proc in self.procs:\n proc.terminate()\n", "path": "parsl/dataflow/usage_tracking/usage.py"}], "after_files": [{"content": "import uuid\nimport time\nimport hashlib\nimport os\nimport getpass\nimport json\nimport logging\nimport socket\nimport sys\nimport platform\nimport multiprocessing as mp\n\nfrom parsl.version import VERSION as PARSL_VERSION\n\nlogger = logging.getLogger(__name__)\n\n\ndef async_process(fn):\n \"\"\" Decorator function to launch a function as a separate process \"\"\"\n\n def run(*args, **kwargs):\n proc = mp.Process(target=fn, args=args, kwargs=kwargs, name=\"Usage-Tracking\")\n proc.start()\n return proc\n\n return run\n\n\n@async_process\ndef udp_messenger(domain_name, UDP_IP, UDP_PORT, sock_timeout, message):\n \"\"\"Send UDP messages to usage tracker asynchronously\n\n This multiprocessing based messenger was written to overcome the 
limitations\n of signalling/terminating a thread that is blocked on a system call. This\n messenger is created as a separate process, and initialized with 2 queues,\n to_send to receive messages to be sent to the internet.\n\n Args:\n - domain_name (str) : Domain name string\n - UDP_IP (str) : IP address YYY.YYY.YYY.YYY\n - UDP_PORT (int) : UDP port to send out on\n - sock_timeout (int) : Socket timeout\n - to_send (multiprocessing.Queue) : Queue of outgoing messages to internet\n \"\"\"\n try:\n if message is None:\n raise ValueError(\"message was none\")\n\n encoded_message = bytes(message, \"utf-8\")\n\n if encoded_message is None:\n raise ValueError(\"utf-8 encoding of message failed\")\n\n if domain_name:\n try:\n UDP_IP = socket.gethostbyname(domain_name)\n except Exception:\n # (False, \"Domain lookup failed, defaulting to {0}\".format(UDP_IP))\n pass\n\n if UDP_IP is None:\n raise Exception(\"UDP_IP is None\")\n\n if UDP_PORT is None:\n raise Exception(\"UDP_PORT is None\")\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP\n sock.settimeout(sock_timeout)\n sock.sendto(bytes(message, \"utf-8\"), (UDP_IP, UDP_PORT))\n sock.close()\n\n except socket.timeout:\n logger.debug(\"Failed to send usage tracking data: socket timeout\")\n except OSError as e:\n logger.debug(\"Failed to send usage tracking data: OSError: {}\".format(e))\n except Exception as e:\n logger.debug(\"Failed to send usage tracking data: Exception: {}\".format(e))\n\n\nclass UsageTracker (object):\n \"\"\"Anonymized Usage Tracking for Parsl.\n\n Client for this is here : https://github.com/Parsl/parsl_tracking\n This issue captures the discussion that went into functionality\n implemented here : https://github.com/Parsl/parsl/issues/34\n\n \"\"\"\n\n def __init__(self, dfk, ip='52.3.111.203', port=50077,\n domain_name='tracking.parsl-project.org'):\n \"\"\"Initialize usage tracking unless the user has opted-out.\n\n We will try to resolve the hostname specified in kwarg:domain_name\n and if that fails attempt to use the kwarg:ip. Determining the\n IP and sending message is threaded to avoid slowing down DFK\n initialization.\n\n Tracks usage stats by inspecting the internal state of the dfk.\n\n Args:\n - dfk (DFK object) : Data Flow Kernel object\n\n KWargs:\n - ip (string) : IP address\n - port (int) : Port number, Default:50077\n - domain_name (string) : Domain name, will override IP\n Default: tracking.parsl-project.org\n \"\"\"\n\n self.domain_name = domain_name\n self.ip = ip\n # The sock timeout will only apply to UDP send and not domain resolution\n self.sock_timeout = 5\n self.UDP_PORT = port\n self.UDP_IP = None\n self.procs = []\n self.dfk = dfk\n self.config = self.dfk.config\n self.uuid = str(uuid.uuid4())\n self.parsl_version = PARSL_VERSION\n self.python_version = \"{}.{}.{}\".format(sys.version_info.major,\n sys.version_info.minor,\n sys.version_info.micro)\n self.tracking_enabled = self.check_tracking_enabled()\n logger.debug(\"Tracking status: {}\".format(self.tracking_enabled))\n self.initialized = False # Once first message is sent this will be True\n\n def check_tracking_enabled(self):\n \"\"\"By default tracking is enabled.\n\n Tracking is disabled if :\n 1. config[\"globals\"][\"usageTracking\"] is set to False (Bool)\n 2. 
Environment variable PARSL_TRACKING is set to false (case insensitive)\n\n \"\"\"\n track = True # By default we track usage\n\n if not self.config.usage_tracking:\n track = False\n\n envvar = str(os.environ.get(\"PARSL_TRACKING\", True)).lower()\n if envvar == \"false\":\n track = False\n\n return track\n\n def construct_start_message(self):\n \"\"\"Collect preliminary run info at the start of the DFK.\n\n Returns :\n - Message dict dumped as json string, ready for UDP\n \"\"\"\n uname = getpass.getuser().encode('latin1')\n hashed_username = hashlib.sha256(uname).hexdigest()[0:10]\n hname = socket.gethostname().encode('latin1')\n hashed_hostname = hashlib.sha256(hname).hexdigest()[0:10]\n message = {'uuid': self.uuid,\n 'uname': hashed_username,\n 'hname': hashed_hostname,\n 'test': False, # this field previously indicated if parsl\n # was being run in test mode, and is\n # retained for protocol compatibility\n 'parsl_v': self.parsl_version,\n 'python_v': self.python_version,\n 'os': platform.system(),\n 'os_v': platform.release(),\n 'start': time.time()}\n\n return json.dumps(message)\n\n def construct_end_message(self):\n \"\"\"Collect the final run information at the time of DFK cleanup.\n\n Returns:\n - Message dict dumped as json string, ready for UDP\n \"\"\"\n app_count = self.dfk.task_count\n\n site_count = len([x for x in self.dfk.config.executors if x.managed])\n\n app_fails = self.dfk.tasks_failed_count + self.dfk.tasks_dep_fail_count\n\n message = {'uuid': self.uuid,\n 'end': time.time(),\n 't_apps': app_count,\n 'sites': site_count,\n 'c_time': None,\n 'failed': app_fails,\n 'test': False, # see comment in construct_start_message\n }\n\n return json.dumps(message)\n\n def send_UDP_message(self, message):\n \"\"\"Send UDP message.\"\"\"\n x = 0\n if self.tracking_enabled:\n try:\n proc = udp_messenger(self.domain_name, self.UDP_IP, self.UDP_PORT, self.sock_timeout, message)\n self.procs.append(proc)\n except Exception as e:\n logger.debug(\"Usage tracking failed: {}\".format(e))\n else:\n x = -1\n\n return x\n\n def send_message(self):\n \"\"\"Send message over UDP.\n\n If tracking is disables, the bytes_sent will always be set to -1\n\n Returns:\n (bytes_sent, time_taken)\n \"\"\"\n start = time.time()\n message = None\n if not self.initialized:\n message = self.construct_start_message()\n self.initialized = True\n else:\n message = self.construct_end_message()\n\n self.send_UDP_message(message)\n end = time.time()\n\n return end - start\n\n def __del__(self):\n return self.close()\n\n def close(self):\n \"\"\"We terminate (SIGTERM) the processes added to the self.procs list \"\"\"\n for proc in self.procs:\n proc.terminate()\n", "path": "parsl/dataflow/usage_tracking/usage.py"}]} | 3,006 | 211 |
gh_patches_debug_43304 | rasdani/github-patches | git_diff | ManimCommunity__manim-1000 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove 'MovingCameraScene.camera_frame' and 'MovingCameraScene.setup'
## Enhancement proposal
The [`MovingCameraScene.setup` method](https://github.com/ManimCommunity/manim/blob/2faa89e3d367c370fd101893f03efab63109ceb0/manim/scene/moving_camera_scene.py#L94-L104) only sets the attribute `camera_frame` on the `Scene` instance, pointing it at `renderer.camera.frame`; but since the property [`camera` exists in the `Scene` class](https://github.com/ManimCommunity/manim/blob/57f228b8eb96f9c99517a7fdb8756b0c4969d7ff/manim/scene/scene.py#L101-L103), the object `renderer.camera.frame` can already be accessed using `camera.frame`.
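For context, here is a minimal sketch of that relationship (the property body is an assumption inferred from the linked `Scene` lines, not a verbatim copy):

```python
class Scene:
    @property
    def camera(self):
        # Scene simply exposes the renderer's camera, so in a MovingCameraScene
        # `self.camera.frame` and `self.renderer.camera.frame` are the same object.
        return self.renderer.camera
```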
Replacing `camera_frame` with `camera.frame` and removing the useless `setup` method, the example `FollowingGraphCamera` could be rewritten as:
```python
class FollowingGraphCamera(GraphScene, MovingCameraScene):
def setup(self):
GraphScene.setup(self)
def construct(self):
self.camera.frame.save_state()
self.setup_axes(animate=False)
graph = self.get_graph(lambda x: np.sin(x),
color=BLUE,
x_min=0,
x_max=3 * PI
)
moving_dot = Dot().move_to(graph.points[0]).set_color(ORANGE)
dot_at_start_graph = Dot().move_to(graph.points[0])
dot_at_end_graph = Dot().move_to(graph.points[-1])
self.add(graph, dot_at_end_graph, dot_at_start_graph, moving_dot)
self.play(self.camera.frame.animate.scale(0.5).move_to(moving_dot))
def update_curve(mob):
mob.move_to(moving_dot.get_center())
self.camera.frame.add_updater(update_curve)
self.play(MoveAlongPath(moving_dot, graph, rate_func=linear))
self.camera.frame.remove_updater(update_curve)
self.play(Restore(self.camera.frame))
```
Studying the original example, I was confused as to why accessing `camera.frame` was done using `camera_frame`.
### Additional comments
The `setup` method [made sense in the original implementation](https://github.com/ManimCommunity/manim/blame/2981fa2b32f0b3827317efc75df56c0585c6c0f0/scene/moving_camera_scene.py#L11-L17) but with the changes that have happened since then it's just dead code now.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `manim/scene/moving_camera_scene.py`
Content:
```
1 """A scene whose camera can be moved around.
2
3 .. SEEALSO::
4
5 :mod:`.moving_camera`
6
7
8 Examples
9 --------
10
11 .. manim:: ChangingCameraWidthAndRestore
12
13 class ChangingCameraWidthAndRestore(MovingCameraScene):
14 def construct(self):
15 text = Text("Hello World").set_color(BLUE)
16 self.add(text)
17 self.camera_frame.save_state()
18 self.play(self.camera_frame.animate.set(width=text.width * 1.2))
19 self.wait(0.3)
20 self.play(Restore(self.camera_frame))
21
22
23 .. manim:: MovingCameraCenter
24
25 class MovingCameraCenter(MovingCameraScene):
26 def construct(self):
27 s = Square(color=RED, fill_opacity=0.5).move_to(2 * LEFT)
28 t = Triangle(color=GREEN, fill_opacity=0.5).move_to(2 * RIGHT)
29 self.wait(0.3)
30 self.add(s, t)
31 self.play(self.camera_frame.animate.move_to(s))
32 self.wait(0.3)
33 self.play(self.camera_frame.animate.move_to(t))
34
35
36 .. manim:: MovingAndZoomingCamera
37
38 class MovingAndZoomingCamera(MovingCameraScene):
39 def construct(self):
40 s = Square(color=BLUE, fill_opacity=0.5).move_to(2 * LEFT)
41 t = Triangle(color=YELLOW, fill_opacity=0.5).move_to(2 * RIGHT)
42 self.add(s, t)
43 self.play(self.camera_frame.animate.move_to(s).set(width=s.width*2))
44 self.wait(0.3)
45 self.play(self.camera_frame.animate.move_to(t).set(width=t.width*2))
46
47 self.play(self.camera_frame.animate.move_to(ORIGIN).set(width=14))
48
49 .. manim:: MovingCameraOnGraph
50
51 class MovingCameraOnGraph(GraphScene, MovingCameraScene):
52 def setup(self):
53 GraphScene.setup(self)
54 MovingCameraScene.setup(self)
55 def construct(self):
56 self.camera_frame.save_state()
57 self.setup_axes(animate=False)
58 graph = self.get_graph(lambda x: np.sin(x),
59 color=WHITE,
60 x_min=0,
61 x_max=3 * PI
62 )
63 dot_at_start_graph = Dot().move_to(graph.points[0])
64 dot_at_end_graph = Dot().move_to(graph.points[-1])
65 self.add(graph, dot_at_end_graph, dot_at_start_graph)
66 self.play(self.camera_frame.animate.scale(0.5).move_to(dot_at_start_graph))
67 self.play(self.camera_frame.animate.move_to(dot_at_end_graph))
68 self.play(Restore(self.camera_frame))
69 self.wait()
70
71 """
72
73 __all__ = ["MovingCameraScene"]
74
75 from ..camera.moving_camera import MovingCamera
76 from ..scene.scene import Scene
77 from ..utils.iterables import list_update
78 from ..utils.family import extract_mobject_family_members
79
80
81 class MovingCameraScene(Scene):
82 """
83 This is a Scene, with special configurations and properties that
84 make it suitable for cases where the camera must be moved around.
85
86 .. SEEALSO::
87
88 :class:`.MovingCamera`
89 """
90
91 def __init__(self, camera_class=MovingCamera, **kwargs):
92 Scene.__init__(self, camera_class=camera_class, **kwargs)
93
94 def setup(self):
95 """
96 This method is used internally by Manim
97 to set up the scene for proper use.
98 """
99 Scene.setup(self)
100 assert isinstance(self.renderer.camera, MovingCamera)
101 self.camera_frame = self.renderer.camera.frame
102 # Hmm, this currently relies on the fact that MovingCamera
103 # willd default to a full-sized frame. Is that okay?
104 return self
105
106 def get_moving_mobjects(self, *animations):
107 """
108 This method returns a list of all of the Mobjects in the Scene that
109 are moving, that are also in the animations passed.
110
111 Parameters
112 ----------
113 *animations : Animation
114 The Animations whose mobjects will be checked.
115 """
116 moving_mobjects = Scene.get_moving_mobjects(self, *animations)
117 all_moving_mobjects = extract_mobject_family_members(moving_mobjects)
118 movement_indicators = self.renderer.camera.get_mobjects_indicating_movement()
119 for movement_indicator in movement_indicators:
120 if movement_indicator in all_moving_mobjects:
121 # When one of these is moving, the camera should
122 # consider all mobjects to be moving
123 return list_update(self.mobjects, moving_mobjects)
124 return moving_mobjects
125
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/manim/scene/moving_camera_scene.py b/manim/scene/moving_camera_scene.py
--- a/manim/scene/moving_camera_scene.py
+++ b/manim/scene/moving_camera_scene.py
@@ -14,10 +14,10 @@
def construct(self):
text = Text("Hello World").set_color(BLUE)
self.add(text)
- self.camera_frame.save_state()
- self.play(self.camera_frame.animate.set(width=text.width * 1.2))
+ self.camera.frame.save_state()
+ self.play(self.camera.frame.animate.set(width=text.width * 1.2))
self.wait(0.3)
- self.play(Restore(self.camera_frame))
+ self.play(Restore(self.camera.frame))
.. manim:: MovingCameraCenter
@@ -28,9 +28,9 @@
t = Triangle(color=GREEN, fill_opacity=0.5).move_to(2 * RIGHT)
self.wait(0.3)
self.add(s, t)
- self.play(self.camera_frame.animate.move_to(s))
+ self.play(self.camera.frame.animate.move_to(s))
self.wait(0.3)
- self.play(self.camera_frame.animate.move_to(t))
+ self.play(self.camera.frame.animate.move_to(t))
.. manim:: MovingAndZoomingCamera
@@ -40,20 +40,20 @@
s = Square(color=BLUE, fill_opacity=0.5).move_to(2 * LEFT)
t = Triangle(color=YELLOW, fill_opacity=0.5).move_to(2 * RIGHT)
self.add(s, t)
- self.play(self.camera_frame.animate.move_to(s).set(width=s.width*2))
+ self.play(self.camera.frame.animate.move_to(s).set(width=s.width*2))
self.wait(0.3)
- self.play(self.camera_frame.animate.move_to(t).set(width=t.width*2))
+ self.play(self.camera.frame.animate.move_to(t).set(width=t.width*2))
- self.play(self.camera_frame.animate.move_to(ORIGIN).set(width=14))
+ self.play(self.camera.frame.animate.move_to(ORIGIN).set(width=14))
.. manim:: MovingCameraOnGraph
class MovingCameraOnGraph(GraphScene, MovingCameraScene):
def setup(self):
GraphScene.setup(self)
- MovingCameraScene.setup(self)
+
def construct(self):
- self.camera_frame.save_state()
+ self.camera.frame.save_state()
self.setup_axes(animate=False)
graph = self.get_graph(lambda x: np.sin(x),
color=WHITE,
@@ -63,9 +63,9 @@
dot_at_start_graph = Dot().move_to(graph.points[0])
dot_at_end_graph = Dot().move_to(graph.points[-1])
self.add(graph, dot_at_end_graph, dot_at_start_graph)
- self.play(self.camera_frame.animate.scale(0.5).move_to(dot_at_start_graph))
- self.play(self.camera_frame.animate.move_to(dot_at_end_graph))
- self.play(Restore(self.camera_frame))
+ self.play(self.camera.frame.animate.scale(0.5).move_to(dot_at_start_graph))
+ self.play(self.camera.frame.animate.move_to(dot_at_end_graph))
+ self.play(Restore(self.camera.frame))
self.wait()
"""
@@ -91,18 +91,6 @@
def __init__(self, camera_class=MovingCamera, **kwargs):
Scene.__init__(self, camera_class=camera_class, **kwargs)
- def setup(self):
- """
- This method is used internally by Manim
- to set up the scene for proper use.
- """
- Scene.setup(self)
- assert isinstance(self.renderer.camera, MovingCamera)
- self.camera_frame = self.renderer.camera.frame
- # Hmm, this currently relies on the fact that MovingCamera
- # willd default to a full-sized frame. Is that okay?
- return self
-
def get_moving_mobjects(self, *animations):
"""
This method returns a list of all of the Mobjects in the Scene that
| {"golden_diff": "diff --git a/manim/scene/moving_camera_scene.py b/manim/scene/moving_camera_scene.py\n--- a/manim/scene/moving_camera_scene.py\n+++ b/manim/scene/moving_camera_scene.py\n@@ -14,10 +14,10 @@\n def construct(self):\n text = Text(\"Hello World\").set_color(BLUE)\n self.add(text)\n- self.camera_frame.save_state()\n- self.play(self.camera_frame.animate.set(width=text.width * 1.2))\n+ self.camera.frame.save_state()\n+ self.play(self.camera.frame.animate.set(width=text.width * 1.2))\n self.wait(0.3)\n- self.play(Restore(self.camera_frame))\n+ self.play(Restore(self.camera.frame))\n \n \n .. manim:: MovingCameraCenter\n@@ -28,9 +28,9 @@\n t = Triangle(color=GREEN, fill_opacity=0.5).move_to(2 * RIGHT)\n self.wait(0.3)\n self.add(s, t)\n- self.play(self.camera_frame.animate.move_to(s))\n+ self.play(self.camera.frame.animate.move_to(s))\n self.wait(0.3)\n- self.play(self.camera_frame.animate.move_to(t))\n+ self.play(self.camera.frame.animate.move_to(t))\n \n \n .. manim:: MovingAndZoomingCamera\n@@ -40,20 +40,20 @@\n s = Square(color=BLUE, fill_opacity=0.5).move_to(2 * LEFT)\n t = Triangle(color=YELLOW, fill_opacity=0.5).move_to(2 * RIGHT)\n self.add(s, t)\n- self.play(self.camera_frame.animate.move_to(s).set(width=s.width*2))\n+ self.play(self.camera.frame.animate.move_to(s).set(width=s.width*2))\n self.wait(0.3)\n- self.play(self.camera_frame.animate.move_to(t).set(width=t.width*2))\n+ self.play(self.camera.frame.animate.move_to(t).set(width=t.width*2))\n \n- self.play(self.camera_frame.animate.move_to(ORIGIN).set(width=14))\n+ self.play(self.camera.frame.animate.move_to(ORIGIN).set(width=14))\n \n .. manim:: MovingCameraOnGraph\n \n class MovingCameraOnGraph(GraphScene, MovingCameraScene):\n def setup(self):\n GraphScene.setup(self)\n- MovingCameraScene.setup(self)\n+\n def construct(self):\n- self.camera_frame.save_state()\n+ self.camera.frame.save_state()\n self.setup_axes(animate=False)\n graph = self.get_graph(lambda x: np.sin(x),\n color=WHITE,\n@@ -63,9 +63,9 @@\n dot_at_start_graph = Dot().move_to(graph.points[0])\n dot_at_end_graph = Dot().move_to(graph.points[-1])\n self.add(graph, dot_at_end_graph, dot_at_start_graph)\n- self.play(self.camera_frame.animate.scale(0.5).move_to(dot_at_start_graph))\n- self.play(self.camera_frame.animate.move_to(dot_at_end_graph))\n- self.play(Restore(self.camera_frame))\n+ self.play(self.camera.frame.animate.scale(0.5).move_to(dot_at_start_graph))\n+ self.play(self.camera.frame.animate.move_to(dot_at_end_graph))\n+ self.play(Restore(self.camera.frame))\n self.wait()\n \n \"\"\"\n@@ -91,18 +91,6 @@\n def __init__(self, camera_class=MovingCamera, **kwargs):\n Scene.__init__(self, camera_class=camera_class, **kwargs)\n \n- def setup(self):\n- \"\"\"\n- This method is used internally by Manim\n- to set up the scene for proper use.\n- \"\"\"\n- Scene.setup(self)\n- assert isinstance(self.renderer.camera, MovingCamera)\n- self.camera_frame = self.renderer.camera.frame\n- # Hmm, this currently relies on the fact that MovingCamera\n- # willd default to a full-sized frame. 
Is that okay?\n- return self\n-\n def get_moving_mobjects(self, *animations):\n \"\"\"\n This method returns a list of all of the Mobjects in the Scene that\n", "issue": "Remove 'MovingCameraScene.camera_frame' and 'MovingCameraScene.setup'\n## Enhancement proposal\r\n\r\n[`MovingCameraScene.setup` method](https://github.com/ManimCommunity/manim/blob/2faa89e3d367c370fd101893f03efab63109ceb0/manim/scene/moving_camera_scene.py#L94-L104) only sets the attribute `camera_frame` into the `Scene` instance, pointing to `renderer.camera.frame`, but since the property [`camera` exists in `Scene` class](https://github.com/ManimCommunity/manim/blob/57f228b8eb96f9c99517a7fdb8756b0c4969d7ff/manim/scene/scene.py#L101-L103), the object `renderer.camera.frame` can be accesed using `camera.frame`.\r\n\r\nChanging `camera_frame` by `camera.frame` and removing the useless `setup` method, the example `FollowingGraphCamera` could be rewritten as:\r\n\r\n```python\r\nclass FollowingGraphCamera(GraphScene, MovingCameraScene):\r\n def setup(self):\r\n GraphScene.setup(self)\r\n\r\n def construct(self):\r\n self.camera.frame.save_state()\r\n self.setup_axes(animate=False)\r\n graph = self.get_graph(lambda x: np.sin(x),\r\n color=BLUE,\r\n x_min=0,\r\n x_max=3 * PI\r\n )\r\n moving_dot = Dot().move_to(graph.points[0]).set_color(ORANGE)\r\n\r\n dot_at_start_graph = Dot().move_to(graph.points[0])\r\n dot_at_end_graph = Dot().move_to(graph.points[-1])\r\n self.add(graph, dot_at_end_graph, dot_at_start_graph, moving_dot)\r\n self.play(self.camera.frame.animate.scale(0.5).move_to(moving_dot))\r\n\r\n def update_curve(mob):\r\n mob.move_to(moving_dot.get_center())\r\n\r\n self.camera.frame.add_updater(update_curve)\r\n self.play(MoveAlongPath(moving_dot, graph, rate_func=linear))\r\n self.camera.frame.remove_updater(update_curve)\r\n\r\n self.play(Restore(self.camera.frame))\r\n```\r\n\r\nStudying the original example, I was confused as to why accessing `camera.frame` was done using` camera_frame`.\r\n\r\n### Additional comments\r\n\r\nThe `setup` method [made sense in the original implementation](https://github.com/ManimCommunity/manim/blame/2981fa2b32f0b3827317efc75df56c0585c6c0f0/scene/moving_camera_scene.py#L11-L17) but with the changes that have happened since then it's just dead code now.\n", "before_files": [{"content": "\"\"\"A scene whose camera can be moved around.\n\n.. SEEALSO::\n\n :mod:`.moving_camera`\n\n\nExamples\n--------\n\n.. manim:: ChangingCameraWidthAndRestore\n\n class ChangingCameraWidthAndRestore(MovingCameraScene):\n def construct(self):\n text = Text(\"Hello World\").set_color(BLUE)\n self.add(text)\n self.camera_frame.save_state()\n self.play(self.camera_frame.animate.set(width=text.width * 1.2))\n self.wait(0.3)\n self.play(Restore(self.camera_frame))\n\n\n.. manim:: MovingCameraCenter\n\n class MovingCameraCenter(MovingCameraScene):\n def construct(self):\n s = Square(color=RED, fill_opacity=0.5).move_to(2 * LEFT)\n t = Triangle(color=GREEN, fill_opacity=0.5).move_to(2 * RIGHT)\n self.wait(0.3)\n self.add(s, t)\n self.play(self.camera_frame.animate.move_to(s))\n self.wait(0.3)\n self.play(self.camera_frame.animate.move_to(t))\n\n\n.. 
manim:: MovingAndZoomingCamera\n\n class MovingAndZoomingCamera(MovingCameraScene):\n def construct(self):\n s = Square(color=BLUE, fill_opacity=0.5).move_to(2 * LEFT)\n t = Triangle(color=YELLOW, fill_opacity=0.5).move_to(2 * RIGHT)\n self.add(s, t)\n self.play(self.camera_frame.animate.move_to(s).set(width=s.width*2))\n self.wait(0.3)\n self.play(self.camera_frame.animate.move_to(t).set(width=t.width*2))\n\n self.play(self.camera_frame.animate.move_to(ORIGIN).set(width=14))\n\n.. manim:: MovingCameraOnGraph\n\n class MovingCameraOnGraph(GraphScene, MovingCameraScene):\n def setup(self):\n GraphScene.setup(self)\n MovingCameraScene.setup(self)\n def construct(self):\n self.camera_frame.save_state()\n self.setup_axes(animate=False)\n graph = self.get_graph(lambda x: np.sin(x),\n color=WHITE,\n x_min=0,\n x_max=3 * PI\n )\n dot_at_start_graph = Dot().move_to(graph.points[0])\n dot_at_end_graph = Dot().move_to(graph.points[-1])\n self.add(graph, dot_at_end_graph, dot_at_start_graph)\n self.play(self.camera_frame.animate.scale(0.5).move_to(dot_at_start_graph))\n self.play(self.camera_frame.animate.move_to(dot_at_end_graph))\n self.play(Restore(self.camera_frame))\n self.wait()\n\n\"\"\"\n\n__all__ = [\"MovingCameraScene\"]\n\nfrom ..camera.moving_camera import MovingCamera\nfrom ..scene.scene import Scene\nfrom ..utils.iterables import list_update\nfrom ..utils.family import extract_mobject_family_members\n\n\nclass MovingCameraScene(Scene):\n \"\"\"\n This is a Scene, with special configurations and properties that\n make it suitable for cases where the camera must be moved around.\n\n .. SEEALSO::\n\n :class:`.MovingCamera`\n \"\"\"\n\n def __init__(self, camera_class=MovingCamera, **kwargs):\n Scene.__init__(self, camera_class=camera_class, **kwargs)\n\n def setup(self):\n \"\"\"\n This method is used internally by Manim\n to set up the scene for proper use.\n \"\"\"\n Scene.setup(self)\n assert isinstance(self.renderer.camera, MovingCamera)\n self.camera_frame = self.renderer.camera.frame\n # Hmm, this currently relies on the fact that MovingCamera\n # willd default to a full-sized frame. Is that okay?\n return self\n\n def get_moving_mobjects(self, *animations):\n \"\"\"\n This method returns a list of all of the Mobjects in the Scene that\n are moving, that are also in the animations passed.\n\n Parameters\n ----------\n *animations : Animation\n The Animations whose mobjects will be checked.\n \"\"\"\n moving_mobjects = Scene.get_moving_mobjects(self, *animations)\n all_moving_mobjects = extract_mobject_family_members(moving_mobjects)\n movement_indicators = self.renderer.camera.get_mobjects_indicating_movement()\n for movement_indicator in movement_indicators:\n if movement_indicator in all_moving_mobjects:\n # When one of these is moving, the camera should\n # consider all mobjects to be moving\n return list_update(self.mobjects, moving_mobjects)\n return moving_mobjects\n", "path": "manim/scene/moving_camera_scene.py"}], "after_files": [{"content": "\"\"\"A scene whose camera can be moved around.\n\n.. SEEALSO::\n\n :mod:`.moving_camera`\n\n\nExamples\n--------\n\n.. manim:: ChangingCameraWidthAndRestore\n\n class ChangingCameraWidthAndRestore(MovingCameraScene):\n def construct(self):\n text = Text(\"Hello World\").set_color(BLUE)\n self.add(text)\n self.camera.frame.save_state()\n self.play(self.camera.frame.animate.set(width=text.width * 1.2))\n self.wait(0.3)\n self.play(Restore(self.camera.frame))\n\n\n.. 
manim:: MovingCameraCenter\n\n class MovingCameraCenter(MovingCameraScene):\n def construct(self):\n s = Square(color=RED, fill_opacity=0.5).move_to(2 * LEFT)\n t = Triangle(color=GREEN, fill_opacity=0.5).move_to(2 * RIGHT)\n self.wait(0.3)\n self.add(s, t)\n self.play(self.camera.frame.animate.move_to(s))\n self.wait(0.3)\n self.play(self.camera.frame.animate.move_to(t))\n\n\n.. manim:: MovingAndZoomingCamera\n\n class MovingAndZoomingCamera(MovingCameraScene):\n def construct(self):\n s = Square(color=BLUE, fill_opacity=0.5).move_to(2 * LEFT)\n t = Triangle(color=YELLOW, fill_opacity=0.5).move_to(2 * RIGHT)\n self.add(s, t)\n self.play(self.camera.frame.animate.move_to(s).set(width=s.width*2))\n self.wait(0.3)\n self.play(self.camera.frame.animate.move_to(t).set(width=t.width*2))\n\n self.play(self.camera.frame.animate.move_to(ORIGIN).set(width=14))\n\n.. manim:: MovingCameraOnGraph\n\n class MovingCameraOnGraph(GraphScene, MovingCameraScene):\n def setup(self):\n GraphScene.setup(self)\n\n def construct(self):\n self.camera.frame.save_state()\n self.setup_axes(animate=False)\n graph = self.get_graph(lambda x: np.sin(x),\n color=WHITE,\n x_min=0,\n x_max=3 * PI\n )\n dot_at_start_graph = Dot().move_to(graph.points[0])\n dot_at_end_graph = Dot().move_to(graph.points[-1])\n self.add(graph, dot_at_end_graph, dot_at_start_graph)\n self.play(self.camera.frame.animate.scale(0.5).move_to(dot_at_start_graph))\n self.play(self.camera.frame.animate.move_to(dot_at_end_graph))\n self.play(Restore(self.camera.frame))\n self.wait()\n\n\"\"\"\n\n__all__ = [\"MovingCameraScene\"]\n\nfrom ..camera.moving_camera import MovingCamera\nfrom ..scene.scene import Scene\nfrom ..utils.iterables import list_update\nfrom ..utils.family import extract_mobject_family_members\n\n\nclass MovingCameraScene(Scene):\n \"\"\"\n This is a Scene, with special configurations and properties that\n make it suitable for cases where the camera must be moved around.\n\n .. SEEALSO::\n\n :class:`.MovingCamera`\n \"\"\"\n\n def __init__(self, camera_class=MovingCamera, **kwargs):\n Scene.__init__(self, camera_class=camera_class, **kwargs)\n\n def get_moving_mobjects(self, *animations):\n \"\"\"\n This method returns a list of all of the Mobjects in the Scene that\n are moving, that are also in the animations passed.\n\n Parameters\n ----------\n *animations : Animation\n The Animations whose mobjects will be checked.\n \"\"\"\n moving_mobjects = Scene.get_moving_mobjects(self, *animations)\n all_moving_mobjects = extract_mobject_family_members(moving_mobjects)\n movement_indicators = self.renderer.camera.get_mobjects_indicating_movement()\n for movement_indicator in movement_indicators:\n if movement_indicator in all_moving_mobjects:\n # When one of these is moving, the camera should\n # consider all mobjects to be moving\n return list_update(self.mobjects, moving_mobjects)\n return moving_mobjects\n", "path": "manim/scene/moving_camera_scene.py"}]} | 2,101 | 907 |
gh_patches_debug_31523 | rasdani/github-patches | git_diff | freedomofpress__securedrop-3884 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove unnecessary Ansible callback for profile_tasks
# Feature request
## Description
The file at `install_files/ansible-base/callback_plugins/profile_tasks.py` was added via #1196, to provide additional information on task performance, with the goal of aiding developers in improving the server config workflow. Since we moved to Ansible v2 in #1146, the hardcoded plugin is no longer necessary.
Instead, we can simply add a line to `ansible.cfg` under `[defaults]`:
```
callback_whitelist = profile_tasks
```
The simplification is possible because task profiling was [added to Ansible core as of v2](https://docs.ansible.com/ansible/devel/plugins/callback/profile_tasks.html).
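Put together, the configuration-side change is just the following (a sketch; any keys already present under `[defaults]` would stay untouched):

```ini
# ansible.cfg
[defaults]
callback_whitelist = profile_tasks
```

Note that the vendored plugin's `ANSIBLE_PROFILE_DISABLE` environment-variable escape hatch is specific to the local copy; with the core callback, profiling is turned on or off simply by adding or removing the whitelist entry.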
## User Stories
As a maintainer, I want to delete redundant code wherever possible, and lean on upstream to handle core functionality when appropriate.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `install_files/ansible-base/callback_plugins/profile_tasks.py`
Content:
```
1 # Source: https://github.com/jlafon/ansible-profile
2 # License: MIT
3 # More info: http://jlafon.io/ansible-profiling.html
4 # The profiling functionality will be provided by Ansible v2,
5 # since this callback_plugin has been merged into core,
6 # but we're including here to support older versions of Ansible.
7 import datetime
8 import os
9 import time
10
11
12 class CallbackModule(object):
13 """
14 A plugin for timing tasks
15 """
16 def __init__(self):
17 self.stats = {}
18 self.current = None
19
20 def playbook_on_task_start(self, name, is_conditional):
21 """
22 Logs the start of each task
23 """
24
25 if os.getenv("ANSIBLE_PROFILE_DISABLE") is not None:
26 return
27
28 if self.current is not None:
29 # Record the running time of the last executed task
30 self.stats[self.current] = time.time() - self.stats[self.current]
31
32 # Record the start time of the current task
33 self.current = name
34 self.stats[self.current] = time.time()
35
36 def playbook_on_stats(self, stats):
37 """
38 Prints the timings
39 """
40
41 if os.getenv("ANSIBLE_PROFILE_DISABLE") is not None:
42 return
43
44 # Record the timing of the very last task
45 if self.current is not None:
46 self.stats[self.current] = time.time() - self.stats[self.current]
47
48 # Sort the tasks by their running time
49 results = sorted(
50 self.stats.items(),
51 key=lambda value: value[1],
52 reverse=True,
53 )
54
55 # Just keep the top 10
56 results = results[:10]
57
58 # Print the timings
59 for name, elapsed in results:
60 print(
61 "{0:-<70}{1:->9}".format(
62 '{0} '.format(name),
63 ' {0:.02f}s'.format(elapsed),
64 )
65 )
66
67 total_seconds = sum([x[1] for x in self.stats.items()])
68 print("\nPlaybook finished: {0}, {1} total tasks."
69 " {2} elapsed. \n".format(
70 time.asctime(),
71 len(self.stats.items()),
72 datetime.timedelta(seconds=(int(total_seconds)))
73 )
74 )
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/install_files/ansible-base/callback_plugins/profile_tasks.py b/install_files/ansible-base/callback_plugins/profile_tasks.py
deleted file mode 100644
--- a/install_files/ansible-base/callback_plugins/profile_tasks.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# Source: https://github.com/jlafon/ansible-profile
-# License: MIT
-# More info: http://jlafon.io/ansible-profiling.html
-# The profiling functionality will be provided by Ansible v2,
-# since this callback_plugin has been merged into core,
-# but we're including here to support older versions of Ansible.
-import datetime
-import os
-import time
-
-
-class CallbackModule(object):
- """
- A plugin for timing tasks
- """
- def __init__(self):
- self.stats = {}
- self.current = None
-
- def playbook_on_task_start(self, name, is_conditional):
- """
- Logs the start of each task
- """
-
- if os.getenv("ANSIBLE_PROFILE_DISABLE") is not None:
- return
-
- if self.current is not None:
- # Record the running time of the last executed task
- self.stats[self.current] = time.time() - self.stats[self.current]
-
- # Record the start time of the current task
- self.current = name
- self.stats[self.current] = time.time()
-
- def playbook_on_stats(self, stats):
- """
- Prints the timings
- """
-
- if os.getenv("ANSIBLE_PROFILE_DISABLE") is not None:
- return
-
- # Record the timing of the very last task
- if self.current is not None:
- self.stats[self.current] = time.time() - self.stats[self.current]
-
- # Sort the tasks by their running time
- results = sorted(
- self.stats.items(),
- key=lambda value: value[1],
- reverse=True,
- )
-
- # Just keep the top 10
- results = results[:10]
-
- # Print the timings
- for name, elapsed in results:
- print(
- "{0:-<70}{1:->9}".format(
- '{0} '.format(name),
- ' {0:.02f}s'.format(elapsed),
- )
- )
-
- total_seconds = sum([x[1] for x in self.stats.items()])
- print("\nPlaybook finished: {0}, {1} total tasks."
- " {2} elapsed. \n".format(
- time.asctime(),
- len(self.stats.items()),
- datetime.timedelta(seconds=(int(total_seconds)))
- )
- )
| {"golden_diff": "diff --git a/install_files/ansible-base/callback_plugins/profile_tasks.py b/install_files/ansible-base/callback_plugins/profile_tasks.py\ndeleted file mode 100644\n--- a/install_files/ansible-base/callback_plugins/profile_tasks.py\n+++ /dev/null\n@@ -1,74 +0,0 @@\n-# Source: https://github.com/jlafon/ansible-profile\n-# License: MIT\n-# More info: http://jlafon.io/ansible-profiling.html\n-# The profiling functionality will be provided by Ansible v2,\n-# since this callback_plugin has been merged into core,\n-# but we're including here to support older versions of Ansible.\n-import datetime\n-import os\n-import time\n-\n-\n-class CallbackModule(object):\n- \"\"\"\n- A plugin for timing tasks\n- \"\"\"\n- def __init__(self):\n- self.stats = {}\n- self.current = None\n-\n- def playbook_on_task_start(self, name, is_conditional):\n- \"\"\"\n- Logs the start of each task\n- \"\"\"\n-\n- if os.getenv(\"ANSIBLE_PROFILE_DISABLE\") is not None:\n- return\n-\n- if self.current is not None:\n- # Record the running time of the last executed task\n- self.stats[self.current] = time.time() - self.stats[self.current]\n-\n- # Record the start time of the current task\n- self.current = name\n- self.stats[self.current] = time.time()\n-\n- def playbook_on_stats(self, stats):\n- \"\"\"\n- Prints the timings\n- \"\"\"\n-\n- if os.getenv(\"ANSIBLE_PROFILE_DISABLE\") is not None:\n- return\n-\n- # Record the timing of the very last task\n- if self.current is not None:\n- self.stats[self.current] = time.time() - self.stats[self.current]\n-\n- # Sort the tasks by their running time\n- results = sorted(\n- self.stats.items(),\n- key=lambda value: value[1],\n- reverse=True,\n- )\n-\n- # Just keep the top 10\n- results = results[:10]\n-\n- # Print the timings\n- for name, elapsed in results:\n- print(\n- \"{0:-<70}{1:->9}\".format(\n- '{0} '.format(name),\n- ' {0:.02f}s'.format(elapsed),\n- )\n- )\n-\n- total_seconds = sum([x[1] for x in self.stats.items()])\n- print(\"\\nPlaybook finished: {0}, {1} total tasks.\"\n- \" {2} elapsed. \\n\".format(\n- time.asctime(),\n- len(self.stats.items()),\n- datetime.timedelta(seconds=(int(total_seconds)))\n- )\n- )\n", "issue": "Remove unnecessary Ansible callback for profile_tasks\n# Feature request\r\n\r\n## Description\r\n\r\nThe file at `install_files/ansible-base/callback_plugins/profile_tasks.py` was added via #1196, to provide additional information on task performance, with the goal of aiding developers in improving the server config workflow. 
Since we moved to Ansible v2 in #1146, the hardcoded plugin is no longer necessary.\r\n\r\nInstead, we can ansible add a lint to `ansible.cfg` under `[defaults]`:\r\n\r\n```\r\ncallback_whitelist = profile_tasks\r\n```\r\n\r\nThe simplification is possible because task profiling was [added to Ansible core as of v2](https://docs.ansible.com/ansible/devel/plugins/callback/profile_tasks.html).\r\n\r\n## User Stories\r\nAs a maintainer, I want to delete redundant code wherever possible, and lean on upstream to handle core functionality when appropriate.\r\n\n", "before_files": [{"content": "# Source: https://github.com/jlafon/ansible-profile\n# License: MIT\n# More info: http://jlafon.io/ansible-profiling.html\n# The profiling functionality will be provided by Ansible v2,\n# since this callback_plugin has been merged into core,\n# but we're including here to support older versions of Ansible.\nimport datetime\nimport os\nimport time\n\n\nclass CallbackModule(object):\n \"\"\"\n A plugin for timing tasks\n \"\"\"\n def __init__(self):\n self.stats = {}\n self.current = None\n\n def playbook_on_task_start(self, name, is_conditional):\n \"\"\"\n Logs the start of each task\n \"\"\"\n\n if os.getenv(\"ANSIBLE_PROFILE_DISABLE\") is not None:\n return\n\n if self.current is not None:\n # Record the running time of the last executed task\n self.stats[self.current] = time.time() - self.stats[self.current]\n\n # Record the start time of the current task\n self.current = name\n self.stats[self.current] = time.time()\n\n def playbook_on_stats(self, stats):\n \"\"\"\n Prints the timings\n \"\"\"\n\n if os.getenv(\"ANSIBLE_PROFILE_DISABLE\") is not None:\n return\n\n # Record the timing of the very last task\n if self.current is not None:\n self.stats[self.current] = time.time() - self.stats[self.current]\n\n # Sort the tasks by their running time\n results = sorted(\n self.stats.items(),\n key=lambda value: value[1],\n reverse=True,\n )\n\n # Just keep the top 10\n results = results[:10]\n\n # Print the timings\n for name, elapsed in results:\n print(\n \"{0:-<70}{1:->9}\".format(\n '{0} '.format(name),\n ' {0:.02f}s'.format(elapsed),\n )\n )\n\n total_seconds = sum([x[1] for x in self.stats.items()])\n print(\"\\nPlaybook finished: {0}, {1} total tasks.\"\n \" {2} elapsed. \\n\".format(\n time.asctime(),\n len(self.stats.items()),\n datetime.timedelta(seconds=(int(total_seconds)))\n )\n )\n", "path": "install_files/ansible-base/callback_plugins/profile_tasks.py"}], "after_files": [{"content": null, "path": "install_files/ansible-base/callback_plugins/profile_tasks.py"}]} | 1,074 | 610 |
gh_patches_debug_990 | rasdani/github-patches | git_diff | googleapis__python-bigquery-306 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Needs protobuf minimum version 3.12.0
If you are still having issues, please be sure to include as much information as possible:
#### Environment details
- OS type and version: MacOS
- Python version: `python --version` 3.8
- pip version: `pip --version` 20.2.3
- `google-cloud-bigquery` version: `pip show google-cloud-bigquery` 2.0.0
#### Steps to reproduce
 1. Just install the package, import it somewhere, and run it.
#### Code example
```python
from google.cloud import bigquery
```
#### Stack trace
```
File "/.../venv/lib/python3.8/site-packages/google/protobuf/internal/python_message.py", line 570, in _GetFieldByName
return message_descriptor.fields_by_name[field_name]
KeyError: 'proto3_optional'
...
from google.cloud import bigquery
File "/.../venv/lib/python3.8/site-packages/google/cloud/bigquery/__init__.py", line 35, in <module>
from google.cloud.bigquery.client import Client
File "/.../venv/lib/python3.8/site-packages/google/cloud/bigquery/client.py", line 57, in <module>
from google.cloud.bigquery import _pandas_helpers
File "/.../venv/lib/python3.8/site-packages/google/cloud/bigquery/_pandas_helpers.py", line 36, in <module>
from google.cloud.bigquery import schema
File "/.../venv/lib/python3.8/site-packages/google/cloud/bigquery/schema.py", line 19, in <module>
from google.cloud.bigquery_v2 import types
File "/.../venv/lib/python3.8/site-packages/google/cloud/bigquery_v2/__init__.py", line 19, in <module>
from .types.encryption_config import EncryptionConfiguration
File "/.../venv/lib/python3.8/site-packages/google/cloud/bigquery_v2/types/__init__.py", line 18, in <module>
from .encryption_config import EncryptionConfiguration
File "/.../venv/lib/python3.8/site-packages/google/cloud/bigquery_v2/types/encryption_config.py", line 29, in <module>
class EncryptionConfiguration(proto.Message):
File "/.../venv/lib/python3.8/site-packages/proto/message.py", line 215, in __new__
field=[i.descriptor for i in fields],
File "/.../venv/lib/python3.8/site-packages/proto/message.py", line 215, in <listcomp>
field=[i.descriptor for i in fields],
File "/.../venv/lib/python3.8/site-packages/proto/fields.py", line 104, in descriptor
self._descriptor = descriptor_pb2.FieldDescriptorProto(
File "/.../venv/lib/python3.8/site-packages/google/protobuf/internal/python_message.py", line 509, in init
field = _GetFieldByName(message_descriptor, field_name)
File "/.../venv/lib/python3.8/site-packages/google/protobuf/internal/python_message.py", line 572, in _GetFieldByName
raise ValueError('Protocol message %s has no "%s" field.' %
ValueError: Protocol message FieldDescriptorProto has no "proto3_optional" field.
```
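Until the package declares this constraint itself, a likely workaround (assuming nothing else in the environment pins `protobuf` lower) is to upgrade protobuf in place:

```
pip install --upgrade "protobuf>=3.12.0"
```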
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import io
16 import os
17
18 import setuptools
19
20
21 # Package metadata.
22
23 name = "google-cloud-bigquery"
24 description = "Google BigQuery API client library"
25
26 # Should be one of:
27 # 'Development Status :: 3 - Alpha'
28 # 'Development Status :: 4 - Beta'
29 # 'Development Status :: 5 - Production/Stable'
30 release_status = "Development Status :: 5 - Production/Stable"
31 dependencies = [
32 "google-api-core[grpc] >= 1.22.2, < 2.0.0dev",
33 "proto-plus >= 1.10.0",
34 "google-cloud-core >= 1.4.1, < 2.0dev",
35 "google-resumable-media >= 0.6.0, < 2.0dev",
36 "six >=1.13.0,< 2.0.0dev",
37 ]
38 extras = {
39 "bqstorage": [
40 "google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev",
41 # Due to an issue in pip's dependency resolver, the `grpc` extra is not
42 # installed, even though `google-cloud-bigquery-storage` specifies it
43 # as `google-api-core[grpc]`. We thus need to explicitly specify it here.
44 # See: https://github.com/googleapis/python-bigquery/issues/83 The
45 # grpc.Channel.close() method isn't added until 1.32.0.
46 # https://github.com/grpc/grpc/pull/15254
47 "grpcio >= 1.32.0, < 2.0dev",
48 "pyarrow >= 1.0.0, < 2.0dev",
49 ],
50 "pandas": [
51 "pandas>=0.23.0",
52 # pyarrow 1.0.0 is required for the use of timestamp_as_object keyword.
53 "pyarrow >= 1.0.0, < 2.0dev",
54 ],
55 "tqdm": ["tqdm >= 4.7.4, <5.0.0dev"],
56 "opentelemetry": [
57 "opentelemetry-api==0.9b0",
58 "opentelemetry-sdk==0.9b0",
59 "opentelemetry-instrumentation==0.9b0 ",
60 ],
61 }
62
63 all_extras = []
64
65 for extra in extras:
66 all_extras.extend(extras[extra])
67
68 extras["all"] = all_extras
69
70 # Setup boilerplate below this line.
71
72 package_root = os.path.abspath(os.path.dirname(__file__))
73
74 readme_filename = os.path.join(package_root, "README.rst")
75 with io.open(readme_filename, encoding="utf-8") as readme_file:
76 readme = readme_file.read()
77
78 version = {}
79 with open(os.path.join(package_root, "google/cloud/bigquery/version.py")) as fp:
80 exec(fp.read(), version)
81 version = version["__version__"]
82
83 # Only include packages under the 'google' namespace. Do not include tests,
84 # benchmarks, etc.
85 packages = [
86 package
87 for package in setuptools.PEP420PackageFinder.find()
88 if package.startswith("google")
89 ]
90
91 # Determine which namespaces are needed.
92 namespaces = ["google"]
93 if "google.cloud" in packages:
94 namespaces.append("google.cloud")
95
96
97 setuptools.setup(
98 name=name,
99 version=version,
100 description=description,
101 long_description=readme,
102 author="Google LLC",
103 author_email="[email protected]",
104 license="Apache 2.0",
105 url="https://github.com/googleapis/python-bigquery",
106 classifiers=[
107 release_status,
108 "Intended Audience :: Developers",
109 "License :: OSI Approved :: Apache Software License",
110 "Programming Language :: Python",
111 "Programming Language :: Python :: 3",
112 "Programming Language :: Python :: 3.6",
113 "Programming Language :: Python :: 3.7",
114 "Programming Language :: Python :: 3.8",
115 "Operating System :: OS Independent",
116 "Topic :: Internet",
117 ],
118 platforms="Posix; MacOS X; Windows",
119 packages=packages,
120 namespace_packages=namespaces,
121 install_requires=dependencies,
122 extras_require=extras,
123 python_requires=">=3.6",
124 include_package_data=True,
125 zip_safe=False,
126 )
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -34,6 +34,7 @@
"google-cloud-core >= 1.4.1, < 2.0dev",
"google-resumable-media >= 0.6.0, < 2.0dev",
"six >=1.13.0,< 2.0.0dev",
+ "protobuf >= 3.12.0",
]
extras = {
"bqstorage": [
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -34,6 +34,7 @@\n \"google-cloud-core >= 1.4.1, < 2.0dev\",\n \"google-resumable-media >= 0.6.0, < 2.0dev\",\n \"six >=1.13.0,< 2.0.0dev\",\n+ \"protobuf >= 3.12.0\",\n ]\n extras = {\n \"bqstorage\": [\n", "issue": "Needs protobuf minimum version 3.12.0\nIf you are still having issues, please be sure to include as much information as possible:\r\n\r\n#### Environment details\r\n\r\n - OS type and version: MacOS\r\n - Python version: `python --version` 3.8\r\n - pip version: `pip --version` 20.2.3\r\n - `google-cloud-bigquery` version: `pip show google-cloud-bigquery` 2.0.0\r\n\r\n#### Steps to reproduce\r\n\r\n 1. Just install the package, import somewhere and run it.\r\n\r\n#### Code example\r\n\r\n```python\r\nfrom google.cloud import bigquery\r\n```\r\n\r\n#### Stack trace\r\n```\r\nFile \"/.../venv/lib/python3.8/site-packages/google/protobuf/internal/python_message.py\", line 570, in _GetFieldByName\r\n return message_descriptor.fields_by_name[field_name]\r\nKeyError: 'proto3_optional'\r\n\r\n...\r\n\r\n from google.cloud import bigquery\r\n File \"/.../venv/lib/python3.8/site-packages/google/cloud/bigquery/__init__.py\", line 35, in <module>\r\n from google.cloud.bigquery.client import Client\r\n File \"/.../venv/lib/python3.8/site-packages/google/cloud/bigquery/client.py\", line 57, in <module>\r\n from google.cloud.bigquery import _pandas_helpers\r\n File \"/.../venv/lib/python3.8/site-packages/google/cloud/bigquery/_pandas_helpers.py\", line 36, in <module>\r\n from google.cloud.bigquery import schema\r\n File \"/.../venv/lib/python3.8/site-packages/google/cloud/bigquery/schema.py\", line 19, in <module>\r\n from google.cloud.bigquery_v2 import types\r\n File \"/.../venv/lib/python3.8/site-packages/google/cloud/bigquery_v2/__init__.py\", line 19, in <module>\r\n from .types.encryption_config import EncryptionConfiguration\r\n File \"/.../venv/lib/python3.8/site-packages/google/cloud/bigquery_v2/types/__init__.py\", line 18, in <module>\r\n from .encryption_config import EncryptionConfiguration\r\n File \"/.../venv/lib/python3.8/site-packages/google/cloud/bigquery_v2/types/encryption_config.py\", line 29, in <module>\r\n class EncryptionConfiguration(proto.Message):\r\n File \"/.../venv/lib/python3.8/site-packages/proto/message.py\", line 215, in __new__\r\n field=[i.descriptor for i in fields],\r\n File \"/.../venv/lib/python3.8/site-packages/proto/message.py\", line 215, in <listcomp>\r\n field=[i.descriptor for i in fields],\r\n File \"/.../venv/lib/python3.8/site-packages/proto/fields.py\", line 104, in descriptor\r\n self._descriptor = descriptor_pb2.FieldDescriptorProto(\r\n File \"/.../venv/lib/python3.8/site-packages/google/protobuf/internal/python_message.py\", line 509, in init\r\n field = _GetFieldByName(message_descriptor, field_name)\r\n File \"/.../venv/lib/python3.8/site-packages/google/protobuf/internal/python_message.py\", line 572, in _GetFieldByName\r\n raise ValueError('Protocol message %s has no \"%s\" field.' 
%\r\nValueError: Protocol message FieldDescriptorProto has no \"proto3_optional\" field.\r\n```\r\n\r\n\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-bigquery\"\ndescription = \"Google BigQuery API client library\"\n\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n \"google-api-core[grpc] >= 1.22.2, < 2.0.0dev\",\n \"proto-plus >= 1.10.0\",\n \"google-cloud-core >= 1.4.1, < 2.0dev\",\n \"google-resumable-media >= 0.6.0, < 2.0dev\",\n \"six >=1.13.0,< 2.0.0dev\",\n]\nextras = {\n \"bqstorage\": [\n \"google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev\",\n # Due to an issue in pip's dependency resolver, the `grpc` extra is not\n # installed, even though `google-cloud-bigquery-storage` specifies it\n # as `google-api-core[grpc]`. We thus need to explicitly specify it here.\n # See: https://github.com/googleapis/python-bigquery/issues/83 The\n # grpc.Channel.close() method isn't added until 1.32.0.\n # https://github.com/grpc/grpc/pull/15254\n \"grpcio >= 1.32.0, < 2.0dev\",\n \"pyarrow >= 1.0.0, < 2.0dev\",\n ],\n \"pandas\": [\n \"pandas>=0.23.0\",\n # pyarrow 1.0.0 is required for the use of timestamp_as_object keyword.\n \"pyarrow >= 1.0.0, < 2.0dev\",\n ],\n \"tqdm\": [\"tqdm >= 4.7.4, <5.0.0dev\"],\n \"opentelemetry\": [\n \"opentelemetry-api==0.9b0\",\n \"opentelemetry-sdk==0.9b0\",\n \"opentelemetry-instrumentation==0.9b0 \",\n ],\n}\n\nall_extras = []\n\nfor extra in extras:\n all_extras.extend(extras[extra])\n\nextras[\"all\"] = all_extras\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\nversion = {}\nwith open(os.path.join(package_root, \"google/cloud/bigquery/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\n# Only include packages under the 'google' namespace. 
Do not include tests,\n# benchmarks, etc.\npackages = [\n package\n for package in setuptools.PEP420PackageFinder.find()\n if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/googleapis/python-bigquery\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=3.6\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-bigquery\"\ndescription = \"Google BigQuery API client library\"\n\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n \"google-api-core[grpc] >= 1.22.2, < 2.0.0dev\",\n \"proto-plus >= 1.10.0\",\n \"google-cloud-core >= 1.4.1, < 2.0dev\",\n \"google-resumable-media >= 0.6.0, < 2.0dev\",\n \"six >=1.13.0,< 2.0.0dev\",\n \"protobuf >= 3.12.0\",\n]\nextras = {\n \"bqstorage\": [\n \"google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev\",\n # Due to an issue in pip's dependency resolver, the `grpc` extra is not\n # installed, even though `google-cloud-bigquery-storage` specifies it\n # as `google-api-core[grpc]`. 
We thus need to explicitly specify it here.\n # See: https://github.com/googleapis/python-bigquery/issues/83 The\n # grpc.Channel.close() method isn't added until 1.32.0.\n # https://github.com/grpc/grpc/pull/15254\n \"grpcio >= 1.32.0, < 2.0dev\",\n \"pyarrow >= 1.0.0, < 2.0dev\",\n ],\n \"pandas\": [\n \"pandas>=0.23.0\",\n # pyarrow 1.0.0 is required for the use of timestamp_as_object keyword.\n \"pyarrow >= 1.0.0, < 2.0dev\",\n ],\n \"tqdm\": [\"tqdm >= 4.7.4, <5.0.0dev\"],\n \"opentelemetry\": [\n \"opentelemetry-api==0.9b0\",\n \"opentelemetry-sdk==0.9b0\",\n \"opentelemetry-instrumentation==0.9b0 \",\n ],\n}\n\nall_extras = []\n\nfor extra in extras:\n all_extras.extend(extras[extra])\n\nextras[\"all\"] = all_extras\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\nversion = {}\nwith open(os.path.join(package_root, \"google/cloud/bigquery/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\n# Only include packages under the 'google' namespace. Do not include tests,\n# benchmarks, etc.\npackages = [\n package\n for package in setuptools.PEP420PackageFinder.find()\n if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/googleapis/python-bigquery\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=3.6\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "setup.py"}]} | 2,334 | 117 |
gh_patches_debug_30435 | rasdani/github-patches | git_diff | celery__celery-8982 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SecureSerializer fails on certain types and binary serializers
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [X] I have verified that the issue exists against the `main` branch of Celery.
- [ ] This has already been asked to the [discussions forum](https://github.com/celery/celery/discussions) first.
- [X] I have read the relevant section in the
[contribution guide](https://docs.celeryq.dev/en/main/contributing.html#other-bugs)
on reporting bugs.
- [X] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [X] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [X] I have checked the [commit log](https://github.com/celery/celery/commits/main)
to find out if the bug was already fixed in the main branch.
- [X] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
- [X] I have tried to reproduce the issue with [pytest-celery](https://docs.celeryq.dev/projects/pytest-celery/en/latest/userguide/celery-bug-report.html) and added the reproduction script below.
## Mandatory Debugging Information
- [ ] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [X] I have verified that the issue exists against the `main` branch of Celery.
- [ ] I have included the contents of ``pip freeze`` in the issue.
- [ ] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
<!--
Try some of the below if you think they are relevant.
It will help us figure out the scope of the bug and how many users it affects.
-->
- [ ] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [ ] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- None
#### Possible Duplicates
- None
## Environment & Settings
<!-- Include the contents of celery --version below -->
**Celery version**:
<!-- Include the output of celery -A proj report below -->
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
```
```
</p>
</details>
# Steps to Reproduce
## Required Dependencies
<!-- Please fill the required dependencies to reproduce this issue -->
- **Minimal Python Version**: N/A or Unknown
- **Minimal Celery Version**: N/A or Unknown
- **Minimal Kombu Version**: N/A or Unknown
- **Minimal Broker Version**: N/A or Unknown
- **Minimal Result Backend Version**: N/A or Unknown
- **Minimal OS and/or Kernel Version**: N/A or Unknown
- **Minimal Broker Client Version**: N/A or Unknown
- **Minimal Result Backend Client Version**: N/A or Unknown
### Python Packages
<!-- Please fill the contents of pip freeze below -->
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
```
</p>
</details>
### Other Dependencies
<!--
Please provide system dependencies, configuration files
and other dependency information if applicable
-->
<details>
<p>
N/A
</p>
</details>
## Minimally Reproducible Test Case
<!--
Please provide a reproducible test case.
Refer to the Reporting Bugs section in our contribution guide.
We prefer submitting test cases in the form of a PR to our integration test suite.
If you can provide one, please mention the PR number below.
If not, please attach the most minimal code example required to reproduce the issue below.
If the test case is too large, please include a link to a gist or a repository below.
Alternatively, the pytest-celery plugin can be used to create standalone reproduction scripts
that can be added to this report. See the pytest-celery documentation for more information at
pytest-celery.readthedocs.io
-->
<details>
<p>
```python
app.conf.update(
security_key='/private/keys/celery/private.key',
security_certificate='/private/keys/celery/public.pem',
security_cert_store='/private/keys/celery/*.pem')
app.setup_security()
@app.task
def serializer_test_task(arg: Any) -> Any:
return arg
def test_serialize(data):
res = serializer_test_task.delay(data)
deserialized_value = res.get()
assert deserialized_value == data
test_serialize(data=b"foo") # fails to validate signature
#############
app.setup_security(serializer="pickle")
test_serialize(data="foo") # fails to serialize any value using pickle serializer
```
</p>
</details>
# Expected Behavior
# Actual Behavior
There are several bugs in the SecureSerializer feature:
- When using the 'json' serializer (default), it will always fail to validate the signature if the passed value is of type 'bytes'
- When using another, binary serializer (rather than a string-based serializer like 'json'), for example 'pickle', the serializer is completely broken.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `celery/security/serialization.py`
Content:
```
1 """Secure serializer."""
2 from kombu.serialization import dumps, loads, registry
3 from kombu.utils.encoding import bytes_to_str, ensure_bytes, str_to_bytes
4
5 from celery.app.defaults import DEFAULT_SECURITY_DIGEST
6 from celery.utils.serialization import b64decode, b64encode
7
8 from .certificate import Certificate, FSCertStore
9 from .key import PrivateKey
10 from .utils import get_digest_algorithm, reraise_errors
11
12 __all__ = ('SecureSerializer', 'register_auth')
13
14
15 class SecureSerializer:
16 """Signed serializer."""
17
18 def __init__(self, key=None, cert=None, cert_store=None,
19 digest=DEFAULT_SECURITY_DIGEST, serializer='json'):
20 self._key = key
21 self._cert = cert
22 self._cert_store = cert_store
23 self._digest = get_digest_algorithm(digest)
24 self._serializer = serializer
25
26 def serialize(self, data):
27 """Serialize data structure into string."""
28 assert self._key is not None
29 assert self._cert is not None
30 with reraise_errors('Unable to serialize: {0!r}', (Exception,)):
31 content_type, content_encoding, body = dumps(
32 bytes_to_str(data), serializer=self._serializer)
33 # What we sign is the serialized body, not the body itself.
34 # this way the receiver doesn't have to decode the contents
35 # to verify the signature (and thus avoiding potential flaws
36 # in the decoding step).
37 body = ensure_bytes(body)
38 return self._pack(body, content_type, content_encoding,
39 signature=self._key.sign(body, self._digest),
40 signer=self._cert.get_id())
41
42 def deserialize(self, data):
43 """Deserialize data structure from string."""
44 assert self._cert_store is not None
45 with reraise_errors('Unable to deserialize: {0!r}', (Exception,)):
46 payload = self._unpack(data)
47 signature, signer, body = (payload['signature'],
48 payload['signer'],
49 payload['body'])
50 self._cert_store[signer].verify(body, signature, self._digest)
51 return loads(bytes_to_str(body), payload['content_type'],
52 payload['content_encoding'], force=True)
53
54 def _pack(self, body, content_type, content_encoding, signer, signature,
55 sep=str_to_bytes('\x00\x01')):
56 fields = sep.join(
57 ensure_bytes(s) for s in [signer, signature, content_type,
58 content_encoding, body]
59 )
60 return b64encode(fields)
61
62 def _unpack(self, payload, sep=str_to_bytes('\x00\x01')):
63 raw_payload = b64decode(ensure_bytes(payload))
64 first_sep = raw_payload.find(sep)
65
66 signer = raw_payload[:first_sep]
67 signer_cert = self._cert_store[signer]
68
69 # shift 3 bits right to get signature length
70 # 2048bit rsa key has a signature length of 256
71 # 4096bit rsa key has a signature length of 512
72 sig_len = signer_cert.get_pubkey().key_size >> 3
73 sep_len = len(sep)
74 signature_start_position = first_sep + sep_len
75 signature_end_position = signature_start_position + sig_len
76 signature = raw_payload[
77 signature_start_position:signature_end_position
78 ]
79
80 v = raw_payload[signature_end_position + sep_len:].split(sep)
81
82 return {
83 'signer': signer,
84 'signature': signature,
85 'content_type': bytes_to_str(v[0]),
86 'content_encoding': bytes_to_str(v[1]),
87 'body': bytes_to_str(v[2]),
88 }
89
90
91 def register_auth(key=None, key_password=None, cert=None, store=None,
92 digest=DEFAULT_SECURITY_DIGEST,
93 serializer='json'):
94 """Register security serializer."""
95 s = SecureSerializer(key and PrivateKey(key, password=key_password),
96 cert and Certificate(cert),
97 store and FSCertStore(store),
98 digest, serializer=serializer)
99 registry.register('auth', s.serialize, s.deserialize,
100 content_type='application/data',
101 content_encoding='utf-8')
102
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/celery/security/serialization.py b/celery/security/serialization.py
--- a/celery/security/serialization.py
+++ b/celery/security/serialization.py
@@ -29,7 +29,8 @@
assert self._cert is not None
with reraise_errors('Unable to serialize: {0!r}', (Exception,)):
content_type, content_encoding, body = dumps(
- bytes_to_str(data), serializer=self._serializer)
+ data, serializer=self._serializer)
+
# What we sign is the serialized body, not the body itself.
# this way the receiver doesn't have to decode the contents
# to verify the signature (and thus avoiding potential flaws
@@ -48,7 +49,7 @@
payload['signer'],
payload['body'])
self._cert_store[signer].verify(body, signature, self._digest)
- return loads(bytes_to_str(body), payload['content_type'],
+ return loads(body, payload['content_type'],
payload['content_encoding'], force=True)
def _pack(self, body, content_type, content_encoding, signer, signature,
@@ -84,7 +85,7 @@
'signature': signature,
'content_type': bytes_to_str(v[0]),
'content_encoding': bytes_to_str(v[1]),
- 'body': bytes_to_str(v[2]),
+ 'body': v[2],
}
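
A quick way to exercise this fix is a round trip through `SecureSerializer` with a `bytes` payload and with the pickle serializer, the two scenarios the issue reports. The sketch below is only illustrative: the key and certificate file names and the use of `CertStore` are assumptions made for the example, not part of the report or the patch.

```python
# Hedged sketch: round-trip check for the patched SecureSerializer.
# "private.key" / "public.pem" are hypothetical test fixtures (PEM contents).
from celery.security.certificate import Certificate, CertStore
from celery.security.key import PrivateKey
from celery.security.serialization import SecureSerializer


def roundtrip(data, serializer):
    key = PrivateKey(open("private.key").read())
    cert = Certificate(open("public.pem").read())
    store = CertStore()
    store.add_cert(cert)
    s = SecureSerializer(key, cert, store, serializer=serializer)
    return s.deserialize(s.serialize(data))


assert roundtrip(b"foo", "json") == b"foo"        # bytes no longer break verification
assert roundtrip({"x": 1}, "pickle") == {"x": 1}  # binary serializers round-trip again
```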
| {"golden_diff": "diff --git a/celery/security/serialization.py b/celery/security/serialization.py\n--- a/celery/security/serialization.py\n+++ b/celery/security/serialization.py\n@@ -29,7 +29,8 @@\n assert self._cert is not None\n with reraise_errors('Unable to serialize: {0!r}', (Exception,)):\n content_type, content_encoding, body = dumps(\n- bytes_to_str(data), serializer=self._serializer)\n+ data, serializer=self._serializer)\n+\n # What we sign is the serialized body, not the body itself.\n # this way the receiver doesn't have to decode the contents\n # to verify the signature (and thus avoiding potential flaws\n@@ -48,7 +49,7 @@\n payload['signer'],\n payload['body'])\n self._cert_store[signer].verify(body, signature, self._digest)\n- return loads(bytes_to_str(body), payload['content_type'],\n+ return loads(body, payload['content_type'],\n payload['content_encoding'], force=True)\n \n def _pack(self, body, content_type, content_encoding, signer, signature,\n@@ -84,7 +85,7 @@\n 'signature': signature,\n 'content_type': bytes_to_str(v[0]),\n 'content_encoding': bytes_to_str(v[1]),\n- 'body': bytes_to_str(v[2]),\n+ 'body': v[2],\n }\n", "issue": "SecureSerializer fails on certain types and binary serializers\n<!--\r\nPlease fill this template entirely and do not erase parts of it.\r\nWe reserve the right to close without a response\r\nbug reports which are incomplete.\r\n-->\r\n# Checklist\r\n<!--\r\nTo check an item on the list replace [ ] with [x].\r\n-->\r\n- [X] I have verified that the issue exists against the `main` branch of Celery.\r\n- [ ] This has already been asked to the [discussions forum](https://github.com/celery/celery/discussions) first.\r\n- [X] I have read the relevant section in the\r\n [contribution guide](https://docs.celeryq.dev/en/main/contributing.html#other-bugs)\r\n on reporting bugs.\r\n- [X] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)\r\n for similar or identical bug reports.\r\n- [X] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)\r\n for existing proposed fixes.\r\n- [X] I have checked the [commit log](https://github.com/celery/celery/commits/main)\r\n to find out if the bug was already fixed in the main branch.\r\n- [X] I have included all related issues and possible duplicate issues\r\n in this issue (If there are none, check this box anyway).\r\n- [X] I have tried to reproduce the issue with [pytest-celery](https://docs.celeryq.dev/projects/pytest-celery/en/latest/userguide/celery-bug-report.html) and added the reproduction script below.\r\n\r\n## Mandatory Debugging Information\r\n\r\n- [ ] I have included the output of ``celery -A proj report`` in the issue.\r\n (if you are not able to do this, then at least specify the Celery\r\n version affected).\r\n- [X] I have verified that the issue exists against the `main` branch of Celery.\r\n- [ ] I have included the contents of ``pip freeze`` in the issue.\r\n- [ ] I have included all the versions of all the external dependencies required\r\n to reproduce this bug.\r\n\r\n## Optional Debugging Information\r\n<!--\r\nTry some of the below if you think they are relevant.\r\nIt will help us figure out the scope of the bug and how many users it affects.\r\n-->\r\n- [ ] I have tried reproducing the issue on more than one Python version\r\n and/or implementation.\r\n- [ ] I 
have tried reproducing the issue on more than one message broker and/or\r\n result backend.\r\n- [ ] I have tried reproducing the issue on more than one version of the message\r\n broker and/or result backend.\r\n- [ ] I have tried reproducing the issue on more than one operating system.\r\n- [ ] I have tried reproducing the issue on more than one workers pool.\r\n- [ ] I have tried reproducing the issue with autoscaling, retries,\r\n ETA/Countdown & rate limits disabled.\r\n- [ ] I have tried reproducing the issue after downgrading\r\n and/or upgrading Celery and its dependencies.\r\n\r\n## Related Issues and Possible Duplicates\r\n<!--\r\nPlease make sure to search and mention any related issues\r\nor possible duplicates to this issue as requested by the checklist above.\r\n\r\nThis may or may not include issues in other repositories that the Celery project\r\nmaintains or other repositories that are dependencies of Celery.\r\n\r\nIf you don't know how to mention issues, please refer to Github's documentation\r\non the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests\r\n-->\r\n\r\n#### Related Issues\r\n\r\n- None\r\n\r\n#### Possible Duplicates\r\n\r\n- None\r\n\r\n## Environment & Settings\r\n<!-- Include the contents of celery --version below -->\r\n**Celery version**:\r\n<!-- Include the output of celery -A proj report below -->\r\n<details>\r\n<summary><b><code>celery report</code> Output:</b></summary>\r\n<p>\r\n\r\n```\r\n```\r\n\r\n</p>\r\n</details>\r\n\r\n# Steps to Reproduce\r\n\r\n## Required Dependencies\r\n<!-- Please fill the required dependencies to reproduce this issue -->\r\n- **Minimal Python Version**: N/A or Unknown\r\n- **Minimal Celery Version**: N/A or Unknown\r\n- **Minimal Kombu Version**: N/A or Unknown\r\n- **Minimal Broker Version**: N/A or Unknown\r\n- **Minimal Result Backend Version**: N/A or Unknown\r\n- **Minimal OS and/or Kernel Version**: N/A or Unknown\r\n- **Minimal Broker Client Version**: N/A or Unknown\r\n- **Minimal Result Backend Client Version**: N/A or Unknown\r\n\r\n### Python Packages\r\n<!-- Please fill the contents of pip freeze below -->\r\n<details>\r\n<summary><b><code>pip freeze</code> Output:</b></summary>\r\n<p>\r\n\r\n```\r\n```\r\n\r\n</p>\r\n</details>\r\n\r\n### Other Dependencies\r\n<!--\r\nPlease provide system dependencies, configuration files\r\nand other dependency information if applicable\r\n-->\r\n<details>\r\n<p>\r\nN/A\r\n</p>\r\n</details>\r\n\r\n## Minimally Reproducible Test Case\r\n<!--\r\nPlease provide a reproducible test case.\r\nRefer to the Reporting Bugs section in our contribution guide.\r\n\r\nWe prefer submitting test cases in the form of a PR to our integration test suite.\r\nIf you can provide one, please mention the PR number below.\r\nIf not, please attach the most minimal code example required to reproduce the issue below.\r\nIf the test case is too large, please include a link to a gist or a repository below.\r\n\r\nAlternatively, the pytest-celery plugin can be used to create standalone reproduction scripts\r\nthat can be added to this report. 
See the pytest-celery documentation for more information at\r\npytest-celery.readthedocs.io\r\n-->\r\n\r\n<details>\r\n<p>\r\n\r\n```python\r\n\r\napp.conf.update(\r\n security_key='/private/keys/celery/private.key',\r\n security_certificate='/private/keys/celery/public.pem',\r\n security_cert_store='/private/keys/celery/*.pem')\r\n\r\napp.setup_security()\r\n\r\[email protected]\r\ndef serializer_test_task(arg: Any) -> Any:\r\n return arg\r\n\r\ndef test_serialize(data):\r\n res = serializer_test_task.delay(data)\r\n deserialized_value = res.get()\r\n assert deserialized_value == data\r\n\r\ntest_serialize(data=b\"foo\") # fails to validate signature\r\n\r\n#############\r\n\r\napp.setup_security(serializer=\"pickle\")\r\ntest_serialize(data=\"foo\") # fails to serialize any value using pickle serializer\r\n\r\n```\r\n\r\n</p>\r\n</details>\r\n\r\n# Expected Behavior\r\n\r\n\r\n# Actual Behavior\r\n\r\nThere are several bugs in the SecureSerializer feature:\r\n - When using the 'json' serializer (default), it will always fail to validate the signature if the passed value is of type 'bytes'\r\n- When using the other binary serializer (and not a string-based serializer like 'json'), for example 'pickle', the serializer is completely broken. \r\n\n", "before_files": [{"content": "\"\"\"Secure serializer.\"\"\"\nfrom kombu.serialization import dumps, loads, registry\nfrom kombu.utils.encoding import bytes_to_str, ensure_bytes, str_to_bytes\n\nfrom celery.app.defaults import DEFAULT_SECURITY_DIGEST\nfrom celery.utils.serialization import b64decode, b64encode\n\nfrom .certificate import Certificate, FSCertStore\nfrom .key import PrivateKey\nfrom .utils import get_digest_algorithm, reraise_errors\n\n__all__ = ('SecureSerializer', 'register_auth')\n\n\nclass SecureSerializer:\n \"\"\"Signed serializer.\"\"\"\n\n def __init__(self, key=None, cert=None, cert_store=None,\n digest=DEFAULT_SECURITY_DIGEST, serializer='json'):\n self._key = key\n self._cert = cert\n self._cert_store = cert_store\n self._digest = get_digest_algorithm(digest)\n self._serializer = serializer\n\n def serialize(self, data):\n \"\"\"Serialize data structure into string.\"\"\"\n assert self._key is not None\n assert self._cert is not None\n with reraise_errors('Unable to serialize: {0!r}', (Exception,)):\n content_type, content_encoding, body = dumps(\n bytes_to_str(data), serializer=self._serializer)\n # What we sign is the serialized body, not the body itself.\n # this way the receiver doesn't have to decode the contents\n # to verify the signature (and thus avoiding potential flaws\n # in the decoding step).\n body = ensure_bytes(body)\n return self._pack(body, content_type, content_encoding,\n signature=self._key.sign(body, self._digest),\n signer=self._cert.get_id())\n\n def deserialize(self, data):\n \"\"\"Deserialize data structure from string.\"\"\"\n assert self._cert_store is not None\n with reraise_errors('Unable to deserialize: {0!r}', (Exception,)):\n payload = self._unpack(data)\n signature, signer, body = (payload['signature'],\n payload['signer'],\n payload['body'])\n self._cert_store[signer].verify(body, signature, self._digest)\n return loads(bytes_to_str(body), payload['content_type'],\n payload['content_encoding'], force=True)\n\n def _pack(self, body, content_type, content_encoding, signer, signature,\n sep=str_to_bytes('\\x00\\x01')):\n fields = sep.join(\n ensure_bytes(s) for s in [signer, signature, content_type,\n content_encoding, body]\n )\n return b64encode(fields)\n\n def _unpack(self, payload, 
sep=str_to_bytes('\\x00\\x01')):\n raw_payload = b64decode(ensure_bytes(payload))\n first_sep = raw_payload.find(sep)\n\n signer = raw_payload[:first_sep]\n signer_cert = self._cert_store[signer]\n\n # shift 3 bits right to get signature length\n # 2048bit rsa key has a signature length of 256\n # 4096bit rsa key has a signature length of 512\n sig_len = signer_cert.get_pubkey().key_size >> 3\n sep_len = len(sep)\n signature_start_position = first_sep + sep_len\n signature_end_position = signature_start_position + sig_len\n signature = raw_payload[\n signature_start_position:signature_end_position\n ]\n\n v = raw_payload[signature_end_position + sep_len:].split(sep)\n\n return {\n 'signer': signer,\n 'signature': signature,\n 'content_type': bytes_to_str(v[0]),\n 'content_encoding': bytes_to_str(v[1]),\n 'body': bytes_to_str(v[2]),\n }\n\n\ndef register_auth(key=None, key_password=None, cert=None, store=None,\n digest=DEFAULT_SECURITY_DIGEST,\n serializer='json'):\n \"\"\"Register security serializer.\"\"\"\n s = SecureSerializer(key and PrivateKey(key, password=key_password),\n cert and Certificate(cert),\n store and FSCertStore(store),\n digest, serializer=serializer)\n registry.register('auth', s.serialize, s.deserialize,\n content_type='application/data',\n content_encoding='utf-8')\n", "path": "celery/security/serialization.py"}], "after_files": [{"content": "\"\"\"Secure serializer.\"\"\"\nfrom kombu.serialization import dumps, loads, registry\nfrom kombu.utils.encoding import bytes_to_str, ensure_bytes, str_to_bytes\n\nfrom celery.app.defaults import DEFAULT_SECURITY_DIGEST\nfrom celery.utils.serialization import b64decode, b64encode\n\nfrom .certificate import Certificate, FSCertStore\nfrom .key import PrivateKey\nfrom .utils import get_digest_algorithm, reraise_errors\n\n__all__ = ('SecureSerializer', 'register_auth')\n\n\nclass SecureSerializer:\n \"\"\"Signed serializer.\"\"\"\n\n def __init__(self, key=None, cert=None, cert_store=None,\n digest=DEFAULT_SECURITY_DIGEST, serializer='json'):\n self._key = key\n self._cert = cert\n self._cert_store = cert_store\n self._digest = get_digest_algorithm(digest)\n self._serializer = serializer\n\n def serialize(self, data):\n \"\"\"Serialize data structure into string.\"\"\"\n assert self._key is not None\n assert self._cert is not None\n with reraise_errors('Unable to serialize: {0!r}', (Exception,)):\n content_type, content_encoding, body = dumps(\n data, serializer=self._serializer)\n\n # What we sign is the serialized body, not the body itself.\n # this way the receiver doesn't have to decode the contents\n # to verify the signature (and thus avoiding potential flaws\n # in the decoding step).\n body = ensure_bytes(body)\n return self._pack(body, content_type, content_encoding,\n signature=self._key.sign(body, self._digest),\n signer=self._cert.get_id())\n\n def deserialize(self, data):\n \"\"\"Deserialize data structure from string.\"\"\"\n assert self._cert_store is not None\n with reraise_errors('Unable to deserialize: {0!r}', (Exception,)):\n payload = self._unpack(data)\n signature, signer, body = (payload['signature'],\n payload['signer'],\n payload['body'])\n self._cert_store[signer].verify(body, signature, self._digest)\n return loads(body, payload['content_type'],\n payload['content_encoding'], force=True)\n\n def _pack(self, body, content_type, content_encoding, signer, signature,\n sep=str_to_bytes('\\x00\\x01')):\n fields = sep.join(\n ensure_bytes(s) for s in [signer, signature, content_type,\n content_encoding, body]\n 
)\n return b64encode(fields)\n\n def _unpack(self, payload, sep=str_to_bytes('\\x00\\x01')):\n raw_payload = b64decode(ensure_bytes(payload))\n first_sep = raw_payload.find(sep)\n\n signer = raw_payload[:first_sep]\n signer_cert = self._cert_store[signer]\n\n # shift 3 bits right to get signature length\n # 2048bit rsa key has a signature length of 256\n # 4096bit rsa key has a signature length of 512\n sig_len = signer_cert.get_pubkey().key_size >> 3\n sep_len = len(sep)\n signature_start_position = first_sep + sep_len\n signature_end_position = signature_start_position + sig_len\n signature = raw_payload[\n signature_start_position:signature_end_position\n ]\n\n v = raw_payload[signature_end_position + sep_len:].split(sep)\n\n return {\n 'signer': signer,\n 'signature': signature,\n 'content_type': bytes_to_str(v[0]),\n 'content_encoding': bytes_to_str(v[1]),\n 'body': v[2],\n }\n\n\ndef register_auth(key=None, key_password=None, cert=None, store=None,\n digest=DEFAULT_SECURITY_DIGEST,\n serializer='json'):\n \"\"\"Register security serializer.\"\"\"\n s = SecureSerializer(key and PrivateKey(key, password=key_password),\n cert and Certificate(cert),\n store and FSCertStore(store),\n digest, serializer=serializer)\n registry.register('auth', s.serialize, s.deserialize,\n content_type='application/data',\n content_encoding='utf-8')\n", "path": "celery/security/serialization.py"}]} | 2,855 | 315 |
gh_patches_debug_14696 | rasdani/github-patches | git_diff | Health-Informatics-UoN__Carrot-Mapper-645 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
💡 Combine Github release workflows
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Is your proposal related to a problem or functionality gap?
Currently the release workflow for dev/test/production consists of 3 separate GitHub Actions workflows, following Gitflow, so merging a new fix requires 3 separate PRs, artifacts, and deployments: https://github.com/Health-Informatics-UoN/CaRROT-Mapper/pulls?q=is%3Apr+is%3Aclosed
### Describe your proposal
I propose combining this into one workflow, utilising Github environments to deploy a single artifact from the main branch. An example here: https://github.com/Health-Informatics-UoN/rcc-monitor/actions/runs/7931015704
### Describe alternatives you've considered
_No response_
### I'm part of a Project Team
Yes - DRS
### Anything else?
It might not be done here, but I think it is also worth exploring when database migrations happen as part of CD; I'm not sure if this is currently a manual process.
### Are you willing to contribute to developing this feature?
✅ Yes, me or my team intend to do the development.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/api/api/settings.py`
Content:
```
1 """
2 Django settings for api project.
3
4 Generated by 'django-admin startproject' using Django 3.1.5.
5
6 For more information on this file, see
7 https://docs.djangoproject.com/en/3.1/topics/settings/
8
9 For the full list of settings and their values, see
10 https://docs.djangoproject.com/en/3.1/ref/settings/
11 """
12
13 import os
14 from dotenv import load_dotenv
15
16 load_dotenv()
17
18 # Build paths inside the project like this: BASE_DIR / 'subdir'.
19 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
20 TEMPLATE_DIR = os.path.join(BASE_DIR, "templates")
21 STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
22 STATIC_DIR = os.path.join(BASE_DIR, "static")
23 STATICFILES_DIRS = [
24 STATIC_DIR,
25 ]
26 MEDIA_URL = "/media/"
27 MEDIA_ROOT = os.path.join(BASE_DIR, "media")
28
29 # Quick-start development settings - unsuitable for production
30 # See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
31
32 # SECURITY WARNING: keep the secret key used in production secret!
33 SECRET_KEY = os.environ.get("SECRET_KEY")
34
35 # SECURITY WARNING: don't run with debug turned on in production!
36 # DEBUG will only evaluate to True if 'True' or 1 is supplied
37 DEBUG = os.getenv("DEBUG", "False") in ["True", 1]
38
39 # Here we need to manipulate a string containing a Python list into a list of strings
40 ALLOWED_HOSTS = [
41 x.strip()[1:-1] for x in os.environ.get("ALLOWED_HOSTS")[1:-1].split(",")
42 ]
43
44
45 # Application definition
46
47 INSTALLED_APPS = [
48 "django.contrib.admin",
49 "django.contrib.auth",
50 "django.contrib.contenttypes",
51 "django.contrib.sessions",
52 "django.contrib.messages",
53 "whitenoise.runserver_nostatic",
54 "django.contrib.staticfiles",
55 "extra_views",
56 "mapping",
57 "data",
58 "rest_framework",
59 "django_filters",
60 "rest_framework.authtoken",
61 "corsheaders",
62 ]
63
64 MIDDLEWARE = [
65 "corsheaders.middleware.CorsMiddleware",
66 "django.middleware.security.SecurityMiddleware",
67 "django.contrib.sessions.middleware.SessionMiddleware",
68 "django.middleware.common.CommonMiddleware",
69 "django.middleware.csrf.CsrfViewMiddleware",
70 "django.contrib.auth.middleware.AuthenticationMiddleware",
71 "django.contrib.messages.middleware.MessageMiddleware",
72 "django.middleware.clickjacking.XFrameOptionsMiddleware",
73 "whitenoise.middleware.WhiteNoiseMiddleware",
74 ]
75
76 STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
77
78 ROOT_URLCONF = "api.urls"
79
80 TEMPLATES = [
81 {
82 "BACKEND": "django.template.backends.django.DjangoTemplates",
83 "DIRS": [os.path.join(BASE_DIR, "templates")],
84 "APP_DIRS": True,
85 "OPTIONS": {
86 "context_processors": [
87 "django.template.context_processors.debug",
88 "django.template.context_processors.request",
89 "django.contrib.auth.context_processors.auth",
90 "django.contrib.messages.context_processors.messages",
91 "django.template.context_processors.media",
92 "api.react.react",
93 ],
94 },
95 },
96 ]
97
98 WSGI_APPLICATION = "api.wsgi.application"
99
100
101 # Database
102 # https://docs.djangoproject.com/en/3.1/ref/settings/#databases
103
104 DATABASES = {
105 "default": {
106 "ENGINE": os.getenv("COCONNECT_DB_ENGINE"),
107 "HOST": os.getenv("COCONNECT_DB_HOST"),
108 "PORT": os.getenv("COCONNECT_DB_PORT"),
109 "NAME": os.getenv("COCONNECT_DB_NAME"),
110 "USER": os.getenv("COCONNECT_DB_USER"),
111 "PASSWORD": os.getenv("COCONNECT_DB_PASSWORD"),
112 "TEST": {
113 "NAME": "throwaway-db",
114 "MIRROR": "default",
115 },
116 }
117 }
118
119
120 # Password validation
121 # https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
122
123 AUTH_PASSWORD_VALIDATORS = [
124 {
125 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
126 },
127 {
128 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
129 },
130 {
131 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
132 },
133 {
134 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
135 },
136 ]
137
138
139 # Internationalization
140 # https://docs.djangoproject.com/en/3.1/topics/i18n/
141
142 LANGUAGE_CODE = "en-us"
143
144 TIME_ZONE = "UTC"
145
146 USE_I18N = True
147
148 USE_L10N = True
149
150 USE_TZ = True
151
152 CORS_ORIGIN_ALLOW_ALL = True
153
154 REST_FRAMEWORK = {
155 "DEFAULT_AUTHENTICATION_CLASSES": (
156 "rest_framework.authentication.TokenAuthentication",
157 "rest_framework.authentication.SessionAuthentication",
158 ),
159 "DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.IsAuthenticated",),
160 }
161 # Static files (CSS, JavaScript, Images)
162 # https://docs.djangoproject.com/en/3.1/howto/static-files/
163
164 STATIC_URL = "/static/"
165
166 LOGIN_REDIRECT_URL = "/"
167 EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
168
169 # NLP API KEY
170 NLP_API_KEY = os.getenv("NLP_API_KEY")
171
172 SESSION_COOKIE_AGE = 86400 # session length is 24 hours
173
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/api/api/settings.py b/app/api/api/settings.py
--- a/app/api/api/settings.py
+++ b/app/api/api/settings.py
@@ -11,6 +11,7 @@
"""
import os
+
from dotenv import load_dotenv
load_dotenv()
@@ -59,6 +60,7 @@
"django_filters",
"rest_framework.authtoken",
"corsheaders",
+ "test",
]
MIDDLEWARE = [
@@ -110,8 +112,7 @@
"USER": os.getenv("COCONNECT_DB_USER"),
"PASSWORD": os.getenv("COCONNECT_DB_PASSWORD"),
"TEST": {
- "NAME": "throwaway-db",
- "MIRROR": "default",
+ "NAME": "throwawaydb",
},
}
}
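
The functional impact of the settings change is at test time: Django builds the throwaway test database from `DATABASES["default"]["TEST"]`, and the removed `"MIRROR": "default"` entry is normally meant for read replicas (it stops a separate test database from being created for that alias). A minimal, assumed check that the new configuration resolves as intended, for example from `python manage.py shell`:

```python
# Hypothetical check, not part of the patch: confirm the test-database settings.
from django.conf import settings

test_cfg = settings.DATABASES["default"]["TEST"]
assert test_cfg["NAME"] == "throwawaydb"  # created and destroyed by the test runner
assert "MIRROR" not in test_cfg           # no longer marked as a mirror of "default"
```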
| {"golden_diff": "diff --git a/app/api/api/settings.py b/app/api/api/settings.py\n--- a/app/api/api/settings.py\n+++ b/app/api/api/settings.py\n@@ -11,6 +11,7 @@\n \"\"\"\n \n import os\n+\n from dotenv import load_dotenv\n \n load_dotenv()\n@@ -59,6 +60,7 @@\n \"django_filters\",\n \"rest_framework.authtoken\",\n \"corsheaders\",\n+ \"test\",\n ]\n \n MIDDLEWARE = [\n@@ -110,8 +112,7 @@\n \"USER\": os.getenv(\"COCONNECT_DB_USER\"),\n \"PASSWORD\": os.getenv(\"COCONNECT_DB_PASSWORD\"),\n \"TEST\": {\n- \"NAME\": \"throwaway-db\",\n- \"MIRROR\": \"default\",\n+ \"NAME\": \"throwawaydb\",\n },\n }\n }\n", "issue": "\ud83d\udca1 Combine Github release workflows\n### Is there an existing issue for this?\r\n\r\n- [X] I have searched the existing issues\r\n\r\n### Is your proposal related to a problem or functionality gap?\r\n\r\nCurrently the release workflow for dev/test/production is 3 separate Github actions, working with Gitflow. So merging a new fix requires 3 separate PRs, artifacts, and deployments: https://github.com/Health-Informatics-UoN/CaRROT-Mapper/pulls?q=is%3Apr+is%3Aclosed\r\n\r\n\r\n\r\n### Describe your proposal\r\n\r\nI propose combining this into one workflow, utilising Github environments to deploy a single artifact from the main branch. An example here: https://github.com/Health-Informatics-UoN/rcc-monitor/actions/runs/7931015704\r\n\r\n\r\n### Describe alternatives you've considered\r\n\r\n_No response_\r\n\r\n### I'm part of a Project Team\r\n\r\nYes - DRS\r\n\r\n### Anything else?\r\n\r\nIt might not be done here - but I think it's worth also exploring when database migrations happen as part of CD, not sure if this currently a manual process?\r\n\r\n### Are you willing to contribute to developing this feature?\r\n\r\n\u2705 Yes, me or my team intend to do the development.\n", "before_files": [{"content": "\"\"\"\nDjango settings for api project.\n\nGenerated by 'django-admin startproject' using Django 3.1.5.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.1/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/3.1/ref/settings/\n\"\"\"\n\nimport os\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\n# Build paths inside the project like this: BASE_DIR / 'subdir'.\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nTEMPLATE_DIR = os.path.join(BASE_DIR, \"templates\")\nSTATIC_ROOT = os.path.join(BASE_DIR, \"staticfiles\")\nSTATIC_DIR = os.path.join(BASE_DIR, \"static\")\nSTATICFILES_DIRS = [\n STATIC_DIR,\n]\nMEDIA_URL = \"/media/\"\nMEDIA_ROOT = os.path.join(BASE_DIR, \"media\")\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get(\"SECRET_KEY\")\n\n# SECURITY WARNING: don't run with debug turned on in production!\n# DEBUG will only evaluate to True if 'True' or 1 is supplied\nDEBUG = os.getenv(\"DEBUG\", \"False\") in [\"True\", 1]\n\n# Here we need to manipulate a string containing a Python list into a list of strings\nALLOWED_HOSTS = [\n x.strip()[1:-1] for x in os.environ.get(\"ALLOWED_HOSTS\")[1:-1].split(\",\")\n]\n\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"whitenoise.runserver_nostatic\",\n 
\"django.contrib.staticfiles\",\n \"extra_views\",\n \"mapping\",\n \"data\",\n \"rest_framework\",\n \"django_filters\",\n \"rest_framework.authtoken\",\n \"corsheaders\",\n]\n\nMIDDLEWARE = [\n \"corsheaders.middleware.CorsMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n]\n\nSTATICFILES_STORAGE = \"whitenoise.storage.CompressedManifestStaticFilesStorage\"\n\nROOT_URLCONF = \"api.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [os.path.join(BASE_DIR, \"templates\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"django.template.context_processors.media\",\n \"api.react.react\",\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"api.wsgi.application\"\n\n\n# Database\n# https://docs.djangoproject.com/en/3.1/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": os.getenv(\"COCONNECT_DB_ENGINE\"),\n \"HOST\": os.getenv(\"COCONNECT_DB_HOST\"),\n \"PORT\": os.getenv(\"COCONNECT_DB_PORT\"),\n \"NAME\": os.getenv(\"COCONNECT_DB_NAME\"),\n \"USER\": os.getenv(\"COCONNECT_DB_USER\"),\n \"PASSWORD\": os.getenv(\"COCONNECT_DB_PASSWORD\"),\n \"TEST\": {\n \"NAME\": \"throwaway-db\",\n \"MIRROR\": \"default\",\n },\n }\n}\n\n\n# Password validation\n# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.1/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\nCORS_ORIGIN_ALLOW_ALL = True\n\nREST_FRAMEWORK = {\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"rest_framework.authentication.TokenAuthentication\",\n \"rest_framework.authentication.SessionAuthentication\",\n ),\n \"DEFAULT_PERMISSION_CLASSES\": (\"rest_framework.permissions.IsAuthenticated\",),\n}\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.1/howto/static-files/\n\nSTATIC_URL = \"/static/\"\n\nLOGIN_REDIRECT_URL = \"/\"\nEMAIL_BACKEND = \"django.core.mail.backends.console.EmailBackend\"\n\n# NLP API KEY\nNLP_API_KEY = os.getenv(\"NLP_API_KEY\")\n\nSESSION_COOKIE_AGE = 86400 # session length is 24 hours\n", "path": "app/api/api/settings.py"}], "after_files": [{"content": "\"\"\"\nDjango settings for api project.\n\nGenerated by 'django-admin startproject' using Django 3.1.5.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.1/topics/settings/\n\nFor the full list of settings and their values, 
see\nhttps://docs.djangoproject.com/en/3.1/ref/settings/\n\"\"\"\n\nimport os\n\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\n# Build paths inside the project like this: BASE_DIR / 'subdir'.\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nTEMPLATE_DIR = os.path.join(BASE_DIR, \"templates\")\nSTATIC_ROOT = os.path.join(BASE_DIR, \"staticfiles\")\nSTATIC_DIR = os.path.join(BASE_DIR, \"static\")\nSTATICFILES_DIRS = [\n STATIC_DIR,\n]\nMEDIA_URL = \"/media/\"\nMEDIA_ROOT = os.path.join(BASE_DIR, \"media\")\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get(\"SECRET_KEY\")\n\n# SECURITY WARNING: don't run with debug turned on in production!\n# DEBUG will only evaluate to True if 'True' or 1 is supplied\nDEBUG = os.getenv(\"DEBUG\", \"False\") in [\"True\", 1]\n\n# Here we need to manipulate a string containing a Python list into a list of strings\nALLOWED_HOSTS = [\n x.strip()[1:-1] for x in os.environ.get(\"ALLOWED_HOSTS\")[1:-1].split(\",\")\n]\n\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"whitenoise.runserver_nostatic\",\n \"django.contrib.staticfiles\",\n \"extra_views\",\n \"mapping\",\n \"data\",\n \"rest_framework\",\n \"django_filters\",\n \"rest_framework.authtoken\",\n \"corsheaders\",\n \"test\",\n]\n\nMIDDLEWARE = [\n \"corsheaders.middleware.CorsMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n]\n\nSTATICFILES_STORAGE = \"whitenoise.storage.CompressedManifestStaticFilesStorage\"\n\nROOT_URLCONF = \"api.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [os.path.join(BASE_DIR, \"templates\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"django.template.context_processors.media\",\n \"api.react.react\",\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"api.wsgi.application\"\n\n\n# Database\n# https://docs.djangoproject.com/en/3.1/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": os.getenv(\"COCONNECT_DB_ENGINE\"),\n \"HOST\": os.getenv(\"COCONNECT_DB_HOST\"),\n \"PORT\": os.getenv(\"COCONNECT_DB_PORT\"),\n \"NAME\": os.getenv(\"COCONNECT_DB_NAME\"),\n \"USER\": os.getenv(\"COCONNECT_DB_USER\"),\n \"PASSWORD\": os.getenv(\"COCONNECT_DB_PASSWORD\"),\n \"TEST\": {\n \"NAME\": \"throwawaydb\",\n },\n }\n}\n\n\n# Password validation\n# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": 
\"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.1/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\nCORS_ORIGIN_ALLOW_ALL = True\n\nREST_FRAMEWORK = {\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"rest_framework.authentication.TokenAuthentication\",\n \"rest_framework.authentication.SessionAuthentication\",\n ),\n \"DEFAULT_PERMISSION_CLASSES\": (\"rest_framework.permissions.IsAuthenticated\",),\n}\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.1/howto/static-files/\n\nSTATIC_URL = \"/static/\"\n\nLOGIN_REDIRECT_URL = \"/\"\nEMAIL_BACKEND = \"django.core.mail.backends.console.EmailBackend\"\n\n# NLP API KEY\nNLP_API_KEY = os.getenv(\"NLP_API_KEY\")\n\nSESSION_COOKIE_AGE = 86400 # session length is 24 hours\n", "path": "app/api/api/settings.py"}]} | 2,067 | 180 |
gh_patches_debug_3328 | rasdani/github-patches | git_diff | Mailu__Mailu-1944 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Letsencrypt Force Renewal
Is there a limit on the Subject Alt Name entries?
I have updated my /mailu/mailu.env "HOSTNAMES" variable, but when I restart Mailu it doesn't update the Subject Alt Names on the mailu cert.
Previously it has worked, so I am guessing that I need to force Letsencrypt to refresh, as it isn't within the renewal window. But there is no guidance for the new letsencrypt certbot.
I am using the latest Mailu version (1.7), and this is the command I am using to restart Mailu: '/mailu/docker-compose -p mailu up -d'
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/nginx/letsencrypt.py`
Content:
```
1 #!/usr/bin/python3
2
3 import os
4 import time
5 import subprocess
6
7
8 command = [
9 "certbot",
10 "-n", "--agree-tos", # non-interactive
11 "-d", os.environ["HOSTNAMES"],
12 "-m", "{}@{}".format(os.environ["POSTMASTER"], os.environ["DOMAIN"]),
13 "certonly", "--standalone",
14 "--cert-name", "mailu",
15 "--preferred-challenges", "http", "--http-01-port", "8008",
16 "--keep-until-expiring",
17 "--rsa-key-size", "4096",
18 "--config-dir", "/certs/letsencrypt",
19 "--post-hook", "/config.py"
20 ]
21
22 # Wait for nginx to start
23 time.sleep(5)
24
25 # Run certbot every hour
26 while True:
27 subprocess.call(command)
28 time.sleep(3600)
29
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/core/nginx/letsencrypt.py b/core/nginx/letsencrypt.py
--- a/core/nginx/letsencrypt.py
+++ b/core/nginx/letsencrypt.py
@@ -14,8 +14,8 @@
"--cert-name", "mailu",
"--preferred-challenges", "http", "--http-01-port", "8008",
"--keep-until-expiring",
- "--rsa-key-size", "4096",
"--config-dir", "/certs/letsencrypt",
+ "--renew-with-new-domains",
"--post-hook", "/config.py"
]
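
The effect of the patch is easiest to see in the final certbot invocation: `--renew-with-new-domains` tells certbot to reissue the existing "mailu" certificate whenever the requested `-d` domain list (built from `HOSTNAMES`) no longer matches the certificate's current SAN entries, instead of waiting for the expiry window. A rough sketch of the resulting command list, with placeholder values where the real ones come from the environment:

```python
# Illustrative only: mirrors the patched letsencrypt.py with placeholder values.
hostnames = "mail.example.com,webmail.example.com"  # would come from HOSTNAMES
command = [
    "certbot",
    "-n", "--agree-tos",
    "-d", hostnames,
    "-m", "postmaster@example.com",                  # POSTMASTER@DOMAIN
    "certonly", "--standalone",
    "--cert-name", "mailu",
    "--preferred-challenges", "http", "--http-01-port", "8008",
    "--keep-until-expiring",
    "--config-dir", "/certs/letsencrypt",
    "--renew-with-new-domains",                      # reissue when the SAN list changes
    "--post-hook", "/config.py",
]
```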
| {"golden_diff": "diff --git a/core/nginx/letsencrypt.py b/core/nginx/letsencrypt.py\n--- a/core/nginx/letsencrypt.py\n+++ b/core/nginx/letsencrypt.py\n@@ -14,8 +14,8 @@\n \"--cert-name\", \"mailu\",\n \"--preferred-challenges\", \"http\", \"--http-01-port\", \"8008\",\n \"--keep-until-expiring\",\n- \"--rsa-key-size\", \"4096\",\n \"--config-dir\", \"/certs/letsencrypt\",\n+ \"--renew-with-new-domains\",\n \"--post-hook\", \"/config.py\"\n ]\n", "issue": "Letsencrypt Force Renewal\nIs there a limit on the Subject Alt Name entries?\r\n\r\nI have updated my /mailu/mailu.env \"HOSTNAMES\" variable, but when I restart Mailu it doesn't update the Subject Alt Names on the mailu cert.\r\n\r\nPreviously it has worked, so I am guessing that I need to force Letsencrypt to refresh as it isnt within the renewal window. But there is no guidance for the new letsencrypt certbot.\r\n\r\nI am using the latest Mailu version (1.7) and this is the command I am using to restart mailu '/mailu/docker-compose -p mailu up -d'\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport os\nimport time\nimport subprocess\n\n\ncommand = [\n \"certbot\",\n \"-n\", \"--agree-tos\", # non-interactive\n \"-d\", os.environ[\"HOSTNAMES\"],\n \"-m\", \"{}@{}\".format(os.environ[\"POSTMASTER\"], os.environ[\"DOMAIN\"]),\n \"certonly\", \"--standalone\",\n \"--cert-name\", \"mailu\",\n \"--preferred-challenges\", \"http\", \"--http-01-port\", \"8008\",\n \"--keep-until-expiring\",\n \"--rsa-key-size\", \"4096\",\n \"--config-dir\", \"/certs/letsencrypt\",\n \"--post-hook\", \"/config.py\"\n]\n\n# Wait for nginx to start\ntime.sleep(5)\n\n# Run certbot every hour\nwhile True:\n subprocess.call(command)\n time.sleep(3600)\n\n", "path": "core/nginx/letsencrypt.py"}], "after_files": [{"content": "#!/usr/bin/python3\n\nimport os\nimport time\nimport subprocess\n\n\ncommand = [\n \"certbot\",\n \"-n\", \"--agree-tos\", # non-interactive\n \"-d\", os.environ[\"HOSTNAMES\"],\n \"-m\", \"{}@{}\".format(os.environ[\"POSTMASTER\"], os.environ[\"DOMAIN\"]),\n \"certonly\", \"--standalone\",\n \"--cert-name\", \"mailu\",\n \"--preferred-challenges\", \"http\", \"--http-01-port\", \"8008\",\n \"--keep-until-expiring\",\n \"--config-dir\", \"/certs/letsencrypt\",\n \"--renew-with-new-domains\",\n \"--post-hook\", \"/config.py\"\n]\n\n# Wait for nginx to start\ntime.sleep(5)\n\n# Run certbot every hour\nwhile True:\n subprocess.call(command)\n time.sleep(3600)\n\n", "path": "core/nginx/letsencrypt.py"}]} | 635 | 132 |
gh_patches_debug_29046 | rasdani/github-patches | git_diff | Mailu__Mailu-931 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Admin whitelist_webmail hard coded
Hi,
I'm trying to send transactional email with Mailu SMTP from a backend server and it takes more than 15s.
I tracked down one problem in core/admin/mailu/internal/__init__.py:
def whitelist_webmail() uses socket.gethostbyname("webmail")
In my Docker configuration there is no "webmail" host, so the socket.gethostbyname call returns nothing after 5s, which slows down the /internal/auth/email request considerably.
When I set "webmail" to a fake IP on the admin server, /internal/auth/email returns immediately.
Maybe it would be better to define a list of hostnames in the configuration file instead of using a hard-coded "webmail" value. What do you think?
Thanks Mailu for the great work!
JB
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/admin/mailu/internal/__init__.py`
Content:
```
1 from flask_limiter import RateLimitExceeded
2
3 from mailu import utils
4
5 import socket
6 import flask
7
8
9 internal = flask.Blueprint('internal', __name__, template_folder='templates')
10
11
12 @internal.app_errorhandler(RateLimitExceeded)
13 def rate_limit_handler(e):
14 response = flask.Response()
15 response.headers['Auth-Status'] = 'Authentication rate limit from one source exceeded'
16 response.headers['Auth-Error-Code'] = '451 4.3.2'
17 if int(flask.request.headers['Auth-Login-Attempt']) < 10:
18 response.headers['Auth-Wait'] = '3'
19 return response
20
21
22 @utils.limiter.request_filter
23 def whitelist_webmail():
24 try:
25 return flask.request.headers["Client-Ip"] ==\
26 socket.gethostbyname("webmail")
27 except:
28 return False
29
30
31 from mailu.internal.views import *
32
```
Path: `core/admin/mailu/configuration.py`
Content:
```
1 import os
2 from mailustart import resolve
3
4 DEFAULT_CONFIG = {
5 # Specific to the admin UI
6 'DOCKER_SOCKET': 'unix:///var/run/docker.sock',
7 'BABEL_DEFAULT_LOCALE': 'en',
8 'BABEL_DEFAULT_TIMEZONE': 'UTC',
9 'BOOTSTRAP_SERVE_LOCAL': True,
10 'RATELIMIT_STORAGE_URL': 'redis://redis/2',
11 'QUOTA_STORAGE_URL': 'redis://redis/1',
12 'DEBUG': False,
13 'DOMAIN_REGISTRATION': False,
14 'TEMPLATES_AUTO_RELOAD': True,
15 # Database settings
16 'DB_FLAVOR': None,
17 'DB_USER': 'mailu',
18 'DB_PW': None,
19 'DB_HOST': 'database',
20 'DB_NAME': 'mailu',
21 'SQLITE_DATABASE_FILE':'data/main.db',
22 'SQLALCHEMY_DATABASE_URI': 'sqlite:////data/main.db',
23 'SQLALCHEMY_TRACK_MODIFICATIONS': False,
24 # Statistics management
25 'INSTANCE_ID_PATH': '/data/instance',
26 'STATS_ENDPOINT': '0.{}.stats.mailu.io',
27 # Common configuration variables
28 'SECRET_KEY': 'changeMe',
29 'DOMAIN': 'mailu.io',
30 'HOSTNAMES': 'mail.mailu.io,alternative.mailu.io,yetanother.mailu.io',
31 'POSTMASTER': 'postmaster',
32 'TLS_FLAVOR': 'cert',
33 'AUTH_RATELIMIT': '10/minute;1000/hour',
34 'DISABLE_STATISTICS': False,
35 # Mail settings
36 'DMARC_RUA': None,
37 'DMARC_RUF': None,
38 'WELCOME': False,
39 'WELCOME_SUBJECT': 'Dummy welcome topic',
40 'WELCOME_BODY': 'Dummy welcome body',
41 'DKIM_SELECTOR': 'dkim',
42 'DKIM_PATH': '/dkim/{domain}.{selector}.key',
43 'DEFAULT_QUOTA': 1000000000,
44 # Web settings
45 'SITENAME': 'Mailu',
46 'WEBSITE': 'https://mailu.io',
47 'WEB_ADMIN': '/admin',
48 'WEB_WEBMAIL': '/webmail',
49 'RECAPTCHA_PUBLIC_KEY': '',
50 'RECAPTCHA_PRIVATE_KEY': '',
51 # Advanced settings
52 'PASSWORD_SCHEME': 'BLF-CRYPT',
53 # Host settings
54 'HOST_IMAP': 'imap',
55 'HOST_POP3': 'imap',
56 'HOST_SMTP': 'smtp',
57 'HOST_WEBMAIL': 'webmail',
58 'HOST_FRONT': 'front',
59 'HOST_AUTHSMTP': os.environ.get('HOST_SMTP', 'smtp'),
60 'SUBNET': '192.168.203.0/24',
61 'POD_ADDRESS_RANGE': None
62 }
63
64 class ConfigManager(dict):
65 """ Naive configuration manager that uses environment only
66 """
67
68 DB_TEMPLATES = {
69 'sqlite': 'sqlite:////{SQLITE_DATABASE_FILE}',
70 'postgresql': 'postgresql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',
71 'mysql': 'mysql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}'
72 }
73
74 def __init__(self):
75 self.config = dict()
76
77 def resolve_host(self):
78 self.config['HOST_IMAP'] = resolve(self.config['HOST_IMAP'])
79 self.config['HOST_POP3'] = resolve(self.config['HOST_POP3'])
80 self.config['HOST_AUTHSMTP'] = resolve(self.config['HOST_AUTHSMTP'])
81 self.config['HOST_SMTP'] = resolve(self.config['HOST_SMTP'])
82
83 def __coerce_value(self, value):
84 if isinstance(value, str) and value.lower() in ('true','yes'):
85 return True
86 elif isinstance(value, str) and value.lower() in ('false', 'no'):
87 return False
88 return value
89
90 def init_app(self, app):
91 self.config.update(app.config)
92 # get environment variables
93 self.config.update({
94 key: self.__coerce_value(os.environ.get(key, value))
95 for key, value in DEFAULT_CONFIG.items()
96 })
97 self.resolve_host()
98
99 # automatically set the sqlalchemy string
100 if self.config['DB_FLAVOR']:
101 template = self.DB_TEMPLATES[self.config['DB_FLAVOR']]
102 self.config['SQLALCHEMY_DATABASE_URI'] = template.format(**self.config)
103 # update the app config itself
104 app.config = self
105
106 def setdefault(self, key, value):
107 if key not in self.config:
108 self.config[key] = value
109 return self.config[key]
110
111 def get(self, *args):
112 return self.config.get(*args)
113
114 def keys(self):
115 return self.config.keys()
116
117 def __getitem__(self, key):
118 return self.config.get(key)
119
120 def __setitem__(self, key, value):
121 self.config[key] = value
122
123 def __contains__(self, key):
124 return key in self.config
125
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/core/admin/mailu/configuration.py b/core/admin/mailu/configuration.py
--- a/core/admin/mailu/configuration.py
+++ b/core/admin/mailu/configuration.py
@@ -46,6 +46,7 @@
'WEBSITE': 'https://mailu.io',
'WEB_ADMIN': '/admin',
'WEB_WEBMAIL': '/webmail',
+ 'WEBMAIL': 'none',
'RECAPTCHA_PUBLIC_KEY': '',
'RECAPTCHA_PRIVATE_KEY': '',
# Advanced settings
@@ -79,6 +80,8 @@
self.config['HOST_POP3'] = resolve(self.config['HOST_POP3'])
self.config['HOST_AUTHSMTP'] = resolve(self.config['HOST_AUTHSMTP'])
self.config['HOST_SMTP'] = resolve(self.config['HOST_SMTP'])
+ if self.config['WEBMAIL'] != 'none':
+ self.config['HOST_WEBMAIL'] = resolve(self.config['HOST_WEBMAIL'])
def __coerce_value(self, value):
if isinstance(value, str) and value.lower() in ('true','yes'):
diff --git a/core/admin/mailu/internal/__init__.py b/core/admin/mailu/internal/__init__.py
--- a/core/admin/mailu/internal/__init__.py
+++ b/core/admin/mailu/internal/__init__.py
@@ -1,6 +1,7 @@
from flask_limiter import RateLimitExceeded
from mailu import utils
+from flask import current_app as app
import socket
import flask
@@ -23,7 +24,7 @@
def whitelist_webmail():
try:
return flask.request.headers["Client-Ip"] ==\
- socket.gethostbyname("webmail")
+ app.config["HOST_WEBMAIL"]
except:
return False
| {"golden_diff": "diff --git a/core/admin/mailu/configuration.py b/core/admin/mailu/configuration.py\n--- a/core/admin/mailu/configuration.py\n+++ b/core/admin/mailu/configuration.py\n@@ -46,6 +46,7 @@\n 'WEBSITE': 'https://mailu.io',\n 'WEB_ADMIN': '/admin',\n 'WEB_WEBMAIL': '/webmail',\n+ 'WEBMAIL': 'none',\n 'RECAPTCHA_PUBLIC_KEY': '',\n 'RECAPTCHA_PRIVATE_KEY': '',\n # Advanced settings\n@@ -79,6 +80,8 @@\n self.config['HOST_POP3'] = resolve(self.config['HOST_POP3'])\n self.config['HOST_AUTHSMTP'] = resolve(self.config['HOST_AUTHSMTP'])\n self.config['HOST_SMTP'] = resolve(self.config['HOST_SMTP'])\n+ if self.config['WEBMAIL'] != 'none':\n+ self.config['HOST_WEBMAIL'] = resolve(self.config['HOST_WEBMAIL'])\n \n def __coerce_value(self, value):\n if isinstance(value, str) and value.lower() in ('true','yes'):\ndiff --git a/core/admin/mailu/internal/__init__.py b/core/admin/mailu/internal/__init__.py\n--- a/core/admin/mailu/internal/__init__.py\n+++ b/core/admin/mailu/internal/__init__.py\n@@ -1,6 +1,7 @@\n from flask_limiter import RateLimitExceeded\n \n from mailu import utils\n+from flask import current_app as app\n \n import socket\n import flask\n@@ -23,7 +24,7 @@\n def whitelist_webmail():\n try:\n return flask.request.headers[\"Client-Ip\"] ==\\\n- socket.gethostbyname(\"webmail\")\n+ app.config[\"HOST_WEBMAIL\"]\n except:\n return False\n", "issue": "Admin whitelist_webmail hard coded\nHi,\r\nI'm trying to send transactional email with Mailu SMTP from a backend server and it takes more than 15s.\r\nI tracked down one problem in core/admin/mailu/internal/__init__.py:\r\ndef whitelist_webmail() uses socket.gethostbyname(\"webmail\")\r\n\r\nIn my docker configuration there is no \"webmail\" host so the function socket.gethostbyname return nothing after 5s which slows down a lot the request /internal/auth/email\r\nWhen I set \"webmail\" to a fake ip on the admin server the /internal/auth/email returns immediately.\r\n\r\nMaybe it would be better to define a list of hostnames in the configuration file instead of using a hard coded \"webmail\" value. 
What do you think?\r\n\r\nThanks Mailu for the great work!\r\nJB\n", "before_files": [{"content": "from flask_limiter import RateLimitExceeded\n\nfrom mailu import utils\n\nimport socket\nimport flask\n\n\ninternal = flask.Blueprint('internal', __name__, template_folder='templates')\n\n\[email protected]_errorhandler(RateLimitExceeded)\ndef rate_limit_handler(e):\n response = flask.Response()\n response.headers['Auth-Status'] = 'Authentication rate limit from one source exceeded'\n response.headers['Auth-Error-Code'] = '451 4.3.2'\n if int(flask.request.headers['Auth-Login-Attempt']) < 10:\n response.headers['Auth-Wait'] = '3'\n return response\n\n\[email protected]_filter\ndef whitelist_webmail():\n try:\n return flask.request.headers[\"Client-Ip\"] ==\\\n socket.gethostbyname(\"webmail\")\n except:\n return False\n\n\nfrom mailu.internal.views import *\n", "path": "core/admin/mailu/internal/__init__.py"}, {"content": "import os\nfrom mailustart import resolve\n\nDEFAULT_CONFIG = {\n # Specific to the admin UI\n 'DOCKER_SOCKET': 'unix:///var/run/docker.sock',\n 'BABEL_DEFAULT_LOCALE': 'en',\n 'BABEL_DEFAULT_TIMEZONE': 'UTC',\n 'BOOTSTRAP_SERVE_LOCAL': True,\n 'RATELIMIT_STORAGE_URL': 'redis://redis/2',\n 'QUOTA_STORAGE_URL': 'redis://redis/1',\n 'DEBUG': False,\n 'DOMAIN_REGISTRATION': False,\n 'TEMPLATES_AUTO_RELOAD': True,\n # Database settings\n 'DB_FLAVOR': None,\n 'DB_USER': 'mailu',\n 'DB_PW': None,\n 'DB_HOST': 'database',\n 'DB_NAME': 'mailu',\n 'SQLITE_DATABASE_FILE':'data/main.db',\n 'SQLALCHEMY_DATABASE_URI': 'sqlite:////data/main.db',\n 'SQLALCHEMY_TRACK_MODIFICATIONS': False,\n # Statistics management\n 'INSTANCE_ID_PATH': '/data/instance',\n 'STATS_ENDPOINT': '0.{}.stats.mailu.io',\n # Common configuration variables\n 'SECRET_KEY': 'changeMe',\n 'DOMAIN': 'mailu.io',\n 'HOSTNAMES': 'mail.mailu.io,alternative.mailu.io,yetanother.mailu.io',\n 'POSTMASTER': 'postmaster',\n 'TLS_FLAVOR': 'cert',\n 'AUTH_RATELIMIT': '10/minute;1000/hour',\n 'DISABLE_STATISTICS': False,\n # Mail settings\n 'DMARC_RUA': None,\n 'DMARC_RUF': None,\n 'WELCOME': False,\n 'WELCOME_SUBJECT': 'Dummy welcome topic',\n 'WELCOME_BODY': 'Dummy welcome body',\n 'DKIM_SELECTOR': 'dkim',\n 'DKIM_PATH': '/dkim/{domain}.{selector}.key',\n 'DEFAULT_QUOTA': 1000000000,\n # Web settings\n 'SITENAME': 'Mailu',\n 'WEBSITE': 'https://mailu.io',\n 'WEB_ADMIN': '/admin',\n 'WEB_WEBMAIL': '/webmail',\n 'RECAPTCHA_PUBLIC_KEY': '',\n 'RECAPTCHA_PRIVATE_KEY': '',\n # Advanced settings\n 'PASSWORD_SCHEME': 'BLF-CRYPT',\n # Host settings\n 'HOST_IMAP': 'imap',\n 'HOST_POP3': 'imap',\n 'HOST_SMTP': 'smtp',\n 'HOST_WEBMAIL': 'webmail',\n 'HOST_FRONT': 'front',\n 'HOST_AUTHSMTP': os.environ.get('HOST_SMTP', 'smtp'),\n 'SUBNET': '192.168.203.0/24',\n 'POD_ADDRESS_RANGE': None\n}\n\nclass ConfigManager(dict):\n \"\"\" Naive configuration manager that uses environment only\n \"\"\"\n\n DB_TEMPLATES = {\n 'sqlite': 'sqlite:////{SQLITE_DATABASE_FILE}',\n 'postgresql': 'postgresql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',\n 'mysql': 'mysql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}'\n }\n\n def __init__(self):\n self.config = dict()\n\n def resolve_host(self):\n self.config['HOST_IMAP'] = resolve(self.config['HOST_IMAP'])\n self.config['HOST_POP3'] = resolve(self.config['HOST_POP3'])\n self.config['HOST_AUTHSMTP'] = resolve(self.config['HOST_AUTHSMTP'])\n self.config['HOST_SMTP'] = resolve(self.config['HOST_SMTP'])\n\n def __coerce_value(self, value):\n if isinstance(value, str) and value.lower() in ('true','yes'):\n return True\n elif 
isinstance(value, str) and value.lower() in ('false', 'no'):\n return False\n return value\n\n def init_app(self, app):\n self.config.update(app.config)\n # get environment variables\n self.config.update({\n key: self.__coerce_value(os.environ.get(key, value))\n for key, value in DEFAULT_CONFIG.items()\n })\n self.resolve_host()\n\n # automatically set the sqlalchemy string\n if self.config['DB_FLAVOR']:\n template = self.DB_TEMPLATES[self.config['DB_FLAVOR']]\n self.config['SQLALCHEMY_DATABASE_URI'] = template.format(**self.config)\n # update the app config itself\n app.config = self\n\n def setdefault(self, key, value):\n if key not in self.config:\n self.config[key] = value\n return self.config[key]\n\n def get(self, *args):\n return self.config.get(*args)\n\n def keys(self):\n return self.config.keys()\n\n def __getitem__(self, key):\n return self.config.get(key)\n\n def __setitem__(self, key, value):\n self.config[key] = value\n\n def __contains__(self, key):\n return key in self.config\n", "path": "core/admin/mailu/configuration.py"}], "after_files": [{"content": "from flask_limiter import RateLimitExceeded\n\nfrom mailu import utils\nfrom flask import current_app as app\n\nimport socket\nimport flask\n\n\ninternal = flask.Blueprint('internal', __name__, template_folder='templates')\n\n\[email protected]_errorhandler(RateLimitExceeded)\ndef rate_limit_handler(e):\n response = flask.Response()\n response.headers['Auth-Status'] = 'Authentication rate limit from one source exceeded'\n response.headers['Auth-Error-Code'] = '451 4.3.2'\n if int(flask.request.headers['Auth-Login-Attempt']) < 10:\n response.headers['Auth-Wait'] = '3'\n return response\n\n\[email protected]_filter\ndef whitelist_webmail():\n try:\n return flask.request.headers[\"Client-Ip\"] ==\\\n app.config[\"HOST_WEBMAIL\"]\n except:\n return False\n\n\nfrom mailu.internal.views import *\n", "path": "core/admin/mailu/internal/__init__.py"}, {"content": "import os\nfrom mailustart import resolve\n\nDEFAULT_CONFIG = {\n # Specific to the admin UI\n 'DOCKER_SOCKET': 'unix:///var/run/docker.sock',\n 'BABEL_DEFAULT_LOCALE': 'en',\n 'BABEL_DEFAULT_TIMEZONE': 'UTC',\n 'BOOTSTRAP_SERVE_LOCAL': True,\n 'RATELIMIT_STORAGE_URL': 'redis://redis/2',\n 'QUOTA_STORAGE_URL': 'redis://redis/1',\n 'DEBUG': False,\n 'DOMAIN_REGISTRATION': False,\n 'TEMPLATES_AUTO_RELOAD': True,\n # Database settings\n 'DB_FLAVOR': None,\n 'DB_USER': 'mailu',\n 'DB_PW': None,\n 'DB_HOST': 'database',\n 'DB_NAME': 'mailu',\n 'SQLITE_DATABASE_FILE':'data/main.db',\n 'SQLALCHEMY_DATABASE_URI': 'sqlite:////data/main.db',\n 'SQLALCHEMY_TRACK_MODIFICATIONS': False,\n # Statistics management\n 'INSTANCE_ID_PATH': '/data/instance',\n 'STATS_ENDPOINT': '0.{}.stats.mailu.io',\n # Common configuration variables\n 'SECRET_KEY': 'changeMe',\n 'DOMAIN': 'mailu.io',\n 'HOSTNAMES': 'mail.mailu.io,alternative.mailu.io,yetanother.mailu.io',\n 'POSTMASTER': 'postmaster',\n 'TLS_FLAVOR': 'cert',\n 'AUTH_RATELIMIT': '10/minute;1000/hour',\n 'DISABLE_STATISTICS': False,\n # Mail settings\n 'DMARC_RUA': None,\n 'DMARC_RUF': None,\n 'WELCOME': False,\n 'WELCOME_SUBJECT': 'Dummy welcome topic',\n 'WELCOME_BODY': 'Dummy welcome body',\n 'DKIM_SELECTOR': 'dkim',\n 'DKIM_PATH': '/dkim/{domain}.{selector}.key',\n 'DEFAULT_QUOTA': 1000000000,\n # Web settings\n 'SITENAME': 'Mailu',\n 'WEBSITE': 'https://mailu.io',\n 'WEB_ADMIN': '/admin',\n 'WEB_WEBMAIL': '/webmail',\n 'WEBMAIL': 'none',\n 'RECAPTCHA_PUBLIC_KEY': '',\n 'RECAPTCHA_PRIVATE_KEY': '',\n # Advanced settings\n 
'PASSWORD_SCHEME': 'BLF-CRYPT',\n # Host settings\n 'HOST_IMAP': 'imap',\n 'HOST_POP3': 'imap',\n 'HOST_SMTP': 'smtp',\n 'HOST_WEBMAIL': 'webmail',\n 'HOST_FRONT': 'front',\n 'HOST_AUTHSMTP': os.environ.get('HOST_SMTP', 'smtp'),\n 'SUBNET': '192.168.203.0/24',\n 'POD_ADDRESS_RANGE': None\n}\n\nclass ConfigManager(dict):\n \"\"\" Naive configuration manager that uses environment only\n \"\"\"\n\n DB_TEMPLATES = {\n 'sqlite': 'sqlite:////{SQLITE_DATABASE_FILE}',\n 'postgresql': 'postgresql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',\n 'mysql': 'mysql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}'\n }\n\n def __init__(self):\n self.config = dict()\n\n def resolve_host(self):\n self.config['HOST_IMAP'] = resolve(self.config['HOST_IMAP'])\n self.config['HOST_POP3'] = resolve(self.config['HOST_POP3'])\n self.config['HOST_AUTHSMTP'] = resolve(self.config['HOST_AUTHSMTP'])\n self.config['HOST_SMTP'] = resolve(self.config['HOST_SMTP'])\n if self.config['WEBMAIL'] != 'none':\n self.config['HOST_WEBMAIL'] = resolve(self.config['HOST_WEBMAIL'])\n\n def __coerce_value(self, value):\n if isinstance(value, str) and value.lower() in ('true','yes'):\n return True\n elif isinstance(value, str) and value.lower() in ('false', 'no'):\n return False\n return value\n\n def init_app(self, app):\n self.config.update(app.config)\n # get environment variables\n self.config.update({\n key: self.__coerce_value(os.environ.get(key, value))\n for key, value in DEFAULT_CONFIG.items()\n })\n self.resolve_host()\n\n # automatically set the sqlalchemy string\n if self.config['DB_FLAVOR']:\n template = self.DB_TEMPLATES[self.config['DB_FLAVOR']]\n self.config['SQLALCHEMY_DATABASE_URI'] = template.format(**self.config)\n # update the app config itself\n app.config = self\n\n def setdefault(self, key, value):\n if key not in self.config:\n self.config[key] = value\n return self.config[key]\n\n def get(self, *args):\n return self.config.get(*args)\n\n def keys(self):\n return self.config.keys()\n\n def __getitem__(self, key):\n return self.config.get(key)\n\n def __setitem__(self, key, value):\n self.config[key] = value\n\n def __contains__(self, key):\n return key in self.config\n", "path": "core/admin/mailu/configuration.py"}]} | 2,038 | 379 |
gh_patches_debug_6932 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-1172 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Incorrect import of book from Goodreads
**Describe the bug**
I attempted to import my Goodreads library (https://www.goodreads.com/user/show/986159-nikhil) into Bookwyrm. I noticed the book _Dark Fiber: Tracking Critical Internet Culture_ ([bookwyrm](https://bookwyrm.social/book/120984), [goodreads](https://www.goodreads.com/book/show/1371653.Dark_Fiber); ISBN 9780262122498) in my library, which was not a book on my Goodreads list. Based on the order the imported books began showing up in my library, I think (but cannot confirm) that this was an incorrect import of the book _Ganhadores: A greve negra de 1857 na Bahia_ ([goodreads](https://www.goodreads.com/book/show/49417547-ganhadores); ISBN 9788535932430).
**To Reproduce**
Steps to reproduce the behavior:
1. Export Goodreads CSV for account https://www.goodreads.com/user/show/986159-nikhil
2. Navigate to https://bookwyrm.social/import
3. Upload CSV with format `GoodReads (CSV)`.
4. Navigate to https://bookwyrm.social/user/nikhilarundesai/books and see error
**Expected behavior**
The book _Dark Fiber_ does not appear in my BookWyrm library.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Desktop (please complete the following information):**
- OS: Mac OS
- Browser: Chrome
- Version: 90.0.4430.212 (Official Build) (x86_64)
**Additional context**
Add any other context about the problem here.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/models/import_job.py`
Content:
```
1 """ track progress of goodreads imports """
2 import re
3 import dateutil.parser
4
5 from django.apps import apps
6 from django.db import models
7 from django.utils import timezone
8
9 from bookwyrm.connectors import connector_manager
10 from bookwyrm.models import ReadThrough, User, Book
11 from .fields import PrivacyLevels
12
13
14 # Mapping goodreads -> bookwyrm shelf titles.
15 GOODREADS_SHELVES = {
16 "read": "read",
17 "currently-reading": "reading",
18 "to-read": "to-read",
19 }
20
21
22 def unquote_string(text):
23 """resolve csv quote weirdness"""
24 match = re.match(r'="([^"]*)"', text)
25 if match:
26 return match.group(1)
27 return text
28
29
30 def construct_search_term(title, author):
31 """formulate a query for the data connector"""
32 # Strip brackets (usually series title from search term)
33 title = re.sub(r"\s*\([^)]*\)\s*", "", title)
34 # Open library doesn't like including author initials in search term.
35 author = re.sub(r"(\w\.)+\s*", "", author)
36
37 return " ".join([title, author])
38
39
40 class ImportJob(models.Model):
41 """entry for a specific request for book data import"""
42
43 user = models.ForeignKey(User, on_delete=models.CASCADE)
44 created_date = models.DateTimeField(default=timezone.now)
45 task_id = models.CharField(max_length=100, null=True)
46 include_reviews = models.BooleanField(default=True)
47 complete = models.BooleanField(default=False)
48 privacy = models.CharField(
49 max_length=255, default="public", choices=PrivacyLevels.choices
50 )
51 retry = models.BooleanField(default=False)
52
53 def save(self, *args, **kwargs):
54 """save and notify"""
55 super().save(*args, **kwargs)
56 if self.complete:
57 notification_model = apps.get_model(
58 "bookwyrm.Notification", require_ready=True
59 )
60 notification_model.objects.create(
61 user=self.user,
62 notification_type="IMPORT",
63 related_import=self,
64 )
65
66
67 class ImportItem(models.Model):
68 """a single line of a csv being imported"""
69
70 job = models.ForeignKey(ImportJob, on_delete=models.CASCADE, related_name="items")
71 index = models.IntegerField()
72 data = models.JSONField()
73 book = models.ForeignKey(Book, on_delete=models.SET_NULL, null=True, blank=True)
74 fail_reason = models.TextField(null=True)
75
76 def resolve(self):
77 """try various ways to lookup a book"""
78 self.book = self.get_book_from_isbn() or self.get_book_from_title_author()
79
80 def get_book_from_isbn(self):
81 """search by isbn"""
82 search_result = connector_manager.first_search_result(
83 self.isbn, min_confidence=0.999
84 )
85 if search_result:
86 # raises ConnectorException
87 return search_result.connector.get_or_create_book(search_result.key)
88 return None
89
90 def get_book_from_title_author(self):
91 """search by title and author"""
92 search_term = construct_search_term(self.title, self.author)
93 search_result = connector_manager.first_search_result(
94 search_term, min_confidence=0.999
95 )
96 if search_result:
97 # raises ConnectorException
98 return search_result.connector.get_or_create_book(search_result.key)
99 return None
100
101 @property
102 def title(self):
103 """get the book title"""
104 return self.data["Title"]
105
106 @property
107 def author(self):
108 """get the book title"""
109 return self.data["Author"]
110
111 @property
112 def isbn(self):
113 """pulls out the isbn13 field from the csv line data"""
114 return unquote_string(self.data["ISBN13"])
115
116 @property
117 def shelf(self):
118 """the goodreads shelf field"""
119 if self.data["Exclusive Shelf"]:
120 return GOODREADS_SHELVES.get(self.data["Exclusive Shelf"])
121 return None
122
123 @property
124 def review(self):
125 """a user-written review, to be imported with the book data"""
126 return self.data["My Review"]
127
128 @property
129 def rating(self):
130 """x/5 star rating for a book"""
131 if self.data.get("My Rating", None):
132 return int(self.data["My Rating"])
133 return None
134
135 @property
136 def date_added(self):
137 """when the book was added to this dataset"""
138 if self.data["Date Added"]:
139 return timezone.make_aware(dateutil.parser.parse(self.data["Date Added"]))
140 return None
141
142 @property
143 def date_started(self):
144 """when the book was started"""
145 if "Date Started" in self.data and self.data["Date Started"]:
146 return timezone.make_aware(dateutil.parser.parse(self.data["Date Started"]))
147 return None
148
149 @property
150 def date_read(self):
151 """the date a book was completed"""
152 if self.data["Date Read"]:
153 return timezone.make_aware(dateutil.parser.parse(self.data["Date Read"]))
154 return None
155
156 @property
157 def reads(self):
158 """formats a read through dataset for the book in this line"""
159 start_date = self.date_started
160
161 # Goodreads special case (no 'date started' field)
162 if (
163 (self.shelf == "reading" or (self.shelf == "read" and self.date_read))
164 and self.date_added
165 and not start_date
166 ):
167 start_date = self.date_added
168
169 if start_date and start_date is not None and not self.date_read:
170 return [ReadThrough(start_date=start_date)]
171 if self.date_read:
172 return [
173 ReadThrough(
174 start_date=start_date,
175 finish_date=self.date_read,
176 )
177 ]
178 return []
179
180 def __repr__(self):
181 return "<{!r}Item {!r}>".format(self.data["import_source"], self.data["Title"])
182
183 def __str__(self):
184 return "{} by {}".format(self.data["Title"], self.data["Author"])
185
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bookwyrm/models/import_job.py b/bookwyrm/models/import_job.py
--- a/bookwyrm/models/import_job.py
+++ b/bookwyrm/models/import_job.py
@@ -75,7 +75,12 @@
def resolve(self):
"""try various ways to lookup a book"""
- self.book = self.get_book_from_isbn() or self.get_book_from_title_author()
+ if self.isbn:
+ self.book = self.get_book_from_isbn()
+ else:
+ # don't fall back on title/author search is isbn is present.
+ # you're too likely to mismatch
+ self.get_book_from_title_author()
def get_book_from_isbn(self):
"""search by isbn"""
| {"golden_diff": "diff --git a/bookwyrm/models/import_job.py b/bookwyrm/models/import_job.py\n--- a/bookwyrm/models/import_job.py\n+++ b/bookwyrm/models/import_job.py\n@@ -75,7 +75,12 @@\n \n def resolve(self):\n \"\"\"try various ways to lookup a book\"\"\"\n- self.book = self.get_book_from_isbn() or self.get_book_from_title_author()\n+ if self.isbn:\n+ self.book = self.get_book_from_isbn()\n+ else:\n+ # don't fall back on title/author search is isbn is present.\n+ # you're too likely to mismatch\n+ self.get_book_from_title_author()\n \n def get_book_from_isbn(self):\n \"\"\"search by isbn\"\"\"\n", "issue": "Incorrect import of book from Goodreads\n**Describe the bug**\r\nI attempted to import my Goodreads library (https://www.goodreads.com/user/show/986159-nikhil) into Bookwyrm. I noticed the book _Dark Fiber: Tracking Critical Internet Culture_ ([bookwyrm](https://bookwyrm.social/book/120984), [goodreads](https://www.goodreads.com/book/show/1371653.Dark_Fiber); ISBN 9780262122498) in my library, which was not a book on my Goodreads list. Based on the order the imported books began showing up in my library, I think (but cannot confirm) that this was an incorrect import of the book _Ganhadores: A greve negra de 1857 na Bahia_ ([goodreads](https://www.goodreads.com/book/show/49417547-ganhadores); ISBN 9788535932430).\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Export Goodreads CSV for account https://www.goodreads.com/user/show/986159-nikhil\r\n2. Navigate to https://bookwyrm.social/import\r\n3. Upload CSV with format `GoodReads (CSV)`.\r\n4. Navigate to https://bookwyrm.social/user/nikhilarundesai/books and see error\r\n\r\n**Expected behavior**\r\nThe book _Dark Fiber_ does not appear in my BookWyrm library.\r\n\r\n**Screenshots**\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Mac OS\r\n - Browser: Chrome\r\n - Version: 90.0.4430.212 (Official Build) (x86_64)\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "\"\"\" track progress of goodreads imports \"\"\"\nimport re\nimport dateutil.parser\n\nfrom django.apps import apps\nfrom django.db import models\nfrom django.utils import timezone\n\nfrom bookwyrm.connectors import connector_manager\nfrom bookwyrm.models import ReadThrough, User, Book\nfrom .fields import PrivacyLevels\n\n\n# Mapping goodreads -> bookwyrm shelf titles.\nGOODREADS_SHELVES = {\n \"read\": \"read\",\n \"currently-reading\": \"reading\",\n \"to-read\": \"to-read\",\n}\n\n\ndef unquote_string(text):\n \"\"\"resolve csv quote weirdness\"\"\"\n match = re.match(r'=\"([^\"]*)\"', text)\n if match:\n return match.group(1)\n return text\n\n\ndef construct_search_term(title, author):\n \"\"\"formulate a query for the data connector\"\"\"\n # Strip brackets (usually series title from search term)\n title = re.sub(r\"\\s*\\([^)]*\\)\\s*\", \"\", title)\n # Open library doesn't like including author initials in search term.\n author = re.sub(r\"(\\w\\.)+\\s*\", \"\", author)\n\n return \" \".join([title, author])\n\n\nclass ImportJob(models.Model):\n \"\"\"entry for a specific request for book data import\"\"\"\n\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n created_date = models.DateTimeField(default=timezone.now)\n task_id = models.CharField(max_length=100, null=True)\n include_reviews = models.BooleanField(default=True)\n complete = models.BooleanField(default=False)\n privacy = 
models.CharField(\n max_length=255, default=\"public\", choices=PrivacyLevels.choices\n )\n retry = models.BooleanField(default=False)\n\n def save(self, *args, **kwargs):\n \"\"\"save and notify\"\"\"\n super().save(*args, **kwargs)\n if self.complete:\n notification_model = apps.get_model(\n \"bookwyrm.Notification\", require_ready=True\n )\n notification_model.objects.create(\n user=self.user,\n notification_type=\"IMPORT\",\n related_import=self,\n )\n\n\nclass ImportItem(models.Model):\n \"\"\"a single line of a csv being imported\"\"\"\n\n job = models.ForeignKey(ImportJob, on_delete=models.CASCADE, related_name=\"items\")\n index = models.IntegerField()\n data = models.JSONField()\n book = models.ForeignKey(Book, on_delete=models.SET_NULL, null=True, blank=True)\n fail_reason = models.TextField(null=True)\n\n def resolve(self):\n \"\"\"try various ways to lookup a book\"\"\"\n self.book = self.get_book_from_isbn() or self.get_book_from_title_author()\n\n def get_book_from_isbn(self):\n \"\"\"search by isbn\"\"\"\n search_result = connector_manager.first_search_result(\n self.isbn, min_confidence=0.999\n )\n if search_result:\n # raises ConnectorException\n return search_result.connector.get_or_create_book(search_result.key)\n return None\n\n def get_book_from_title_author(self):\n \"\"\"search by title and author\"\"\"\n search_term = construct_search_term(self.title, self.author)\n search_result = connector_manager.first_search_result(\n search_term, min_confidence=0.999\n )\n if search_result:\n # raises ConnectorException\n return search_result.connector.get_or_create_book(search_result.key)\n return None\n\n @property\n def title(self):\n \"\"\"get the book title\"\"\"\n return self.data[\"Title\"]\n\n @property\n def author(self):\n \"\"\"get the book title\"\"\"\n return self.data[\"Author\"]\n\n @property\n def isbn(self):\n \"\"\"pulls out the isbn13 field from the csv line data\"\"\"\n return unquote_string(self.data[\"ISBN13\"])\n\n @property\n def shelf(self):\n \"\"\"the goodreads shelf field\"\"\"\n if self.data[\"Exclusive Shelf\"]:\n return GOODREADS_SHELVES.get(self.data[\"Exclusive Shelf\"])\n return None\n\n @property\n def review(self):\n \"\"\"a user-written review, to be imported with the book data\"\"\"\n return self.data[\"My Review\"]\n\n @property\n def rating(self):\n \"\"\"x/5 star rating for a book\"\"\"\n if self.data.get(\"My Rating\", None):\n return int(self.data[\"My Rating\"])\n return None\n\n @property\n def date_added(self):\n \"\"\"when the book was added to this dataset\"\"\"\n if self.data[\"Date Added\"]:\n return timezone.make_aware(dateutil.parser.parse(self.data[\"Date Added\"]))\n return None\n\n @property\n def date_started(self):\n \"\"\"when the book was started\"\"\"\n if \"Date Started\" in self.data and self.data[\"Date Started\"]:\n return timezone.make_aware(dateutil.parser.parse(self.data[\"Date Started\"]))\n return None\n\n @property\n def date_read(self):\n \"\"\"the date a book was completed\"\"\"\n if self.data[\"Date Read\"]:\n return timezone.make_aware(dateutil.parser.parse(self.data[\"Date Read\"]))\n return None\n\n @property\n def reads(self):\n \"\"\"formats a read through dataset for the book in this line\"\"\"\n start_date = self.date_started\n\n # Goodreads special case (no 'date started' field)\n if (\n (self.shelf == \"reading\" or (self.shelf == \"read\" and self.date_read))\n and self.date_added\n and not start_date\n ):\n start_date = self.date_added\n\n if start_date and start_date is not None and not 
self.date_read:\n return [ReadThrough(start_date=start_date)]\n if self.date_read:\n return [\n ReadThrough(\n start_date=start_date,\n finish_date=self.date_read,\n )\n ]\n return []\n\n def __repr__(self):\n return \"<{!r}Item {!r}>\".format(self.data[\"import_source\"], self.data[\"Title\"])\n\n def __str__(self):\n return \"{} by {}\".format(self.data[\"Title\"], self.data[\"Author\"])\n", "path": "bookwyrm/models/import_job.py"}], "after_files": [{"content": "\"\"\" track progress of goodreads imports \"\"\"\nimport re\nimport dateutil.parser\n\nfrom django.apps import apps\nfrom django.db import models\nfrom django.utils import timezone\n\nfrom bookwyrm.connectors import connector_manager\nfrom bookwyrm.models import ReadThrough, User, Book\nfrom .fields import PrivacyLevels\n\n\n# Mapping goodreads -> bookwyrm shelf titles.\nGOODREADS_SHELVES = {\n \"read\": \"read\",\n \"currently-reading\": \"reading\",\n \"to-read\": \"to-read\",\n}\n\n\ndef unquote_string(text):\n \"\"\"resolve csv quote weirdness\"\"\"\n match = re.match(r'=\"([^\"]*)\"', text)\n if match:\n return match.group(1)\n return text\n\n\ndef construct_search_term(title, author):\n \"\"\"formulate a query for the data connector\"\"\"\n # Strip brackets (usually series title from search term)\n title = re.sub(r\"\\s*\\([^)]*\\)\\s*\", \"\", title)\n # Open library doesn't like including author initials in search term.\n author = re.sub(r\"(\\w\\.)+\\s*\", \"\", author)\n\n return \" \".join([title, author])\n\n\nclass ImportJob(models.Model):\n \"\"\"entry for a specific request for book data import\"\"\"\n\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n created_date = models.DateTimeField(default=timezone.now)\n task_id = models.CharField(max_length=100, null=True)\n include_reviews = models.BooleanField(default=True)\n complete = models.BooleanField(default=False)\n privacy = models.CharField(\n max_length=255, default=\"public\", choices=PrivacyLevels.choices\n )\n retry = models.BooleanField(default=False)\n\n def save(self, *args, **kwargs):\n \"\"\"save and notify\"\"\"\n super().save(*args, **kwargs)\n if self.complete:\n notification_model = apps.get_model(\n \"bookwyrm.Notification\", require_ready=True\n )\n notification_model.objects.create(\n user=self.user,\n notification_type=\"IMPORT\",\n related_import=self,\n )\n\n\nclass ImportItem(models.Model):\n \"\"\"a single line of a csv being imported\"\"\"\n\n job = models.ForeignKey(ImportJob, on_delete=models.CASCADE, related_name=\"items\")\n index = models.IntegerField()\n data = models.JSONField()\n book = models.ForeignKey(Book, on_delete=models.SET_NULL, null=True, blank=True)\n fail_reason = models.TextField(null=True)\n\n def resolve(self):\n \"\"\"try various ways to lookup a book\"\"\"\n if self.isbn:\n self.book = self.get_book_from_isbn()\n else:\n # don't fall back on title/author search is isbn is present.\n # you're too likely to mismatch\n self.get_book_from_title_author()\n\n def get_book_from_isbn(self):\n \"\"\"search by isbn\"\"\"\n search_result = connector_manager.first_search_result(\n self.isbn, min_confidence=0.999\n )\n if search_result:\n # raises ConnectorException\n return search_result.connector.get_or_create_book(search_result.key)\n return None\n\n def get_book_from_title_author(self):\n \"\"\"search by title and author\"\"\"\n search_term = construct_search_term(self.title, self.author)\n search_result = connector_manager.first_search_result(\n search_term, min_confidence=0.999\n )\n if search_result:\n # raises 
ConnectorException\n return search_result.connector.get_or_create_book(search_result.key)\n return None\n\n @property\n def title(self):\n \"\"\"get the book title\"\"\"\n return self.data[\"Title\"]\n\n @property\n def author(self):\n \"\"\"get the book title\"\"\"\n return self.data[\"Author\"]\n\n @property\n def isbn(self):\n \"\"\"pulls out the isbn13 field from the csv line data\"\"\"\n return unquote_string(self.data[\"ISBN13\"])\n\n @property\n def shelf(self):\n \"\"\"the goodreads shelf field\"\"\"\n if self.data[\"Exclusive Shelf\"]:\n return GOODREADS_SHELVES.get(self.data[\"Exclusive Shelf\"])\n return None\n\n @property\n def review(self):\n \"\"\"a user-written review, to be imported with the book data\"\"\"\n return self.data[\"My Review\"]\n\n @property\n def rating(self):\n \"\"\"x/5 star rating for a book\"\"\"\n if self.data.get(\"My Rating\", None):\n return int(self.data[\"My Rating\"])\n return None\n\n @property\n def date_added(self):\n \"\"\"when the book was added to this dataset\"\"\"\n if self.data[\"Date Added\"]:\n return timezone.make_aware(dateutil.parser.parse(self.data[\"Date Added\"]))\n return None\n\n @property\n def date_started(self):\n \"\"\"when the book was started\"\"\"\n if \"Date Started\" in self.data and self.data[\"Date Started\"]:\n return timezone.make_aware(dateutil.parser.parse(self.data[\"Date Started\"]))\n return None\n\n @property\n def date_read(self):\n \"\"\"the date a book was completed\"\"\"\n if self.data[\"Date Read\"]:\n return timezone.make_aware(dateutil.parser.parse(self.data[\"Date Read\"]))\n return None\n\n @property\n def reads(self):\n \"\"\"formats a read through dataset for the book in this line\"\"\"\n start_date = self.date_started\n\n # Goodreads special case (no 'date started' field)\n if (\n (self.shelf == \"reading\" or (self.shelf == \"read\" and self.date_read))\n and self.date_added\n and not start_date\n ):\n start_date = self.date_added\n\n if start_date and start_date is not None and not self.date_read:\n return [ReadThrough(start_date=start_date)]\n if self.date_read:\n return [\n ReadThrough(\n start_date=start_date,\n finish_date=self.date_read,\n )\n ]\n return []\n\n def __repr__(self):\n return \"<{!r}Item {!r}>\".format(self.data[\"import_source\"], self.data[\"Title\"])\n\n def __str__(self):\n return \"{} by {}\".format(self.data[\"Title\"], self.data[\"Author\"])\n", "path": "bookwyrm/models/import_job.py"}]} | 2,409 | 164 |
gh_patches_debug_594 | rasdani/github-patches | git_diff | pex-tool__pex-1057 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 2.1.17
On the docket:
+ [x] TypeError when resolving local platforms. #1043
+ [x] No such file for interpreter's binary name #1009
+ [x] Pex resources leak while bootstrapping pants #1050
+ [x] Pex PEX perf regression #1054
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.16"
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.16"
+__version__ = "2.1.17"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.16\"\n+__version__ = \"2.1.17\"\n", "issue": "Release 2.1.17\nOn the docket:\r\n+ [x] TypeError when resolving local platforms. #1043\r\n+ [x] No such file for interpreter's binary name #1009\r\n+ [x] Pex resources leak while bootstrapping pants #1050\r\n+ [x] Pex PEX perf regression #1054\r\n\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.16\"\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.17\"\n", "path": "pex/version.py"}]} | 391 | 96 |
gh_patches_debug_39575 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-3586 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Implement `columns.delete` RPC method
Replaces `DELETE /api/db/v0/tables/{tableId}/columns/{columnId}`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `db/sql/install.py`
Content:
```
1 import os
2 from db.connection import load_file_with_engine
3
4 FILE_DIR = os.path.abspath(os.path.dirname(__file__))
5 MSAR_SQL = os.path.join(FILE_DIR, '0_msar.sql')
6 MSAR_AGGREGATE_SQL = os.path.join(FILE_DIR, '3_msar_custom_aggregates.sql')
7
8
9 def install(engine):
10 """Install SQL pieces using the given engine."""
11 with open(MSAR_SQL) as file_handle:
12 load_file_with_engine(engine, file_handle)
13 with open(MSAR_AGGREGATE_SQL) as custom_aggregates:
14 load_file_with_engine(engine, custom_aggregates)
15
```
Path: `mathesar/rpc/columns.py`
Content:
```
1 """
2 Classes and functions exposed to the RPC endpoint for managing table columns.
3 """
4 from typing import TypedDict
5
6 from modernrpc.core import rpc_method, REQUEST_KEY
7 from modernrpc.auth.basic import http_basic_auth_login_required
8
9 from db.columns.operations.select import get_column_info_for_table
10 from mathesar.rpc.exceptions.handlers import handle_rpc_exceptions
11 from mathesar.rpc.utils import connect
12 from mathesar.utils.columns import get_raw_display_options
13
14
15 class TypeOptions(TypedDict, total=False):
16 """
17 Options applied to a type. All attributes are optional.
18
19 Take special care with the difference between numeric and date/time
20 types w.r.t. precision. The attribute has a different meaning
21 depending on the type to which it's being applied.
22
23 Attributes:
24 precision: For numeric types, the number of significant digits.
25 For date/time types, the number of fractional digits.
26 scale: For numeric types, the number of fractional digits.
27 fields: Which time fields are stored. See Postgres docs.
28 length: The maximum length of a character-type field.
29 item_type: The member type for arrays.
30 """
31 precision: int
32 scale: int
33 fields: str
34 length: int
35 item_type: str
36
37 @classmethod
38 def from_dict(cls, type_options):
39 if type_options is None:
40 return
41 # All keys are optional, but we want to validate the keys we
42 # actually return.
43 all_keys = dict(
44 precision=type_options.get("precision"),
45 scale=type_options.get("scale"),
46 fields=type_options.get("fields"),
47 length=type_options.get("length"),
48 item_type=type_options.get("item_type"),
49 )
50 reduced_keys = {k: v for k, v in all_keys.items() if v is not None}
51 if reduced_keys != {}:
52 return cls(**reduced_keys)
53
54
55 class ColumnDefault(TypedDict):
56 """
57 A dictionary describing the default value for a column.
58
59 Attributes:
60 value: An SQL expression giving the default value.
61 is_dynamic: Whether the `value` is possibly dynamic.
62 """
63 value: str
64 is_dynamic: bool
65
66 @classmethod
67 def from_dict(cls, col_default):
68 if col_default is not None:
69 return cls(
70 value=col_default["value"],
71 is_dynamic=col_default["is_dynamic"],
72 )
73
74
75 class ColumnInfo(TypedDict):
76 """
77 Information about a column.
78
79 Attributes:
80 id: The `attnum` of the column in the table.
81 name: The name of the column.
82 type: The type of the column on the database.
83 type_options: The options applied to the column type.
84 nullable: Whether or not the column is nullable.
85 primary_key: Whether the column is in the primary key.
86 default: The default value and whether it's dynamic.
87 has_dependents: Whether the column has dependent objects.
88 description: The description of the column.
89 """
90 id: int
91 name: str
92 type: str
93 type_options: TypeOptions
94 nullable: bool
95 primary_key: bool
96 default: ColumnDefault
97 has_dependents: bool
98 description: str
99
100 @classmethod
101 def from_dict(cls, col_info):
102 return cls(
103 id=col_info["id"],
104 name=col_info["name"],
105 type=col_info["type"],
106 type_options=TypeOptions.from_dict(col_info.get("type_options")),
107 nullable=col_info["nullable"],
108 primary_key=col_info["primary_key"],
109 default=ColumnDefault.from_dict(col_info.get("default")),
110 has_dependents=col_info["has_dependents"],
111 description=col_info.get("description")
112 )
113
114
115 class ColumnListReturn(TypedDict):
116 """
117 Information about the columns of a table.
118
119 Attributes:
120 column_info: Column information from the user's database.
121 display_options: Display metadata managed by Mathesar.
122 """
123 column_info: list[ColumnInfo]
124 display_options: list[dict]
125
126
127 @rpc_method(name="columns.list")
128 @http_basic_auth_login_required
129 @handle_rpc_exceptions
130 def list_(*, table_oid: int, database_id: int, **kwargs) -> ColumnListReturn:
131 """
132 List information about columns for a table. Exposed as `list`.
133
134 Also return display options for each column, if they're defined.
135
136 Args:
137 table_oid: Identity of the table in the user's database.
138 database_id: The Django id of the database containing the table.
139
140 Returns:
141 A list of column details, and a separate list of display options.
142 """
143 user = kwargs.get(REQUEST_KEY).user
144 with connect(database_id, user) as conn:
145 raw_column_info = get_column_info_for_table(table_oid, conn)
146 column_info, attnums = tuple(
147 zip(
148 *[(ColumnInfo.from_dict(col), col['id']) for col in raw_column_info]
149 )
150 )
151 display_options = get_raw_display_options(
152 database_id, table_oid, attnums, user
153 )
154 return ColumnListReturn(
155 column_info=column_info,
156 display_options=display_options,
157 )
158
```
Path: `db/columns/operations/drop.py`
Content:
```
1 """The function in this module wraps SQL functions that drop columns."""
2 from db import connection as db_conn
3
4
5 def drop_column(table_oid, column_attnum, engine):
6 """
7 Drop the given columns from the given table.
8
9 Args:
10 table_oid: OID of the table whose columns we'll drop.
11 column_attnum: The attnums of the columns to drop.
12 engine: SQLAlchemy engine object for connecting.
13
14 Returns:
15 Returns a string giving the command that was run.
16 """
17 return db_conn.execute_msar_func_with_engine(
18 engine, 'drop_columns', table_oid, column_attnum
19 ).fetchone()[0]
20
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/db/columns/operations/drop.py b/db/columns/operations/drop.py
--- a/db/columns/operations/drop.py
+++ b/db/columns/operations/drop.py
@@ -1,14 +1,14 @@
-"""The function in this module wraps SQL functions that drop columns."""
+"""The functions in this module wrap SQL functions that drop columns."""
from db import connection as db_conn
def drop_column(table_oid, column_attnum, engine):
"""
- Drop the given columns from the given table.
+ Drop the given column from the given table.
Args:
- table_oid: OID of the table whose columns we'll drop.
- column_attnum: The attnums of the columns to drop.
+ table_oid: OID of the table whose column we'll drop.
+ column_attnum: The attnum of the column to drop.
engine: SQLAlchemy engine object for connecting.
Returns:
@@ -17,3 +17,17 @@
return db_conn.execute_msar_func_with_engine(
engine, 'drop_columns', table_oid, column_attnum
).fetchone()[0]
+
+
+def drop_columns_from_table(table_oid, column_attnums, conn):
+ """
+ Drop the given columns from the given table.
+
+ Args:
+ table_oid: OID of the table whose columns we'll drop.
+ column_attnums: The attnums of the columns to drop.
+ conn: A psycopg connection to the relevant database.
+ """
+ return db_conn.exec_msar_func(
+ conn, 'drop_columns', table_oid, *column_attnums
+ ).fetchone()[0]
diff --git a/db/sql/install.py b/db/sql/install.py
--- a/db/sql/install.py
+++ b/db/sql/install.py
@@ -2,8 +2,8 @@
from db.connection import load_file_with_engine
FILE_DIR = os.path.abspath(os.path.dirname(__file__))
-MSAR_SQL = os.path.join(FILE_DIR, '0_msar.sql')
-MSAR_AGGREGATE_SQL = os.path.join(FILE_DIR, '3_msar_custom_aggregates.sql')
+MSAR_SQL = os.path.join(FILE_DIR, '00_msar.sql')
+MSAR_AGGREGATE_SQL = os.path.join(FILE_DIR, '30_msar_custom_aggregates.sql')
def install(engine):
diff --git a/mathesar/rpc/columns.py b/mathesar/rpc/columns.py
--- a/mathesar/rpc/columns.py
+++ b/mathesar/rpc/columns.py
@@ -7,6 +7,7 @@
from modernrpc.auth.basic import http_basic_auth_login_required
from db.columns.operations.select import get_column_info_for_table
+from db.columns.operations.drop import drop_columns_from_table
from mathesar.rpc.exceptions.handlers import handle_rpc_exceptions
from mathesar.rpc.utils import connect
from mathesar.utils.columns import get_raw_display_options
@@ -155,3 +156,25 @@
column_info=column_info,
display_options=display_options,
)
+
+
+@rpc_method(name="columns.delete")
+@http_basic_auth_login_required
+@handle_rpc_exceptions
+def delete(
+ *, column_attnums: list[int], table_oid: int, database_id: int, **kwargs
+) -> int:
+ """
+ Delete columns from a table.
+
+ Args:
+ column_attnums: A list of attnums of columns to delete.
+ table_oid: Identity of the table in the user's database.
+ database_id: The Django id of the database containing the table.
+
+ Returns:
+ The number of columns dropped.
+ """
+ user = kwargs.get(REQUEST_KEY).user
+ with connect(database_id, user) as conn:
+ return drop_columns_from_table(table_oid, column_attnums, conn)
| {"golden_diff": "diff --git a/db/columns/operations/drop.py b/db/columns/operations/drop.py\n--- a/db/columns/operations/drop.py\n+++ b/db/columns/operations/drop.py\n@@ -1,14 +1,14 @@\n-\"\"\"The function in this module wraps SQL functions that drop columns.\"\"\"\n+\"\"\"The functions in this module wrap SQL functions that drop columns.\"\"\"\n from db import connection as db_conn\n \n \n def drop_column(table_oid, column_attnum, engine):\n \"\"\"\n- Drop the given columns from the given table.\n+ Drop the given column from the given table.\n \n Args:\n- table_oid: OID of the table whose columns we'll drop.\n- column_attnum: The attnums of the columns to drop.\n+ table_oid: OID of the table whose column we'll drop.\n+ column_attnum: The attnum of the column to drop.\n engine: SQLAlchemy engine object for connecting.\n \n Returns:\n@@ -17,3 +17,17 @@\n return db_conn.execute_msar_func_with_engine(\n engine, 'drop_columns', table_oid, column_attnum\n ).fetchone()[0]\n+\n+\n+def drop_columns_from_table(table_oid, column_attnums, conn):\n+ \"\"\"\n+ Drop the given columns from the given table.\n+\n+ Args:\n+ table_oid: OID of the table whose columns we'll drop.\n+ column_attnums: The attnums of the columns to drop.\n+ conn: A psycopg connection to the relevant database.\n+ \"\"\"\n+ return db_conn.exec_msar_func(\n+ conn, 'drop_columns', table_oid, *column_attnums\n+ ).fetchone()[0]\ndiff --git a/db/sql/install.py b/db/sql/install.py\n--- a/db/sql/install.py\n+++ b/db/sql/install.py\n@@ -2,8 +2,8 @@\n from db.connection import load_file_with_engine\n \n FILE_DIR = os.path.abspath(os.path.dirname(__file__))\n-MSAR_SQL = os.path.join(FILE_DIR, '0_msar.sql')\n-MSAR_AGGREGATE_SQL = os.path.join(FILE_DIR, '3_msar_custom_aggregates.sql')\n+MSAR_SQL = os.path.join(FILE_DIR, '00_msar.sql')\n+MSAR_AGGREGATE_SQL = os.path.join(FILE_DIR, '30_msar_custom_aggregates.sql')\n \n \n def install(engine):\ndiff --git a/mathesar/rpc/columns.py b/mathesar/rpc/columns.py\n--- a/mathesar/rpc/columns.py\n+++ b/mathesar/rpc/columns.py\n@@ -7,6 +7,7 @@\n from modernrpc.auth.basic import http_basic_auth_login_required\n \n from db.columns.operations.select import get_column_info_for_table\n+from db.columns.operations.drop import drop_columns_from_table\n from mathesar.rpc.exceptions.handlers import handle_rpc_exceptions\n from mathesar.rpc.utils import connect\n from mathesar.utils.columns import get_raw_display_options\n@@ -155,3 +156,25 @@\n column_info=column_info,\n display_options=display_options,\n )\n+\n+\n+@rpc_method(name=\"columns.delete\")\n+@http_basic_auth_login_required\n+@handle_rpc_exceptions\n+def delete(\n+ *, column_attnums: list[int], table_oid: int, database_id: int, **kwargs\n+) -> int:\n+ \"\"\"\n+ Delete columns from a table.\n+\n+ Args:\n+ column_attnums: A list of attnums of columns to delete.\n+ table_oid: Identity of the table in the user's database.\n+ database_id: The Django id of the database containing the table.\n+\n+ Returns:\n+ The number of columns dropped.\n+ \"\"\"\n+ user = kwargs.get(REQUEST_KEY).user\n+ with connect(database_id, user) as conn:\n+ return drop_columns_from_table(table_oid, column_attnums, conn)\n", "issue": "Implement `columns.delete` RPC method\nReplaces `DELETE /api/db/v0/tables/{tableId}/columns/{columnId}`\n", "before_files": [{"content": "import os\nfrom db.connection import load_file_with_engine\n\nFILE_DIR = os.path.abspath(os.path.dirname(__file__))\nMSAR_SQL = os.path.join(FILE_DIR, '0_msar.sql')\nMSAR_AGGREGATE_SQL = os.path.join(FILE_DIR, 
'3_msar_custom_aggregates.sql')\n\n\ndef install(engine):\n \"\"\"Install SQL pieces using the given engine.\"\"\"\n with open(MSAR_SQL) as file_handle:\n load_file_with_engine(engine, file_handle)\n with open(MSAR_AGGREGATE_SQL) as custom_aggregates:\n load_file_with_engine(engine, custom_aggregates)\n", "path": "db/sql/install.py"}, {"content": "\"\"\"\nClasses and functions exposed to the RPC endpoint for managing table columns.\n\"\"\"\nfrom typing import TypedDict\n\nfrom modernrpc.core import rpc_method, REQUEST_KEY\nfrom modernrpc.auth.basic import http_basic_auth_login_required\n\nfrom db.columns.operations.select import get_column_info_for_table\nfrom mathesar.rpc.exceptions.handlers import handle_rpc_exceptions\nfrom mathesar.rpc.utils import connect\nfrom mathesar.utils.columns import get_raw_display_options\n\n\nclass TypeOptions(TypedDict, total=False):\n \"\"\"\n Options applied to a type. All attributes are optional.\n\n Take special care with the difference between numeric and date/time\n types w.r.t. precision. The attribute has a different meaning\n depending on the type to which it's being applied.\n\n Attributes:\n precision: For numeric types, the number of significant digits.\n For date/time types, the number of fractional digits.\n scale: For numeric types, the number of fractional digits.\n fields: Which time fields are stored. See Postgres docs.\n length: The maximum length of a character-type field.\n item_type: The member type for arrays.\n \"\"\"\n precision: int\n scale: int\n fields: str\n length: int\n item_type: str\n\n @classmethod\n def from_dict(cls, type_options):\n if type_options is None:\n return\n # All keys are optional, but we want to validate the keys we\n # actually return.\n all_keys = dict(\n precision=type_options.get(\"precision\"),\n scale=type_options.get(\"scale\"),\n fields=type_options.get(\"fields\"),\n length=type_options.get(\"length\"),\n item_type=type_options.get(\"item_type\"),\n )\n reduced_keys = {k: v for k, v in all_keys.items() if v is not None}\n if reduced_keys != {}:\n return cls(**reduced_keys)\n\n\nclass ColumnDefault(TypedDict):\n \"\"\"\n A dictionary describing the default value for a column.\n\n Attributes:\n value: An SQL expression giving the default value.\n is_dynamic: Whether the `value` is possibly dynamic.\n \"\"\"\n value: str\n is_dynamic: bool\n\n @classmethod\n def from_dict(cls, col_default):\n if col_default is not None:\n return cls(\n value=col_default[\"value\"],\n is_dynamic=col_default[\"is_dynamic\"],\n )\n\n\nclass ColumnInfo(TypedDict):\n \"\"\"\n Information about a column.\n\n Attributes:\n id: The `attnum` of the column in the table.\n name: The name of the column.\n type: The type of the column on the database.\n type_options: The options applied to the column type.\n nullable: Whether or not the column is nullable.\n primary_key: Whether the column is in the primary key.\n default: The default value and whether it's dynamic.\n has_dependents: Whether the column has dependent objects.\n description: The description of the column.\n \"\"\"\n id: int\n name: str\n type: str\n type_options: TypeOptions\n nullable: bool\n primary_key: bool\n default: ColumnDefault\n has_dependents: bool\n description: str\n\n @classmethod\n def from_dict(cls, col_info):\n return cls(\n id=col_info[\"id\"],\n name=col_info[\"name\"],\n type=col_info[\"type\"],\n type_options=TypeOptions.from_dict(col_info.get(\"type_options\")),\n nullable=col_info[\"nullable\"],\n primary_key=col_info[\"primary_key\"],\n 
default=ColumnDefault.from_dict(col_info.get(\"default\")),\n has_dependents=col_info[\"has_dependents\"],\n description=col_info.get(\"description\")\n )\n\n\nclass ColumnListReturn(TypedDict):\n \"\"\"\n Information about the columns of a table.\n\n Attributes:\n column_info: Column information from the user's database.\n display_options: Display metadata managed by Mathesar.\n \"\"\"\n column_info: list[ColumnInfo]\n display_options: list[dict]\n\n\n@rpc_method(name=\"columns.list\")\n@http_basic_auth_login_required\n@handle_rpc_exceptions\ndef list_(*, table_oid: int, database_id: int, **kwargs) -> ColumnListReturn:\n \"\"\"\n List information about columns for a table. Exposed as `list`.\n\n Also return display options for each column, if they're defined.\n\n Args:\n table_oid: Identity of the table in the user's database.\n database_id: The Django id of the database containing the table.\n\n Returns:\n A list of column details, and a separate list of display options.\n \"\"\"\n user = kwargs.get(REQUEST_KEY).user\n with connect(database_id, user) as conn:\n raw_column_info = get_column_info_for_table(table_oid, conn)\n column_info, attnums = tuple(\n zip(\n *[(ColumnInfo.from_dict(col), col['id']) for col in raw_column_info]\n )\n )\n display_options = get_raw_display_options(\n database_id, table_oid, attnums, user\n )\n return ColumnListReturn(\n column_info=column_info,\n display_options=display_options,\n )\n", "path": "mathesar/rpc/columns.py"}, {"content": "\"\"\"The function in this module wraps SQL functions that drop columns.\"\"\"\nfrom db import connection as db_conn\n\n\ndef drop_column(table_oid, column_attnum, engine):\n \"\"\"\n Drop the given columns from the given table.\n\n Args:\n table_oid: OID of the table whose columns we'll drop.\n column_attnum: The attnums of the columns to drop.\n engine: SQLAlchemy engine object for connecting.\n\n Returns:\n Returns a string giving the command that was run.\n \"\"\"\n return db_conn.execute_msar_func_with_engine(\n engine, 'drop_columns', table_oid, column_attnum\n ).fetchone()[0]\n", "path": "db/columns/operations/drop.py"}], "after_files": [{"content": "import os\nfrom db.connection import load_file_with_engine\n\nFILE_DIR = os.path.abspath(os.path.dirname(__file__))\nMSAR_SQL = os.path.join(FILE_DIR, '00_msar.sql')\nMSAR_AGGREGATE_SQL = os.path.join(FILE_DIR, '30_msar_custom_aggregates.sql')\n\n\ndef install(engine):\n \"\"\"Install SQL pieces using the given engine.\"\"\"\n with open(MSAR_SQL) as file_handle:\n load_file_with_engine(engine, file_handle)\n with open(MSAR_AGGREGATE_SQL) as custom_aggregates:\n load_file_with_engine(engine, custom_aggregates)\n", "path": "db/sql/install.py"}, {"content": "\"\"\"\nClasses and functions exposed to the RPC endpoint for managing table columns.\n\"\"\"\nfrom typing import TypedDict\n\nfrom modernrpc.core import rpc_method, REQUEST_KEY\nfrom modernrpc.auth.basic import http_basic_auth_login_required\n\nfrom db.columns.operations.select import get_column_info_for_table\nfrom db.columns.operations.drop import drop_columns_from_table\nfrom mathesar.rpc.exceptions.handlers import handle_rpc_exceptions\nfrom mathesar.rpc.utils import connect\nfrom mathesar.utils.columns import get_raw_display_options\n\n\nclass TypeOptions(TypedDict, total=False):\n \"\"\"\n Options applied to a type. All attributes are optional.\n\n Take special care with the difference between numeric and date/time\n types w.r.t. precision. 
The attribute has a different meaning\n depending on the type to which it's being applied.\n\n Attributes:\n precision: For numeric types, the number of significant digits.\n For date/time types, the number of fractional digits.\n scale: For numeric types, the number of fractional digits.\n fields: Which time fields are stored. See Postgres docs.\n length: The maximum length of a character-type field.\n item_type: The member type for arrays.\n \"\"\"\n precision: int\n scale: int\n fields: str\n length: int\n item_type: str\n\n @classmethod\n def from_dict(cls, type_options):\n if type_options is None:\n return\n # All keys are optional, but we want to validate the keys we\n # actually return.\n all_keys = dict(\n precision=type_options.get(\"precision\"),\n scale=type_options.get(\"scale\"),\n fields=type_options.get(\"fields\"),\n length=type_options.get(\"length\"),\n item_type=type_options.get(\"item_type\"),\n )\n reduced_keys = {k: v for k, v in all_keys.items() if v is not None}\n if reduced_keys != {}:\n return cls(**reduced_keys)\n\n\nclass ColumnDefault(TypedDict):\n \"\"\"\n A dictionary describing the default value for a column.\n\n Attributes:\n value: An SQL expression giving the default value.\n is_dynamic: Whether the `value` is possibly dynamic.\n \"\"\"\n value: str\n is_dynamic: bool\n\n @classmethod\n def from_dict(cls, col_default):\n if col_default is not None:\n return cls(\n value=col_default[\"value\"],\n is_dynamic=col_default[\"is_dynamic\"],\n )\n\n\nclass ColumnInfo(TypedDict):\n \"\"\"\n Information about a column.\n\n Attributes:\n id: The `attnum` of the column in the table.\n name: The name of the column.\n type: The type of the column on the database.\n type_options: The options applied to the column type.\n nullable: Whether or not the column is nullable.\n primary_key: Whether the column is in the primary key.\n default: The default value and whether it's dynamic.\n has_dependents: Whether the column has dependent objects.\n description: The description of the column.\n \"\"\"\n id: int\n name: str\n type: str\n type_options: TypeOptions\n nullable: bool\n primary_key: bool\n default: ColumnDefault\n has_dependents: bool\n description: str\n\n @classmethod\n def from_dict(cls, col_info):\n return cls(\n id=col_info[\"id\"],\n name=col_info[\"name\"],\n type=col_info[\"type\"],\n type_options=TypeOptions.from_dict(col_info.get(\"type_options\")),\n nullable=col_info[\"nullable\"],\n primary_key=col_info[\"primary_key\"],\n default=ColumnDefault.from_dict(col_info.get(\"default\")),\n has_dependents=col_info[\"has_dependents\"],\n description=col_info.get(\"description\")\n )\n\n\nclass ColumnListReturn(TypedDict):\n \"\"\"\n Information about the columns of a table.\n\n Attributes:\n column_info: Column information from the user's database.\n display_options: Display metadata managed by Mathesar.\n \"\"\"\n column_info: list[ColumnInfo]\n display_options: list[dict]\n\n\n@rpc_method(name=\"columns.list\")\n@http_basic_auth_login_required\n@handle_rpc_exceptions\ndef list_(*, table_oid: int, database_id: int, **kwargs) -> ColumnListReturn:\n \"\"\"\n List information about columns for a table. 
Exposed as `list`.\n\n Also return display options for each column, if they're defined.\n\n Args:\n table_oid: Identity of the table in the user's database.\n database_id: The Django id of the database containing the table.\n\n Returns:\n A list of column details, and a separate list of display options.\n \"\"\"\n user = kwargs.get(REQUEST_KEY).user\n with connect(database_id, user) as conn:\n raw_column_info = get_column_info_for_table(table_oid, conn)\n column_info, attnums = tuple(\n zip(\n *[(ColumnInfo.from_dict(col), col['id']) for col in raw_column_info]\n )\n )\n display_options = get_raw_display_options(\n database_id, table_oid, attnums, user\n )\n return ColumnListReturn(\n column_info=column_info,\n display_options=display_options,\n )\n\n\n@rpc_method(name=\"columns.delete\")\n@http_basic_auth_login_required\n@handle_rpc_exceptions\ndef delete(\n *, column_attnums: list[int], table_oid: int, database_id: int, **kwargs\n) -> int:\n \"\"\"\n Delete columns from a table.\n\n Args:\n column_attnums: A list of attnums of columns to delete.\n table_oid: Identity of the table in the user's database.\n database_id: The Django id of the database containing the table.\n\n Returns:\n The number of columns dropped.\n \"\"\"\n user = kwargs.get(REQUEST_KEY).user\n with connect(database_id, user) as conn:\n return drop_columns_from_table(table_oid, column_attnums, conn)\n", "path": "mathesar/rpc/columns.py"}, {"content": "\"\"\"The functions in this module wrap SQL functions that drop columns.\"\"\"\nfrom db import connection as db_conn\n\n\ndef drop_column(table_oid, column_attnum, engine):\n \"\"\"\n Drop the given column from the given table.\n\n Args:\n table_oid: OID of the table whose column we'll drop.\n column_attnum: The attnum of the column to drop.\n engine: SQLAlchemy engine object for connecting.\n\n Returns:\n Returns a string giving the command that was run.\n \"\"\"\n return db_conn.execute_msar_func_with_engine(\n engine, 'drop_columns', table_oid, column_attnum\n ).fetchone()[0]\n\n\ndef drop_columns_from_table(table_oid, column_attnums, conn):\n \"\"\"\n Drop the given columns from the given table.\n\n Args:\n table_oid: OID of the table whose columns we'll drop.\n column_attnums: The attnums of the columns to drop.\n conn: A psycopg connection to the relevant database.\n \"\"\"\n return db_conn.exec_msar_func(\n conn, 'drop_columns', table_oid, *column_attnums\n ).fetchone()[0]\n", "path": "db/columns/operations/drop.py"}]} | 2,127 | 847 |
gh_patches_debug_31953 | rasdani/github-patches | git_diff | goauthentik__authentik-5927 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
LDAP Source Bind from Federation & Social login is Broken Since Commit 1ca8feb
**Describe the bug**
LDAP Sources are broken since commit 1ca8feb. This is due to a double bind that is attempted on the Connection object. The second bind consistently causes an LDAPInvalidCredentialsResult exception even if the first bind was successful. I found this bug after updating Authentik to 2023.5.3 and wondering why LDAP logins no longer worked. The LDAP backend is consistently changing passwords due to TOTP, so a cached password was never hit, always forcing an LDAP bind, which is what made me find this issue.
**To Reproduce**
1. Set up an LDAP Source that works
2. Log in as any user using that user's LDAP password, one that is not cached
3. The user always gets Invalid Credentials
**Expected behavior**
The user should log in successfully
**Logs**
None Provided
**Version and Deployment (please complete the following information):**
- authentik version: 2023.5.3
- Deployment: docker-compose
**Additional context**
I confirmed this bug on my own stack by creating multiple logging lines using LOGGER and checking DEBUG output. For some reason a second bind always causes this exception with Authentik if the first was successful. I'm not sure if the state is reset per successful connection, which may be what is causing this. But by removing the extra bind performed in /authentik/sources/ldap/auth.py, I no longer get erroneous invalid-credential exceptions.
--- END ISSUE ---
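For readers less familiar with `ldap3`, the pattern the report describes boils down to the minimal sketch below. The host, bind DN and password are placeholders, and the second `bind()` stands in for the redundant call in `auth.py`; the sketch only illustrates the reported scenario, it is not taken from the repository.

```python
from ldap3 import Server, Connection
from ldap3.core.exceptions import LDAPInvalidCredentialsResult

server = Server("ldap://ldap.example.com")           # placeholder server
conn = Connection(
    server,
    user="cn=jane,dc=example,dc=com",                # placeholder bind DN
    password="secret",                               # placeholder password
    raise_exceptions=True,                           # failed operations raise
)

try:
    conn.bind()   # first bind: succeeds when the credentials are valid
    conn.bind()   # redundant second bind: the call the report blames for the
                  # spurious LDAPInvalidCredentialsResult
except LDAPInvalidCredentialsResult:
    print("bind rejected")
```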
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `authentik/sources/ldap/models.py`
Content:
```
1 """authentik LDAP Models"""
2 from ssl import CERT_REQUIRED
3 from typing import Optional
4
5 from django.db import models
6 from django.utils.translation import gettext_lazy as _
7 from ldap3 import ALL, NONE, RANDOM, Connection, Server, ServerPool, Tls
8 from ldap3.core.exceptions import LDAPSchemaError
9 from rest_framework.serializers import Serializer
10
11 from authentik.core.models import Group, PropertyMapping, Source
12 from authentik.crypto.models import CertificateKeyPair
13 from authentik.lib.config import CONFIG
14 from authentik.lib.models import DomainlessURLValidator
15
16 LDAP_TIMEOUT = 15
17
18
19 class MultiURLValidator(DomainlessURLValidator):
20 """Same as DomainlessURLValidator but supports multiple URLs separated with a comma."""
21
22 def __call__(self, value: str):
23 if "," in value:
24 for url in value.split(","):
25 super().__call__(url)
26 else:
27 super().__call__(value)
28
29
30 class LDAPSource(Source):
31 """Federate LDAP Directory with authentik, or create new accounts in LDAP."""
32
33 server_uri = models.TextField(
34 validators=[MultiURLValidator(schemes=["ldap", "ldaps"])],
35 verbose_name=_("Server URI"),
36 )
37 peer_certificate = models.ForeignKey(
38 CertificateKeyPair,
39 on_delete=models.SET_DEFAULT,
40 default=None,
41 null=True,
42 help_text=_(
43 "Optionally verify the LDAP Server's Certificate against the CA Chain in this keypair."
44 ),
45 )
46
47 bind_cn = models.TextField(verbose_name=_("Bind CN"), blank=True)
48 bind_password = models.TextField(blank=True)
49 start_tls = models.BooleanField(default=False, verbose_name=_("Enable Start TLS"))
50
51 base_dn = models.TextField(verbose_name=_("Base DN"))
52 additional_user_dn = models.TextField(
53 help_text=_("Prepended to Base DN for User-queries."),
54 verbose_name=_("Addition User DN"),
55 blank=True,
56 )
57 additional_group_dn = models.TextField(
58 help_text=_("Prepended to Base DN for Group-queries."),
59 verbose_name=_("Addition Group DN"),
60 blank=True,
61 )
62
63 user_object_filter = models.TextField(
64 default="(objectClass=person)",
65 help_text=_("Consider Objects matching this filter to be Users."),
66 )
67 group_membership_field = models.TextField(
68 default="member", help_text=_("Field which contains members of a group.")
69 )
70 group_object_filter = models.TextField(
71 default="(objectClass=group)",
72 help_text=_("Consider Objects matching this filter to be Groups."),
73 )
74 object_uniqueness_field = models.TextField(
75 default="objectSid", help_text=_("Field which contains a unique Identifier.")
76 )
77
78 property_mappings_group = models.ManyToManyField(
79 PropertyMapping,
80 default=None,
81 blank=True,
82 help_text=_("Property mappings used for group creation/updating."),
83 )
84
85 sync_users = models.BooleanField(default=True)
86 sync_users_password = models.BooleanField(
87 default=True,
88 help_text=_(
89 "When a user changes their password, sync it back to LDAP. "
90 "This can only be enabled on a single LDAP source."
91 ),
92 )
93 sync_groups = models.BooleanField(default=True)
94 sync_parent_group = models.ForeignKey(
95 Group, blank=True, null=True, default=None, on_delete=models.SET_DEFAULT
96 )
97
98 @property
99 def component(self) -> str:
100 return "ak-source-ldap-form"
101
102 @property
103 def serializer(self) -> type[Serializer]:
104 from authentik.sources.ldap.api import LDAPSourceSerializer
105
106 return LDAPSourceSerializer
107
108 def server(self, **kwargs) -> Server:
109 """Get LDAP Server/ServerPool"""
110 servers = []
111 tls_kwargs = {}
112 if self.peer_certificate:
113 tls_kwargs["ca_certs_data"] = self.peer_certificate.certificate_data
114 tls_kwargs["validate"] = CERT_REQUIRED
115 if ciphers := CONFIG.y("ldap.tls.ciphers", None):
116 tls_kwargs["ciphers"] = ciphers.strip()
117 server_kwargs = {
118 "get_info": ALL,
119 "connect_timeout": LDAP_TIMEOUT,
120 "tls": Tls(**tls_kwargs),
121 }
122 server_kwargs.update(kwargs)
123 if "," in self.server_uri:
124 for server in self.server_uri.split(","):
125 servers.append(Server(server, **server_kwargs))
126 else:
127 servers = [Server(self.server_uri, **server_kwargs)]
128 return ServerPool(servers, RANDOM, active=True, exhaust=True)
129
130 def connection(
131 self, server_kwargs: Optional[dict] = None, connection_kwargs: Optional[dict] = None
132 ) -> Connection:
133 """Get a fully connected and bound LDAP Connection"""
134 server_kwargs = server_kwargs or {}
135 connection_kwargs = connection_kwargs or {}
136 connection_kwargs.setdefault("user", self.bind_cn)
137 connection_kwargs.setdefault("password", self.bind_password)
138 connection = Connection(
139 self.server(**server_kwargs),
140 raise_exceptions=True,
141 receive_timeout=LDAP_TIMEOUT,
142 **connection_kwargs,
143 )
144
145 if self.start_tls:
146 connection.start_tls(read_server_info=False)
147 try:
148 connection.bind()
149 except LDAPSchemaError as exc:
150 # Schema error, so try connecting without schema info
151 # See https://github.com/goauthentik/authentik/issues/4590
152 if server_kwargs.get("get_info", ALL) == NONE:
153 raise exc
154 server_kwargs["get_info"] = NONE
155 return self.connection(server_kwargs, connection_kwargs)
156 return connection
157
158 class Meta:
159 verbose_name = _("LDAP Source")
160 verbose_name_plural = _("LDAP Sources")
161
162
163 class LDAPPropertyMapping(PropertyMapping):
164 """Map LDAP Property to User or Group object attribute"""
165
166 object_field = models.TextField()
167
168 @property
169 def component(self) -> str:
170 return "ak-property-mapping-ldap-form"
171
172 @property
173 def serializer(self) -> type[Serializer]:
174 from authentik.sources.ldap.api import LDAPPropertyMappingSerializer
175
176 return LDAPPropertyMappingSerializer
177
178 def __str__(self):
179 return str(self.name)
180
181 class Meta:
182 verbose_name = _("LDAP Property Mapping")
183 verbose_name_plural = _("LDAP Property Mappings")
184
```
Path: `authentik/sources/ldap/auth.py`
Content:
```
1 """authentik LDAP Authentication Backend"""
2 from typing import Optional
3
4 from django.http import HttpRequest
5 from ldap3.core.exceptions import LDAPException, LDAPInvalidCredentialsResult
6 from structlog.stdlib import get_logger
7
8 from authentik.core.auth import InbuiltBackend
9 from authentik.core.models import User
10 from authentik.sources.ldap.models import LDAPSource
11
12 LOGGER = get_logger()
13 LDAP_DISTINGUISHED_NAME = "distinguishedName"
14
15
16 class LDAPBackend(InbuiltBackend):
17 """Authenticate users against LDAP Server"""
18
19 def authenticate(self, request: HttpRequest, **kwargs):
20 """Try to authenticate a user via ldap"""
21 if "password" not in kwargs:
22 return None
23 for source in LDAPSource.objects.filter(enabled=True):
24 LOGGER.debug("LDAP Auth attempt", source=source)
25 user = self.auth_user(source, **kwargs)
26 if user:
27 self.set_method("ldap", request, source=source)
28 return user
29 return None
30
31 def auth_user(self, source: LDAPSource, password: str, **filters: str) -> Optional[User]:
32 """Try to bind as either user_dn or mail with password.
33 Returns True on success, otherwise False"""
34 users = User.objects.filter(**filters)
35 if not users.exists():
36 return None
37 user: User = users.first()
38 if LDAP_DISTINGUISHED_NAME not in user.attributes:
39 LOGGER.debug("User doesn't have DN set, assuming not LDAP imported.", user=user)
40 return None
41 # Either has unusable password,
42 # or has a password, but couldn't be authenticated by ModelBackend.
43 # This means we check with a bind to see if the LDAP password has changed
44 if self.auth_user_by_bind(source, user, password):
45 # Password given successfully binds to LDAP, so we save it in our Database
46 LOGGER.debug("Updating user's password in DB", user=user)
47 user.set_password(password, signal=False)
48 user.save()
49 return user
50 # Password doesn't match
51 LOGGER.debug("Failed to bind, password invalid")
52 return None
53
54 def auth_user_by_bind(self, source: LDAPSource, user: User, password: str) -> Optional[User]:
55 """Attempt authentication by binding to the LDAP server as `user`. This
56 method should be avoided as its slow to do the bind."""
57 # Try to bind as new user
58 LOGGER.debug("Attempting to bind as user", user=user)
59 try:
60 temp_connection = source.connection(
61 connection_kwargs={
62 "user": user.attributes.get(LDAP_DISTINGUISHED_NAME),
63 "password": password,
64 }
65 )
66 temp_connection.bind()
67 return user
68 except LDAPInvalidCredentialsResult as exc:
69 LOGGER.debug("invalid LDAP credentials", user=user, exc=exc)
70 except LDAPException as exc:
71 LOGGER.warning("failed to bind to LDAP", exc=exc)
72 return None
73
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/authentik/sources/ldap/auth.py b/authentik/sources/ldap/auth.py
--- a/authentik/sources/ldap/auth.py
+++ b/authentik/sources/ldap/auth.py
@@ -57,13 +57,13 @@
# Try to bind as new user
LOGGER.debug("Attempting to bind as user", user=user)
try:
- temp_connection = source.connection(
+ # source.connection also attempts to bind
+ source.connection(
connection_kwargs={
"user": user.attributes.get(LDAP_DISTINGUISHED_NAME),
"password": password,
}
)
- temp_connection.bind()
return user
except LDAPInvalidCredentialsResult as exc:
LOGGER.debug("invalid LDAP credentials", user=user, exc=exc)
diff --git a/authentik/sources/ldap/models.py b/authentik/sources/ldap/models.py
--- a/authentik/sources/ldap/models.py
+++ b/authentik/sources/ldap/models.py
@@ -145,7 +145,9 @@
if self.start_tls:
connection.start_tls(read_server_info=False)
try:
- connection.bind()
+ successful = connection.bind()
+ if successful:
+ return connection
except LDAPSchemaError as exc:
# Schema error, so try connecting without schema info
# See https://github.com/goauthentik/authentik/issues/4590
@@ -153,7 +155,7 @@
raise exc
server_kwargs["get_info"] = NONE
return self.connection(server_kwargs, connection_kwargs)
- return connection
+ return RuntimeError("Failed to bind")
class Meta:
verbose_name = _("LDAP Source")
| {"golden_diff": "diff --git a/authentik/sources/ldap/auth.py b/authentik/sources/ldap/auth.py\n--- a/authentik/sources/ldap/auth.py\n+++ b/authentik/sources/ldap/auth.py\n@@ -57,13 +57,13 @@\n # Try to bind as new user\n LOGGER.debug(\"Attempting to bind as user\", user=user)\n try:\n- temp_connection = source.connection(\n+ # source.connection also attempts to bind\n+ source.connection(\n connection_kwargs={\n \"user\": user.attributes.get(LDAP_DISTINGUISHED_NAME),\n \"password\": password,\n }\n )\n- temp_connection.bind()\n return user\n except LDAPInvalidCredentialsResult as exc:\n LOGGER.debug(\"invalid LDAP credentials\", user=user, exc=exc)\ndiff --git a/authentik/sources/ldap/models.py b/authentik/sources/ldap/models.py\n--- a/authentik/sources/ldap/models.py\n+++ b/authentik/sources/ldap/models.py\n@@ -145,7 +145,9 @@\n if self.start_tls:\n connection.start_tls(read_server_info=False)\n try:\n- connection.bind()\n+ successful = connection.bind()\n+ if successful:\n+ return connection\n except LDAPSchemaError as exc:\n # Schema error, so try connecting without schema info\n # See https://github.com/goauthentik/authentik/issues/4590\n@@ -153,7 +155,7 @@\n raise exc\n server_kwargs[\"get_info\"] = NONE\n return self.connection(server_kwargs, connection_kwargs)\n- return connection\n+ return RuntimeError(\"Failed to bind\")\n \n class Meta:\n verbose_name = _(\"LDAP Source\")\n", "issue": "LDAP Source Bind from Federation & Social login is Broken Since Commit 1ca8feb\n**Describe the bug**\r\nLDAP Sources are broken since commit 1ca8feb. This is due to a double bind that is attempted on the Connection object. The second bind consistently causes a LDAPInvalidCredentailResult exception even if it is successful the first bind. I found this bug after updating Authentik to 2023.5.3 wondering why LDAP logins no longer worked. The LDAP backend is consistently changing passwords due to TOTP so a cached password was never hit always forcing a LDAP bind. Which is what made me find this issue.\r\n\r\n**To Reproduce**\r\n1. Set up a LDAP Source that works\r\n2. Login in as any user using the LDAP Password for that users THATS NOT CACHED\r\n3. User Always gets Invalid Credentials\r\n\r\n**Expected behavior**\r\nUser should successfully login \r\n\r\n**Logs**\r\nNone Provided\r\n\r\n**Version and Deployment (please complete the following information):**\r\n\r\n- authentik version: 2023.5.3\r\n- Deployment: docker-compose\r\n\r\n\r\n**Additional context**\r\nI confirmed this bug on my own stack by creating multiple logging lines using LOGGER and checking DEBUG output. For some reason a second bind always causes this Exception with Authentik if the first was successful. I'm not sure if the state is reset per successful connection which is causing this. 
But by removing the extra bind performed in /authentik/sources/ldap/auth.py I no longer get erroneous invalid credential exceptions.\r\n\n", "before_files": [{"content": "\"\"\"authentik LDAP Models\"\"\"\nfrom ssl import CERT_REQUIRED\nfrom typing import Optional\n\nfrom django.db import models\nfrom django.utils.translation import gettext_lazy as _\nfrom ldap3 import ALL, NONE, RANDOM, Connection, Server, ServerPool, Tls\nfrom ldap3.core.exceptions import LDAPSchemaError\nfrom rest_framework.serializers import Serializer\n\nfrom authentik.core.models import Group, PropertyMapping, Source\nfrom authentik.crypto.models import CertificateKeyPair\nfrom authentik.lib.config import CONFIG\nfrom authentik.lib.models import DomainlessURLValidator\n\nLDAP_TIMEOUT = 15\n\n\nclass MultiURLValidator(DomainlessURLValidator):\n \"\"\"Same as DomainlessURLValidator but supports multiple URLs separated with a comma.\"\"\"\n\n def __call__(self, value: str):\n if \",\" in value:\n for url in value.split(\",\"):\n super().__call__(url)\n else:\n super().__call__(value)\n\n\nclass LDAPSource(Source):\n \"\"\"Federate LDAP Directory with authentik, or create new accounts in LDAP.\"\"\"\n\n server_uri = models.TextField(\n validators=[MultiURLValidator(schemes=[\"ldap\", \"ldaps\"])],\n verbose_name=_(\"Server URI\"),\n )\n peer_certificate = models.ForeignKey(\n CertificateKeyPair,\n on_delete=models.SET_DEFAULT,\n default=None,\n null=True,\n help_text=_(\n \"Optionally verify the LDAP Server's Certificate against the CA Chain in this keypair.\"\n ),\n )\n\n bind_cn = models.TextField(verbose_name=_(\"Bind CN\"), blank=True)\n bind_password = models.TextField(blank=True)\n start_tls = models.BooleanField(default=False, verbose_name=_(\"Enable Start TLS\"))\n\n base_dn = models.TextField(verbose_name=_(\"Base DN\"))\n additional_user_dn = models.TextField(\n help_text=_(\"Prepended to Base DN for User-queries.\"),\n verbose_name=_(\"Addition User DN\"),\n blank=True,\n )\n additional_group_dn = models.TextField(\n help_text=_(\"Prepended to Base DN for Group-queries.\"),\n verbose_name=_(\"Addition Group DN\"),\n blank=True,\n )\n\n user_object_filter = models.TextField(\n default=\"(objectClass=person)\",\n help_text=_(\"Consider Objects matching this filter to be Users.\"),\n )\n group_membership_field = models.TextField(\n default=\"member\", help_text=_(\"Field which contains members of a group.\")\n )\n group_object_filter = models.TextField(\n default=\"(objectClass=group)\",\n help_text=_(\"Consider Objects matching this filter to be Groups.\"),\n )\n object_uniqueness_field = models.TextField(\n default=\"objectSid\", help_text=_(\"Field which contains a unique Identifier.\")\n )\n\n property_mappings_group = models.ManyToManyField(\n PropertyMapping,\n default=None,\n blank=True,\n help_text=_(\"Property mappings used for group creation/updating.\"),\n )\n\n sync_users = models.BooleanField(default=True)\n sync_users_password = models.BooleanField(\n default=True,\n help_text=_(\n \"When a user changes their password, sync it back to LDAP. 
\"\n \"This can only be enabled on a single LDAP source.\"\n ),\n )\n sync_groups = models.BooleanField(default=True)\n sync_parent_group = models.ForeignKey(\n Group, blank=True, null=True, default=None, on_delete=models.SET_DEFAULT\n )\n\n @property\n def component(self) -> str:\n return \"ak-source-ldap-form\"\n\n @property\n def serializer(self) -> type[Serializer]:\n from authentik.sources.ldap.api import LDAPSourceSerializer\n\n return LDAPSourceSerializer\n\n def server(self, **kwargs) -> Server:\n \"\"\"Get LDAP Server/ServerPool\"\"\"\n servers = []\n tls_kwargs = {}\n if self.peer_certificate:\n tls_kwargs[\"ca_certs_data\"] = self.peer_certificate.certificate_data\n tls_kwargs[\"validate\"] = CERT_REQUIRED\n if ciphers := CONFIG.y(\"ldap.tls.ciphers\", None):\n tls_kwargs[\"ciphers\"] = ciphers.strip()\n server_kwargs = {\n \"get_info\": ALL,\n \"connect_timeout\": LDAP_TIMEOUT,\n \"tls\": Tls(**tls_kwargs),\n }\n server_kwargs.update(kwargs)\n if \",\" in self.server_uri:\n for server in self.server_uri.split(\",\"):\n servers.append(Server(server, **server_kwargs))\n else:\n servers = [Server(self.server_uri, **server_kwargs)]\n return ServerPool(servers, RANDOM, active=True, exhaust=True)\n\n def connection(\n self, server_kwargs: Optional[dict] = None, connection_kwargs: Optional[dict] = None\n ) -> Connection:\n \"\"\"Get a fully connected and bound LDAP Connection\"\"\"\n server_kwargs = server_kwargs or {}\n connection_kwargs = connection_kwargs or {}\n connection_kwargs.setdefault(\"user\", self.bind_cn)\n connection_kwargs.setdefault(\"password\", self.bind_password)\n connection = Connection(\n self.server(**server_kwargs),\n raise_exceptions=True,\n receive_timeout=LDAP_TIMEOUT,\n **connection_kwargs,\n )\n\n if self.start_tls:\n connection.start_tls(read_server_info=False)\n try:\n connection.bind()\n except LDAPSchemaError as exc:\n # Schema error, so try connecting without schema info\n # See https://github.com/goauthentik/authentik/issues/4590\n if server_kwargs.get(\"get_info\", ALL) == NONE:\n raise exc\n server_kwargs[\"get_info\"] = NONE\n return self.connection(server_kwargs, connection_kwargs)\n return connection\n\n class Meta:\n verbose_name = _(\"LDAP Source\")\n verbose_name_plural = _(\"LDAP Sources\")\n\n\nclass LDAPPropertyMapping(PropertyMapping):\n \"\"\"Map LDAP Property to User or Group object attribute\"\"\"\n\n object_field = models.TextField()\n\n @property\n def component(self) -> str:\n return \"ak-property-mapping-ldap-form\"\n\n @property\n def serializer(self) -> type[Serializer]:\n from authentik.sources.ldap.api import LDAPPropertyMappingSerializer\n\n return LDAPPropertyMappingSerializer\n\n def __str__(self):\n return str(self.name)\n\n class Meta:\n verbose_name = _(\"LDAP Property Mapping\")\n verbose_name_plural = _(\"LDAP Property Mappings\")\n", "path": "authentik/sources/ldap/models.py"}, {"content": "\"\"\"authentik LDAP Authentication Backend\"\"\"\nfrom typing import Optional\n\nfrom django.http import HttpRequest\nfrom ldap3.core.exceptions import LDAPException, LDAPInvalidCredentialsResult\nfrom structlog.stdlib import get_logger\n\nfrom authentik.core.auth import InbuiltBackend\nfrom authentik.core.models import User\nfrom authentik.sources.ldap.models import LDAPSource\n\nLOGGER = get_logger()\nLDAP_DISTINGUISHED_NAME = \"distinguishedName\"\n\n\nclass LDAPBackend(InbuiltBackend):\n \"\"\"Authenticate users against LDAP Server\"\"\"\n\n def authenticate(self, request: HttpRequest, **kwargs):\n \"\"\"Try to authenticate a 
user via ldap\"\"\"\n if \"password\" not in kwargs:\n return None\n for source in LDAPSource.objects.filter(enabled=True):\n LOGGER.debug(\"LDAP Auth attempt\", source=source)\n user = self.auth_user(source, **kwargs)\n if user:\n self.set_method(\"ldap\", request, source=source)\n return user\n return None\n\n def auth_user(self, source: LDAPSource, password: str, **filters: str) -> Optional[User]:\n \"\"\"Try to bind as either user_dn or mail with password.\n Returns True on success, otherwise False\"\"\"\n users = User.objects.filter(**filters)\n if not users.exists():\n return None\n user: User = users.first()\n if LDAP_DISTINGUISHED_NAME not in user.attributes:\n LOGGER.debug(\"User doesn't have DN set, assuming not LDAP imported.\", user=user)\n return None\n # Either has unusable password,\n # or has a password, but couldn't be authenticated by ModelBackend.\n # This means we check with a bind to see if the LDAP password has changed\n if self.auth_user_by_bind(source, user, password):\n # Password given successfully binds to LDAP, so we save it in our Database\n LOGGER.debug(\"Updating user's password in DB\", user=user)\n user.set_password(password, signal=False)\n user.save()\n return user\n # Password doesn't match\n LOGGER.debug(\"Failed to bind, password invalid\")\n return None\n\n def auth_user_by_bind(self, source: LDAPSource, user: User, password: str) -> Optional[User]:\n \"\"\"Attempt authentication by binding to the LDAP server as `user`. This\n method should be avoided as its slow to do the bind.\"\"\"\n # Try to bind as new user\n LOGGER.debug(\"Attempting to bind as user\", user=user)\n try:\n temp_connection = source.connection(\n connection_kwargs={\n \"user\": user.attributes.get(LDAP_DISTINGUISHED_NAME),\n \"password\": password,\n }\n )\n temp_connection.bind()\n return user\n except LDAPInvalidCredentialsResult as exc:\n LOGGER.debug(\"invalid LDAP credentials\", user=user, exc=exc)\n except LDAPException as exc:\n LOGGER.warning(\"failed to bind to LDAP\", exc=exc)\n return None\n", "path": "authentik/sources/ldap/auth.py"}], "after_files": [{"content": "\"\"\"authentik LDAP Models\"\"\"\nfrom ssl import CERT_REQUIRED\nfrom typing import Optional\n\nfrom django.db import models\nfrom django.utils.translation import gettext_lazy as _\nfrom ldap3 import ALL, NONE, RANDOM, Connection, Server, ServerPool, Tls\nfrom ldap3.core.exceptions import LDAPSchemaError\nfrom rest_framework.serializers import Serializer\n\nfrom authentik.core.models import Group, PropertyMapping, Source\nfrom authentik.crypto.models import CertificateKeyPair\nfrom authentik.lib.config import CONFIG\nfrom authentik.lib.models import DomainlessURLValidator\n\nLDAP_TIMEOUT = 15\n\n\nclass MultiURLValidator(DomainlessURLValidator):\n \"\"\"Same as DomainlessURLValidator but supports multiple URLs separated with a comma.\"\"\"\n\n def __call__(self, value: str):\n if \",\" in value:\n for url in value.split(\",\"):\n super().__call__(url)\n else:\n super().__call__(value)\n\n\nclass LDAPSource(Source):\n \"\"\"Federate LDAP Directory with authentik, or create new accounts in LDAP.\"\"\"\n\n server_uri = models.TextField(\n validators=[MultiURLValidator(schemes=[\"ldap\", \"ldaps\"])],\n verbose_name=_(\"Server URI\"),\n )\n peer_certificate = models.ForeignKey(\n CertificateKeyPair,\n on_delete=models.SET_DEFAULT,\n default=None,\n null=True,\n help_text=_(\n \"Optionally verify the LDAP Server's Certificate against the CA Chain in this keypair.\"\n ),\n )\n\n bind_cn = 
models.TextField(verbose_name=_(\"Bind CN\"), blank=True)\n bind_password = models.TextField(blank=True)\n start_tls = models.BooleanField(default=False, verbose_name=_(\"Enable Start TLS\"))\n\n base_dn = models.TextField(verbose_name=_(\"Base DN\"))\n additional_user_dn = models.TextField(\n help_text=_(\"Prepended to Base DN for User-queries.\"),\n verbose_name=_(\"Addition User DN\"),\n blank=True,\n )\n additional_group_dn = models.TextField(\n help_text=_(\"Prepended to Base DN for Group-queries.\"),\n verbose_name=_(\"Addition Group DN\"),\n blank=True,\n )\n\n user_object_filter = models.TextField(\n default=\"(objectClass=person)\",\n help_text=_(\"Consider Objects matching this filter to be Users.\"),\n )\n group_membership_field = models.TextField(\n default=\"member\", help_text=_(\"Field which contains members of a group.\")\n )\n group_object_filter = models.TextField(\n default=\"(objectClass=group)\",\n help_text=_(\"Consider Objects matching this filter to be Groups.\"),\n )\n object_uniqueness_field = models.TextField(\n default=\"objectSid\", help_text=_(\"Field which contains a unique Identifier.\")\n )\n\n property_mappings_group = models.ManyToManyField(\n PropertyMapping,\n default=None,\n blank=True,\n help_text=_(\"Property mappings used for group creation/updating.\"),\n )\n\n sync_users = models.BooleanField(default=True)\n sync_users_password = models.BooleanField(\n default=True,\n help_text=_(\n \"When a user changes their password, sync it back to LDAP. \"\n \"This can only be enabled on a single LDAP source.\"\n ),\n )\n sync_groups = models.BooleanField(default=True)\n sync_parent_group = models.ForeignKey(\n Group, blank=True, null=True, default=None, on_delete=models.SET_DEFAULT\n )\n\n @property\n def component(self) -> str:\n return \"ak-source-ldap-form\"\n\n @property\n def serializer(self) -> type[Serializer]:\n from authentik.sources.ldap.api import LDAPSourceSerializer\n\n return LDAPSourceSerializer\n\n def server(self, **kwargs) -> Server:\n \"\"\"Get LDAP Server/ServerPool\"\"\"\n servers = []\n tls_kwargs = {}\n if self.peer_certificate:\n tls_kwargs[\"ca_certs_data\"] = self.peer_certificate.certificate_data\n tls_kwargs[\"validate\"] = CERT_REQUIRED\n if ciphers := CONFIG.y(\"ldap.tls.ciphers\", None):\n tls_kwargs[\"ciphers\"] = ciphers.strip()\n server_kwargs = {\n \"get_info\": ALL,\n \"connect_timeout\": LDAP_TIMEOUT,\n \"tls\": Tls(**tls_kwargs),\n }\n server_kwargs.update(kwargs)\n if \",\" in self.server_uri:\n for server in self.server_uri.split(\",\"):\n servers.append(Server(server, **server_kwargs))\n else:\n servers = [Server(self.server_uri, **server_kwargs)]\n return ServerPool(servers, RANDOM, active=True, exhaust=True)\n\n def connection(\n self, server_kwargs: Optional[dict] = None, connection_kwargs: Optional[dict] = None\n ) -> Connection:\n \"\"\"Get a fully connected and bound LDAP Connection\"\"\"\n server_kwargs = server_kwargs or {}\n connection_kwargs = connection_kwargs or {}\n connection_kwargs.setdefault(\"user\", self.bind_cn)\n connection_kwargs.setdefault(\"password\", self.bind_password)\n connection = Connection(\n self.server(**server_kwargs),\n raise_exceptions=True,\n receive_timeout=LDAP_TIMEOUT,\n **connection_kwargs,\n )\n\n if self.start_tls:\n connection.start_tls(read_server_info=False)\n try:\n successful = connection.bind()\n if successful:\n return connection\n except LDAPSchemaError as exc:\n # Schema error, so try connecting without schema info\n # See 
https://github.com/goauthentik/authentik/issues/4590\n if server_kwargs.get(\"get_info\", ALL) == NONE:\n raise exc\n server_kwargs[\"get_info\"] = NONE\n return self.connection(server_kwargs, connection_kwargs)\n return RuntimeError(\"Failed to bind\")\n\n class Meta:\n verbose_name = _(\"LDAP Source\")\n verbose_name_plural = _(\"LDAP Sources\")\n\n\nclass LDAPPropertyMapping(PropertyMapping):\n \"\"\"Map LDAP Property to User or Group object attribute\"\"\"\n\n object_field = models.TextField()\n\n @property\n def component(self) -> str:\n return \"ak-property-mapping-ldap-form\"\n\n @property\n def serializer(self) -> type[Serializer]:\n from authentik.sources.ldap.api import LDAPPropertyMappingSerializer\n\n return LDAPPropertyMappingSerializer\n\n def __str__(self):\n return str(self.name)\n\n class Meta:\n verbose_name = _(\"LDAP Property Mapping\")\n verbose_name_plural = _(\"LDAP Property Mappings\")\n", "path": "authentik/sources/ldap/models.py"}, {"content": "\"\"\"authentik LDAP Authentication Backend\"\"\"\nfrom typing import Optional\n\nfrom django.http import HttpRequest\nfrom ldap3.core.exceptions import LDAPException, LDAPInvalidCredentialsResult\nfrom structlog.stdlib import get_logger\n\nfrom authentik.core.auth import InbuiltBackend\nfrom authentik.core.models import User\nfrom authentik.sources.ldap.models import LDAPSource\n\nLOGGER = get_logger()\nLDAP_DISTINGUISHED_NAME = \"distinguishedName\"\n\n\nclass LDAPBackend(InbuiltBackend):\n \"\"\"Authenticate users against LDAP Server\"\"\"\n\n def authenticate(self, request: HttpRequest, **kwargs):\n \"\"\"Try to authenticate a user via ldap\"\"\"\n if \"password\" not in kwargs:\n return None\n for source in LDAPSource.objects.filter(enabled=True):\n LOGGER.debug(\"LDAP Auth attempt\", source=source)\n user = self.auth_user(source, **kwargs)\n if user:\n self.set_method(\"ldap\", request, source=source)\n return user\n return None\n\n def auth_user(self, source: LDAPSource, password: str, **filters: str) -> Optional[User]:\n \"\"\"Try to bind as either user_dn or mail with password.\n Returns True on success, otherwise False\"\"\"\n users = User.objects.filter(**filters)\n if not users.exists():\n return None\n user: User = users.first()\n if LDAP_DISTINGUISHED_NAME not in user.attributes:\n LOGGER.debug(\"User doesn't have DN set, assuming not LDAP imported.\", user=user)\n return None\n # Either has unusable password,\n # or has a password, but couldn't be authenticated by ModelBackend.\n # This means we check with a bind to see if the LDAP password has changed\n if self.auth_user_by_bind(source, user, password):\n # Password given successfully binds to LDAP, so we save it in our Database\n LOGGER.debug(\"Updating user's password in DB\", user=user)\n user.set_password(password, signal=False)\n user.save()\n return user\n # Password doesn't match\n LOGGER.debug(\"Failed to bind, password invalid\")\n return None\n\n def auth_user_by_bind(self, source: LDAPSource, user: User, password: str) -> Optional[User]:\n \"\"\"Attempt authentication by binding to the LDAP server as `user`. 
This\n method should be avoided as its slow to do the bind.\"\"\"\n # Try to bind as new user\n LOGGER.debug(\"Attempting to bind as user\", user=user)\n try:\n # source.connection also attempts to bind\n source.connection(\n connection_kwargs={\n \"user\": user.attributes.get(LDAP_DISTINGUISHED_NAME),\n \"password\": password,\n }\n )\n return user\n except LDAPInvalidCredentialsResult as exc:\n LOGGER.debug(\"invalid LDAP credentials\", user=user, exc=exc)\n except LDAPException as exc:\n LOGGER.warning(\"failed to bind to LDAP\", exc=exc)\n return None\n", "path": "authentik/sources/ldap/auth.py"}]} | 3,109 | 374 |
gh_patches_debug_41083 | rasdani/github-patches | git_diff | iterative__dvc-925 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
S3 AWS: default profile is always used
Hi!
Defining a profile in dvc-config has no influence on which profile from the AWS credentials file is used.

I quickly took a look at the code and never saw boto3.session(profile=..) being called. Or is this done using the AWS_PROFILE env variable?
(using dvc 0.14.0)
--- END ISSUE ---
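The distinction the reporter is pointing at: module-level `boto3` helpers use the default session, which only honours the `AWS_PROFILE` environment variable, while a named profile has to be passed explicitly to `boto3.session.Session`. A minimal sketch, with a placeholder profile name:

```python
import boto3

# Default session: uses the [default] profile unless AWS_PROFILE is set.
s3_default = boto3.client("s3")

# Explicit session: uses the named profile regardless of environment variables.
session = boto3.session.Session(profile_name="my-dvc-profile")  # placeholder name
s3_profiled = session.client("s3")
```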
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dvc/remote/s3.py`
Content:
```
1 import os
2 import math
3 import threading
4 import posixpath
5
6 try:
7 import boto3
8 except ImportError:
9 boto3 = None
10
11 try:
12 from urlparse import urlparse
13 except ImportError:
14 from urllib.parse import urlparse
15
16 from dvc.logger import Logger
17 from dvc.progress import progress
18 from dvc.config import Config
19 from dvc.remote.base import RemoteBase
20 from dvc.remote.local import RemoteLOCAL
21 from dvc.exceptions import DvcException
22
23
24 class Callback(object):
25 def __init__(self, name, total):
26 self.name = name
27 self.total = total
28 self.current = 0
29 self.lock = threading.Lock()
30
31 def __call__(self, byts):
32 with self.lock:
33 self.current += byts
34 progress.update_target(self.name, self.current, self.total)
35
36
37 class RemoteS3(RemoteBase):
38 scheme = 's3'
39 REGEX = r'^s3://(?P<path>.*)$'
40 REQUIRES = {'boto3': boto3}
41 PARAM_ETAG = 'etag'
42
43 def __init__(self, project, config):
44 self.project = project
45 storagepath = 's3://' + config.get(Config.SECTION_AWS_STORAGEPATH, '').lstrip('/')
46 self.url = config.get(Config.SECTION_REMOTE_URL, storagepath)
47 self.region = config.get(Config.SECTION_AWS_REGION, None)
48 self.profile = config.get(Config.SECTION_AWS_PROFILE, None)
49 self.credentialpath = config.get(Config.SECTION_AWS_CREDENTIALPATH, None)
50 self.endpoint_url = config.get(Config.SECTION_AWS_ENDPOINT_URL, None)
51
52 @property
53 def bucket(self):
54 return urlparse(self.url).netloc
55
56 @property
57 def prefix(self):
58 return urlparse(self.url).path.lstrip('/')
59
60 @property
61 def s3(self):
62 return boto3.resource('s3', endpoint_url=self.endpoint_url)
63
64 @property
65 def s3_session_client(self):
66 session = boto3.session.Session()
67 s3 = session.client('s3', endpoint_url=self.endpoint_url)
68 return s3
69
70 def get_etag(self, bucket, key):
71 try:
72 obj = self.s3.Object(bucket, key).get()
73 except Exception:
74 raise DvcException('s3://{}/{} does not exist'.format(bucket, key))
75
76 return obj['ETag'].strip('"')
77
78 def save_info(self, path_info):
79 if path_info['scheme'] != 's3':
80 raise NotImplementedError
81
82 return {self.PARAM_ETAG: self.get_etag(path_info['bucket'], path_info['key'])}
83
84 def _copy(self, from_info, to_info, s3=None):
85 s3 = s3 if s3 else self.s3
86
87 source = {'Bucket': from_info['bucket'],
88 'Key': from_info['key']}
89 self.s3.Bucket(to_info['bucket']).copy(source, to_info['key'])
90
91 def save(self, path_info):
92 if path_info['scheme'] != 's3':
93 raise NotImplementedError
94
95 etag = self.get_etag(path_info['bucket'], path_info['key'])
96 key = posixpath.join(self.prefix, etag[0:2], etag[2:])
97 to_info = {'scheme': 's3', 'bucket': self.bucket, 'key': key}
98
99 self._copy(path_info, to_info)
100
101 return {self.PARAM_ETAG: etag}
102
103 def checkout(self, path_info, checksum_info):
104 if path_info['scheme'] != 's3':
105 raise NotImplementedError
106
107 etag = checksum_info.get(self.PARAM_ETAG, None)
108 if not etag:
109 return
110
111 key = posixpath.join(self.prefix, etag[0:2], etag[2:])
112 from_info = {'scheme': 's3', 'bucket': self.bucket, 'key': key}
113
114 self._copy(from_info, path_info)
115
116 def remove(self, path_info):
117 if path_info['scheme'] != 's3':
118 raise NotImplementedError
119
120 Logger.debug('Removing s3://{}/{}'.format(path_info['bucket'],
121 path_info['key']))
122
123 obj = self.s3.Object(path_info['bucket'], path_info['key'])
124 try:
125 obj.get()
126 except Exception:
127 return
128
129 obj.delete()
130
131 def md5s_to_path_infos(self, md5s):
132 return [{'scheme': self.scheme,
133 'bucket': self.bucket,
134 'key': posixpath.join(self.prefix, md5[0:2], md5[2:])} for md5 in md5s]
135
136 def exists(self, path_infos):
137 # NOTE: We mostly use exists() method when filtering a bulk of cache
138 # files to decide if we need to download/upload them and in s3
139 # list_objects_v2() is much-much faster than trying to check keys
140 # one-by-one.
141 ret = []
142 s3 = self.s3_session_client
143
144 keys = []
145 kwargs = {'Bucket': self.bucket,
146 'Prefix': self.prefix}
147 while True:
148 resp = s3.list_objects_v2(**kwargs)
149 contents = resp.get('Contents', None)
150 if not contents:
151 break
152
153 for obj in contents:
154 keys.append(obj['Key'])
155
156 token = resp.get('NextContinuationToken', None)
157 if not token:
158 break
159
160 kwargs['ContinuationToken'] = token
161
162 for path_info in path_infos:
163 exists = False
164 if path_info['key'] in keys:
165 exists = True
166 ret.append(exists)
167
168 return ret
169
170 def upload(self, from_infos, to_infos, names=None):
171 names = self._verify_path_args(to_infos, from_infos, names)
172
173 s3 = self.s3_session_client
174
175 for from_info, to_info, name in zip(from_infos, to_infos, names):
176 if to_info['scheme'] != 's3':
177 raise NotImplementedError
178
179 if from_info['scheme'] != 'local':
180 raise NotImplementedError
181
182 Logger.debug("Uploading '{}' to '{}/{}'".format(from_info['path'],
183 to_info['bucket'],
184 to_info['key']))
185
186 if not name:
187 name = os.path.basename(from_info['path'])
188
189 total = os.path.getsize(from_info['path'])
190 cb = Callback(name, total)
191
192 try:
193 s3.upload_file(from_info['path'], to_info['bucket'], to_info['key'], Callback=cb)
194 except Exception as exc:
195 Logger.error("Failed to upload '{}'".format(from_info['path']), exc)
196 continue
197
198 progress.finish_target(name)
199
200 def download(self, from_infos, to_infos, no_progress_bar=False, names=None):
201 names = self._verify_path_args(from_infos, to_infos, names)
202
203 s3 = self.s3_session_client
204
205 for to_info, from_info, name in zip(to_infos, from_infos, names):
206 if from_info['scheme'] != 's3':
207 raise NotImplementedError
208
209 if to_info['scheme'] == 's3':
210 self._copy(from_info, to_info, s3=s3)
211 continue
212
213 if to_info['scheme'] != 'local':
214 raise NotImplementedError
215
216 Logger.debug("Downloading '{}/{}' to '{}'".format(from_info['bucket'],
217 from_info['key'],
218 to_info['path']))
219
220 tmp_file = self.tmp_file(to_info['path'])
221 if not name:
222 name = os.path.basename(to_info['path'])
223
224 if no_progress_bar:
225 cb = None
226 else:
227 total = s3.head_object(Bucket=from_info['bucket'],
228 Key=from_info['key'])['ContentLength']
229 cb = Callback(name, total)
230
231 self._makedirs(to_info['path'])
232
233 try:
234 s3.download_file(from_info['bucket'], from_info['key'], tmp_file, Callback=cb)
235 except Exception as exc:
236 Logger.error("Failed to download '{}/{}'".format(from_info['bucket'],
237 from_info['key']), exc)
238 return
239
240 os.rename(tmp_file, to_info['path'])
241
242 if not no_progress_bar:
243 progress.finish_target(name)
244
245 def _path_to_etag(self, path):
246 relpath = posixpath.relpath(path, self.prefix)
247 return posixpath.dirname(relpath) + posixpath.basename(relpath)
248
249 def _all(self):
250 objects = self.s3.Bucket(self.bucket).objects.filter(Prefix=self.prefix)
251 return [self._path_to_etag(obj.key) for obj in objects]
252
253 def gc(self, checksum_infos):
254 used_etags = [info[self.PARAM_ETAG] for info in checksum_infos['s3']]
255 used_etags += [info[RemoteLOCAL.PARAM_MD5] for info in checksum_infos['local']]
256
257 for etag in self._all():
258 if etag in used_etags:
259 continue
260 path_info = {'scheme': 's3',
261 'key': posixpath.join(self.prefix, etag[0:2], etag[2:]),
262 'bucket': self.bucket}
263 self.remove(path_info)
264
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dvc/remote/s3.py b/dvc/remote/s3.py
--- a/dvc/remote/s3.py
+++ b/dvc/remote/s3.py
@@ -41,14 +41,26 @@
PARAM_ETAG = 'etag'
def __init__(self, project, config):
+ import configobj
+
self.project = project
storagepath = 's3://' + config.get(Config.SECTION_AWS_STORAGEPATH, '').lstrip('/')
self.url = config.get(Config.SECTION_REMOTE_URL, storagepath)
self.region = config.get(Config.SECTION_AWS_REGION, None)
- self.profile = config.get(Config.SECTION_AWS_PROFILE, None)
- self.credentialpath = config.get(Config.SECTION_AWS_CREDENTIALPATH, None)
+ self.profile = config.get(Config.SECTION_AWS_PROFILE, 'default')
self.endpoint_url = config.get(Config.SECTION_AWS_ENDPOINT_URL, None)
+ credentialpath = config.get(Config.SECTION_AWS_CREDENTIALPATH, None)
+ if credentialpath:
+ creds_conf = configobj.ConfigObj(credentialpath)
+ creds = creds_conf.get(self.profile, {})
+ else:
+ creds = {}
+
+ self.region = creds.get('region', self.region)
+ self.aws_access_key_id = creds.get('aws_access_key_id', None)
+ self.aws_secret_access_key = creds.get('aws_secret_access_key', None)
+
@property
def bucket(self):
return urlparse(self.url).netloc
@@ -59,13 +71,12 @@
@property
def s3(self):
- return boto3.resource('s3', endpoint_url=self.endpoint_url)
-
- @property
- def s3_session_client(self):
- session = boto3.session.Session()
- s3 = session.client('s3', endpoint_url=self.endpoint_url)
- return s3
+ session = boto3.session.Session(profile_name=self.profile)
+ return session.client('s3',
+ aws_access_key_id=self.aws_access_key_id,
+ aws_secret_access_key=self.aws_secret_access_key,
+ region_name=self.region,
+ endpoint_url=self.endpoint_url)
def get_etag(self, bucket, key):
try:
@@ -139,7 +150,7 @@
# list_objects_v2() is much-much faster than trying to check keys
# one-by-one.
ret = []
- s3 = self.s3_session_client
+ s3 = self.s3
keys = []
kwargs = {'Bucket': self.bucket,
@@ -170,7 +181,7 @@
def upload(self, from_infos, to_infos, names=None):
names = self._verify_path_args(to_infos, from_infos, names)
- s3 = self.s3_session_client
+ s3 = self.s3
for from_info, to_info, name in zip(from_infos, to_infos, names):
if to_info['scheme'] != 's3':
@@ -200,7 +211,7 @@
def download(self, from_infos, to_infos, no_progress_bar=False, names=None):
names = self._verify_path_args(from_infos, to_infos, names)
- s3 = self.s3_session_client
+ s3 = self.s3
for to_info, from_info, name in zip(to_infos, from_infos, names):
if from_info['scheme'] != 's3':
| {"golden_diff": "diff --git a/dvc/remote/s3.py b/dvc/remote/s3.py\n--- a/dvc/remote/s3.py\n+++ b/dvc/remote/s3.py\n@@ -41,14 +41,26 @@\n PARAM_ETAG = 'etag'\n \n def __init__(self, project, config):\n+ import configobj\n+\n self.project = project\n storagepath = 's3://' + config.get(Config.SECTION_AWS_STORAGEPATH, '').lstrip('/')\n self.url = config.get(Config.SECTION_REMOTE_URL, storagepath)\n self.region = config.get(Config.SECTION_AWS_REGION, None)\n- self.profile = config.get(Config.SECTION_AWS_PROFILE, None)\n- self.credentialpath = config.get(Config.SECTION_AWS_CREDENTIALPATH, None)\n+ self.profile = config.get(Config.SECTION_AWS_PROFILE, 'default')\n self.endpoint_url = config.get(Config.SECTION_AWS_ENDPOINT_URL, None)\n \n+ credentialpath = config.get(Config.SECTION_AWS_CREDENTIALPATH, None)\n+ if credentialpath:\n+ creds_conf = configobj.ConfigObj(credentialpath)\n+ creds = creds_conf.get(self.profile, {})\n+ else:\n+ creds = {}\n+\n+ self.region = creds.get('region', self.region)\n+ self.aws_access_key_id = creds.get('aws_access_key_id', None)\n+ self.aws_secret_access_key = creds.get('aws_secret_access_key', None)\n+\n @property\n def bucket(self):\n return urlparse(self.url).netloc\n@@ -59,13 +71,12 @@\n \n @property\n def s3(self):\n- return boto3.resource('s3', endpoint_url=self.endpoint_url)\n-\n- @property\n- def s3_session_client(self):\n- session = boto3.session.Session()\n- s3 = session.client('s3', endpoint_url=self.endpoint_url)\n- return s3\n+ session = boto3.session.Session(profile_name=self.profile)\n+ return session.client('s3',\n+ aws_access_key_id=self.aws_access_key_id,\n+ aws_secret_access_key=self.aws_secret_access_key,\n+ region_name=self.region,\n+ endpoint_url=self.endpoint_url)\n \n def get_etag(self, bucket, key):\n try:\n@@ -139,7 +150,7 @@\n # list_objects_v2() is much-much faster than trying to check keys\n # one-by-one.\n ret = []\n- s3 = self.s3_session_client\n+ s3 = self.s3\n \n keys = []\n kwargs = {'Bucket': self.bucket,\n@@ -170,7 +181,7 @@\n def upload(self, from_infos, to_infos, names=None):\n names = self._verify_path_args(to_infos, from_infos, names)\n \n- s3 = self.s3_session_client\n+ s3 = self.s3\n \n for from_info, to_info, name in zip(from_infos, to_infos, names):\n if to_info['scheme'] != 's3':\n@@ -200,7 +211,7 @@\n def download(self, from_infos, to_infos, no_progress_bar=False, names=None):\n names = self._verify_path_args(from_infos, to_infos, names)\n \n- s3 = self.s3_session_client\n+ s3 = self.s3\n \n for to_info, from_info, name in zip(to_infos, from_infos, names):\n if from_info['scheme'] != 's3':\n", "issue": "S3 AWS: default profile is always used\nHi!\r\n\r\nDefining a profile in dvc-config has no influence on the used profile in AWS credentials file.\r\n\r\nI quickly took a look at the code and I never see boto3.session(profile=..) called. 
Or is this done using the AWS_PROFILE env variable?\r\n\r\n(using dvc 0.14.0)\n", "before_files": [{"content": "import os\nimport math\nimport threading\nimport posixpath\n\ntry:\n import boto3\nexcept ImportError:\n boto3 = None\n\ntry:\n from urlparse import urlparse\nexcept ImportError:\n from urllib.parse import urlparse\n\nfrom dvc.logger import Logger\nfrom dvc.progress import progress\nfrom dvc.config import Config\nfrom dvc.remote.base import RemoteBase\nfrom dvc.remote.local import RemoteLOCAL\nfrom dvc.exceptions import DvcException\n\n\nclass Callback(object):\n def __init__(self, name, total):\n self.name = name\n self.total = total\n self.current = 0\n self.lock = threading.Lock()\n\n def __call__(self, byts):\n with self.lock:\n self.current += byts\n progress.update_target(self.name, self.current, self.total)\n\n\nclass RemoteS3(RemoteBase):\n scheme = 's3'\n REGEX = r'^s3://(?P<path>.*)$'\n REQUIRES = {'boto3': boto3}\n PARAM_ETAG = 'etag'\n\n def __init__(self, project, config):\n self.project = project\n storagepath = 's3://' + config.get(Config.SECTION_AWS_STORAGEPATH, '').lstrip('/')\n self.url = config.get(Config.SECTION_REMOTE_URL, storagepath)\n self.region = config.get(Config.SECTION_AWS_REGION, None)\n self.profile = config.get(Config.SECTION_AWS_PROFILE, None)\n self.credentialpath = config.get(Config.SECTION_AWS_CREDENTIALPATH, None)\n self.endpoint_url = config.get(Config.SECTION_AWS_ENDPOINT_URL, None)\n\n @property\n def bucket(self):\n return urlparse(self.url).netloc\n\n @property\n def prefix(self):\n return urlparse(self.url).path.lstrip('/')\n\n @property\n def s3(self):\n return boto3.resource('s3', endpoint_url=self.endpoint_url)\n\n @property\n def s3_session_client(self):\n session = boto3.session.Session()\n s3 = session.client('s3', endpoint_url=self.endpoint_url)\n return s3\n\n def get_etag(self, bucket, key):\n try:\n obj = self.s3.Object(bucket, key).get()\n except Exception:\n raise DvcException('s3://{}/{} does not exist'.format(bucket, key))\n\n return obj['ETag'].strip('\"')\n\n def save_info(self, path_info):\n if path_info['scheme'] != 's3':\n raise NotImplementedError\n\n return {self.PARAM_ETAG: self.get_etag(path_info['bucket'], path_info['key'])}\n\n def _copy(self, from_info, to_info, s3=None):\n s3 = s3 if s3 else self.s3\n\n source = {'Bucket': from_info['bucket'],\n 'Key': from_info['key']}\n self.s3.Bucket(to_info['bucket']).copy(source, to_info['key'])\n\n def save(self, path_info):\n if path_info['scheme'] != 's3':\n raise NotImplementedError\n\n etag = self.get_etag(path_info['bucket'], path_info['key'])\n key = posixpath.join(self.prefix, etag[0:2], etag[2:])\n to_info = {'scheme': 's3', 'bucket': self.bucket, 'key': key}\n\n self._copy(path_info, to_info)\n\n return {self.PARAM_ETAG: etag}\n\n def checkout(self, path_info, checksum_info):\n if path_info['scheme'] != 's3':\n raise NotImplementedError\n\n etag = checksum_info.get(self.PARAM_ETAG, None)\n if not etag:\n return\n\n key = posixpath.join(self.prefix, etag[0:2], etag[2:])\n from_info = {'scheme': 's3', 'bucket': self.bucket, 'key': key}\n\n self._copy(from_info, path_info)\n\n def remove(self, path_info):\n if path_info['scheme'] != 's3':\n raise NotImplementedError\n\n Logger.debug('Removing s3://{}/{}'.format(path_info['bucket'],\n path_info['key']))\n\n obj = self.s3.Object(path_info['bucket'], path_info['key'])\n try:\n obj.get()\n except Exception:\n return\n\n obj.delete()\n\n def md5s_to_path_infos(self, md5s):\n return [{'scheme': self.scheme,\n 'bucket': 
self.bucket,\n 'key': posixpath.join(self.prefix, md5[0:2], md5[2:])} for md5 in md5s]\n\n def exists(self, path_infos):\n # NOTE: We mostly use exists() method when filtering a bulk of cache\n # files to decide if we need to download/upload them and in s3\n # list_objects_v2() is much-much faster than trying to check keys\n # one-by-one.\n ret = []\n s3 = self.s3_session_client\n\n keys = []\n kwargs = {'Bucket': self.bucket,\n 'Prefix': self.prefix}\n while True:\n resp = s3.list_objects_v2(**kwargs)\n contents = resp.get('Contents', None)\n if not contents:\n break\n\n for obj in contents:\n keys.append(obj['Key'])\n\n token = resp.get('NextContinuationToken', None)\n if not token:\n break\n\n kwargs['ContinuationToken'] = token\n\n for path_info in path_infos:\n exists = False\n if path_info['key'] in keys:\n exists = True\n ret.append(exists)\n\n return ret\n\n def upload(self, from_infos, to_infos, names=None):\n names = self._verify_path_args(to_infos, from_infos, names)\n\n s3 = self.s3_session_client\n\n for from_info, to_info, name in zip(from_infos, to_infos, names):\n if to_info['scheme'] != 's3':\n raise NotImplementedError\n\n if from_info['scheme'] != 'local':\n raise NotImplementedError\n\n Logger.debug(\"Uploading '{}' to '{}/{}'\".format(from_info['path'],\n to_info['bucket'],\n to_info['key']))\n\n if not name:\n name = os.path.basename(from_info['path'])\n\n total = os.path.getsize(from_info['path'])\n cb = Callback(name, total)\n\n try:\n s3.upload_file(from_info['path'], to_info['bucket'], to_info['key'], Callback=cb)\n except Exception as exc:\n Logger.error(\"Failed to upload '{}'\".format(from_info['path']), exc)\n continue\n\n progress.finish_target(name)\n\n def download(self, from_infos, to_infos, no_progress_bar=False, names=None):\n names = self._verify_path_args(from_infos, to_infos, names)\n\n s3 = self.s3_session_client\n\n for to_info, from_info, name in zip(to_infos, from_infos, names):\n if from_info['scheme'] != 's3':\n raise NotImplementedError\n\n if to_info['scheme'] == 's3':\n self._copy(from_info, to_info, s3=s3)\n continue\n\n if to_info['scheme'] != 'local':\n raise NotImplementedError\n\n Logger.debug(\"Downloading '{}/{}' to '{}'\".format(from_info['bucket'],\n from_info['key'],\n to_info['path']))\n\n tmp_file = self.tmp_file(to_info['path'])\n if not name:\n name = os.path.basename(to_info['path'])\n\n if no_progress_bar:\n cb = None\n else:\n total = s3.head_object(Bucket=from_info['bucket'],\n Key=from_info['key'])['ContentLength']\n cb = Callback(name, total)\n\n self._makedirs(to_info['path'])\n\n try:\n s3.download_file(from_info['bucket'], from_info['key'], tmp_file, Callback=cb)\n except Exception as exc:\n Logger.error(\"Failed to download '{}/{}'\".format(from_info['bucket'],\n from_info['key']), exc)\n return\n\n os.rename(tmp_file, to_info['path'])\n\n if not no_progress_bar:\n progress.finish_target(name)\n\n def _path_to_etag(self, path):\n relpath = posixpath.relpath(path, self.prefix)\n return posixpath.dirname(relpath) + posixpath.basename(relpath)\n\n def _all(self):\n objects = self.s3.Bucket(self.bucket).objects.filter(Prefix=self.prefix)\n return [self._path_to_etag(obj.key) for obj in objects]\n\n def gc(self, checksum_infos):\n used_etags = [info[self.PARAM_ETAG] for info in checksum_infos['s3']]\n used_etags += [info[RemoteLOCAL.PARAM_MD5] for info in checksum_infos['local']]\n\n for etag in self._all():\n if etag in used_etags:\n continue\n path_info = {'scheme': 's3',\n 'key': posixpath.join(self.prefix, etag[0:2], 
etag[2:]),\n 'bucket': self.bucket}\n self.remove(path_info)\n", "path": "dvc/remote/s3.py"}], "after_files": [{"content": "import os\nimport math\nimport threading\nimport posixpath\n\ntry:\n import boto3\nexcept ImportError:\n boto3 = None\n\ntry:\n from urlparse import urlparse\nexcept ImportError:\n from urllib.parse import urlparse\n\nfrom dvc.logger import Logger\nfrom dvc.progress import progress\nfrom dvc.config import Config\nfrom dvc.remote.base import RemoteBase\nfrom dvc.remote.local import RemoteLOCAL\nfrom dvc.exceptions import DvcException\n\n\nclass Callback(object):\n def __init__(self, name, total):\n self.name = name\n self.total = total\n self.current = 0\n self.lock = threading.Lock()\n\n def __call__(self, byts):\n with self.lock:\n self.current += byts\n progress.update_target(self.name, self.current, self.total)\n\n\nclass RemoteS3(RemoteBase):\n scheme = 's3'\n REGEX = r'^s3://(?P<path>.*)$'\n REQUIRES = {'boto3': boto3}\n PARAM_ETAG = 'etag'\n\n def __init__(self, project, config):\n import configobj\n\n self.project = project\n storagepath = 's3://' + config.get(Config.SECTION_AWS_STORAGEPATH, '').lstrip('/')\n self.url = config.get(Config.SECTION_REMOTE_URL, storagepath)\n self.region = config.get(Config.SECTION_AWS_REGION, None)\n self.profile = config.get(Config.SECTION_AWS_PROFILE, 'default')\n self.endpoint_url = config.get(Config.SECTION_AWS_ENDPOINT_URL, None)\n\n credentialpath = config.get(Config.SECTION_AWS_CREDENTIALPATH, None)\n if credentialpath:\n creds_conf = configobj.ConfigObj(credentialpath)\n creds = creds_conf.get(self.profile, {})\n else:\n creds = {}\n\n self.region = creds.get('region', self.region)\n self.aws_access_key_id = creds.get('aws_access_key_id', None)\n self.aws_secret_access_key = creds.get('aws_secret_access_key', None)\n\n @property\n def bucket(self):\n return urlparse(self.url).netloc\n\n @property\n def prefix(self):\n return urlparse(self.url).path.lstrip('/')\n\n @property\n def s3(self):\n session = boto3.session.Session(profile_name=self.profile)\n return session.client('s3',\n aws_access_key_id=self.aws_access_key_id,\n aws_secret_access_key=self.aws_secret_access_key,\n region_name=self.region,\n endpoint_url=self.endpoint_url)\n\n def get_etag(self, bucket, key):\n try:\n obj = self.s3.Object(bucket, key).get()\n except Exception:\n raise DvcException('s3://{}/{} does not exist'.format(bucket, key))\n\n return obj['ETag'].strip('\"')\n\n def save_info(self, path_info):\n if path_info['scheme'] != 's3':\n raise NotImplementedError\n\n return {self.PARAM_ETAG: self.get_etag(path_info['bucket'], path_info['key'])}\n\n def _copy(self, from_info, to_info, s3=None):\n s3 = s3 if s3 else self.s3\n\n source = {'Bucket': from_info['bucket'],\n 'Key': from_info['key']}\n self.s3.Bucket(to_info['bucket']).copy(source, to_info['key'])\n\n def save(self, path_info):\n if path_info['scheme'] != 's3':\n raise NotImplementedError\n\n etag = self.get_etag(path_info['bucket'], path_info['key'])\n key = posixpath.join(self.prefix, etag[0:2], etag[2:])\n to_info = {'scheme': 's3', 'bucket': self.bucket, 'key': key}\n\n self._copy(path_info, to_info)\n\n return {self.PARAM_ETAG: etag}\n\n def checkout(self, path_info, checksum_info):\n if path_info['scheme'] != 's3':\n raise NotImplementedError\n\n etag = checksum_info.get(self.PARAM_ETAG, None)\n if not etag:\n return\n\n key = posixpath.join(self.prefix, etag[0:2], etag[2:])\n from_info = {'scheme': 's3', 'bucket': self.bucket, 'key': key}\n\n self._copy(from_info, path_info)\n\n def 
remove(self, path_info):\n if path_info['scheme'] != 's3':\n raise NotImplementedError\n\n Logger.debug('Removing s3://{}/{}'.format(path_info['bucket'],\n path_info['key']))\n\n obj = self.s3.Object(path_info['bucket'], path_info['key'])\n try:\n obj.get()\n except Exception:\n return\n\n obj.delete()\n\n def md5s_to_path_infos(self, md5s):\n return [{'scheme': self.scheme,\n 'bucket': self.bucket,\n 'key': posixpath.join(self.prefix, md5[0:2], md5[2:])} for md5 in md5s]\n\n def exists(self, path_infos):\n # NOTE: We mostly use exists() method when filtering a bulk of cache\n # files to decide if we need to download/upload them and in s3\n # list_objects_v2() is much-much faster than trying to check keys\n # one-by-one.\n ret = []\n s3 = self.s3\n\n keys = []\n kwargs = {'Bucket': self.bucket,\n 'Prefix': self.prefix}\n while True:\n resp = s3.list_objects_v2(**kwargs)\n contents = resp.get('Contents', None)\n if not contents:\n break\n\n for obj in contents:\n keys.append(obj['Key'])\n\n token = resp.get('NextContinuationToken', None)\n if not token:\n break\n\n kwargs['ContinuationToken'] = token\n\n for path_info in path_infos:\n exists = False\n if path_info['key'] in keys:\n exists = True\n ret.append(exists)\n\n return ret\n\n def upload(self, from_infos, to_infos, names=None):\n names = self._verify_path_args(to_infos, from_infos, names)\n\n s3 = self.s3\n\n for from_info, to_info, name in zip(from_infos, to_infos, names):\n if to_info['scheme'] != 's3':\n raise NotImplementedError\n\n if from_info['scheme'] != 'local':\n raise NotImplementedError\n\n Logger.debug(\"Uploading '{}' to '{}/{}'\".format(from_info['path'],\n to_info['bucket'],\n to_info['key']))\n\n if not name:\n name = os.path.basename(from_info['path'])\n\n total = os.path.getsize(from_info['path'])\n cb = Callback(name, total)\n\n try:\n s3.upload_file(from_info['path'], to_info['bucket'], to_info['key'], Callback=cb)\n except Exception as exc:\n Logger.error(\"Failed to upload '{}'\".format(from_info['path']), exc)\n continue\n\n progress.finish_target(name)\n\n def download(self, from_infos, to_infos, no_progress_bar=False, names=None):\n names = self._verify_path_args(from_infos, to_infos, names)\n\n s3 = self.s3\n\n for to_info, from_info, name in zip(to_infos, from_infos, names):\n if from_info['scheme'] != 's3':\n raise NotImplementedError\n\n if to_info['scheme'] == 's3':\n self._copy(from_info, to_info, s3=s3)\n continue\n\n if to_info['scheme'] != 'local':\n raise NotImplementedError\n\n Logger.debug(\"Downloading '{}/{}' to '{}'\".format(from_info['bucket'],\n from_info['key'],\n to_info['path']))\n\n tmp_file = self.tmp_file(to_info['path'])\n if not name:\n name = os.path.basename(to_info['path'])\n\n if no_progress_bar:\n cb = None\n else:\n total = s3.head_object(Bucket=from_info['bucket'],\n Key=from_info['key'])['ContentLength']\n cb = Callback(name, total)\n\n self._makedirs(to_info['path'])\n\n try:\n s3.download_file(from_info['bucket'], from_info['key'], tmp_file, Callback=cb)\n except Exception as exc:\n Logger.error(\"Failed to download '{}/{}'\".format(from_info['bucket'],\n from_info['key']), exc)\n return\n\n os.rename(tmp_file, to_info['path'])\n\n if not no_progress_bar:\n progress.finish_target(name)\n\n def _path_to_etag(self, path):\n relpath = posixpath.relpath(path, self.prefix)\n return posixpath.dirname(relpath) + posixpath.basename(relpath)\n\n def _all(self):\n objects = self.s3.Bucket(self.bucket).objects.filter(Prefix=self.prefix)\n return [self._path_to_etag(obj.key) for obj 
in objects]\n\n def gc(self, checksum_infos):\n used_etags = [info[self.PARAM_ETAG] for info in checksum_infos['s3']]\n used_etags += [info[RemoteLOCAL.PARAM_MD5] for info in checksum_infos['local']]\n\n for etag in self._all():\n if etag in used_etags:\n continue\n path_info = {'scheme': 's3',\n 'key': posixpath.join(self.prefix, etag[0:2], etag[2:]),\n 'bucket': self.bucket}\n self.remove(path_info)\n", "path": "dvc/remote/s3.py"}]} | 3,019 | 773 |
gh_patches_debug_24097 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-6360 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Broken handling of unregistration in the event log
**Bug description**
On the dev branch, when the event log contains actions performed by a user who has since unregistered, the page no longer works: it raises an error saying that a profile URL cannot be `reverse`d. The same probably happens with other fields (presumably all the ones that refer to users).
**How to reproduce?**
* Log in as a user.
* Create an opinion post (billet) and publish it.
* Unregister.
* Log back in as a staff member and open the event log: the page does not display and a "NoReverseMatch" error is raised.
**Expected behavior**
It should work without errors despite the unregistration. The fix is probably the same as in all similar cases: on unregistration, update the event fields that refer to the unregistering user so that they point to `anonymous` (a minimal sketch of such an update follows the issue).
--- END ISSUE ---
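A minimal sketch of the kind of update described above, reusing the bulk-`update()` pattern the project already applies to comments and alerts on unregistration; the `Event` import path and field names are assumptions here, and the placeholder account is left as a parameter:

```python
# Hedged sketch, not the actual fix: reassign event-log references when a
# member unregisters so the page never has to reverse a deleted profile URL.
from zds.tutorialv2.models.events import Event  # assumed model location


def anonymize_events(current, substitute):
    """Point every Event row that references `current` at the placeholder account."""
    Event.objects.filter(performer=current).update(performer=substitute)
    Event.objects.filter(author=current).update(author=substitute)
    Event.objects.filter(contributor=current).update(contributor=substitute)
```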
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zds/member/views/register.py`
Content:
```
1 from datetime import datetime, timedelta
2
3 from oauth2_provider.models import AccessToken
4
5 from django.conf import settings
6 from django.contrib import messages
7 from django.contrib.auth import logout
8 from django.contrib.auth.decorators import login_required
9 from django.contrib.auth.models import User
10 from django.core.mail import EmailMultiAlternatives
11 from django.urls import reverse
12 from django.db import transaction
13 from django.db.models import Q
14 from django.shortcuts import redirect, render, get_object_or_404
15 from django.template.loader import render_to_string
16 from django.utils.translation import gettext_lazy as _
17 from django.views.decorators.http import require_POST
18 from django.views.generic import CreateView, FormView
19
20 from zds.forum.models import Topic
21 from zds.gallery.models import UserGallery
22 from zds.member import NEW_ACCOUNT
23 from zds.member.commons import (
24 ProfileCreate,
25 TokenGenerator,
26 )
27 from zds.member.forms import RegisterForm, UsernameAndEmailForm, LoginForm
28 from zds.member.models import (
29 Profile,
30 TokenRegister,
31 KarmaNote,
32 Ban,
33 BannedEmailProvider,
34 NewEmailProvider,
35 )
36 from zds.member.views import get_client_ip
37 from zds.mp.models import PrivatePost, PrivateTopic
38 from zds.tutorialv2.models.database import PickListOperation
39 from zds.utils.models import (
40 Comment,
41 CommentVote,
42 Alert,
43 CommentEdit,
44 HatRequest,
45 get_hat_from_settings,
46 )
47 import logging
48
49 from zds.mp.utils import send_mp
50
51
52 class RegisterView(CreateView, ProfileCreate, TokenGenerator):
53 """Create a profile."""
54
55 form_class = RegisterForm
56 template_name = "member/register/index.html"
57
58 def dispatch(self, *args, **kwargs):
59 return super().dispatch(*args, **kwargs)
60
61 def get_object(self, queryset=None):
62 return get_object_or_404(Profile, user=self.request.user)
63
64 def get_form(self, form_class=RegisterForm):
65 return form_class()
66
67 def post(self, request, *args, **kwargs):
68 form = self.form_class(request.POST)
69
70 if form.is_valid():
71 return self.form_valid(form)
72 return render(request, self.template_name, {"form": form})
73
74 def form_valid(self, form):
75 profile = self.create_profile(form.data)
76 profile.last_ip_address = get_client_ip(self.request)
77 profile.username_skeleton = Profile.find_username_skeleton(profile.user.username)
78 self.save_profile(profile)
79 token = self.generate_token(profile.user)
80 try:
81 self.send_email(token, profile.user)
82 except Exception as e:
83 logging.getLogger(__name__).warning("Mail not sent", exc_info=e)
84 messages.warning(self.request, _("Impossible d'envoyer l'email."))
85 self.object = None
86 return self.form_invalid(form)
87 return render(self.request, self.get_success_template())
88
89 def get_success_template(self):
90 return "member/register/success.html"
91
92
93 class SendValidationEmailView(FormView, TokenGenerator):
94 """Send a validation email on demand."""
95
96 form_class = UsernameAndEmailForm
97 template_name = "member/register/send_validation_email.html"
98
99 usr = None
100
101 def get_user(self, username, email):
102
103 if username:
104 self.usr = get_object_or_404(User, username=username)
105
106 elif email:
107 self.usr = get_object_or_404(User, email=email)
108
109 def get_form(self, form_class=UsernameAndEmailForm):
110 return form_class()
111
112 def post(self, request, *args, **kwargs):
113 form = self.form_class(request.POST)
114
115 if form.is_valid():
116 # Fetch the user
117 self.get_user(form.data["username"], form.data["email"])
118
119 # User should not already be active
120 if not self.usr.is_active:
121 return self.form_valid(form)
122 else:
123 if form.data["username"]:
124 form.errors["username"] = form.error_class([self.get_error_message()])
125 else:
126 form.errors["email"] = form.error_class([self.get_error_message()])
127
128 return render(request, self.template_name, {"form": form})
129
130 def form_valid(self, form):
131 # Delete old token
132 token = TokenRegister.objects.filter(user=self.usr)
133 if token.count() >= 1:
134 token.all().delete()
135
136 # Generate new token and send email
137 token = self.generate_token(self.usr)
138 try:
139 self.send_email(token, self.usr)
140 except Exception as e:
141 logging.getLogger(__name__).warning("Mail not sent", exc_info=e)
142 messages.warning(_("Impossible d'envoyer l'email."))
143 return self.form_invalid(form)
144
145 return render(self.request, self.get_success_template())
146
147 def get_success_template(self):
148 return "member/register/send_validation_email_success.html"
149
150 def get_error_message(self):
151 return _("Le compte est déjà activé.")
152
153
154 @login_required
155 def warning_unregister(request):
156 """
157 Display a warning page showing what will happen when the user
158 unregisters.
159 """
160 return render(request, "member/settings/unregister.html", {"user": request.user})
161
162
163 @login_required
164 @require_POST
165 @transaction.atomic
166 def unregister(request):
167 """Allow members to unregister."""
168
169 anonymous = get_object_or_404(User, username=settings.ZDS_APP["member"]["anonymous_account"])
170 external = get_object_or_404(User, username=settings.ZDS_APP["member"]["external_account"])
171 current = request.user
172 # Nota : as of v21 all about content paternity is held by a proper receiver in zds.tutorialv2.models.database
173 PickListOperation.objects.filter(staff_user=current).update(staff_user=anonymous)
174 PickListOperation.objects.filter(canceler_user=current).update(canceler_user=anonymous)
175 # Comments likes / dislikes
176 votes = CommentVote.objects.filter(user=current)
177 for vote in votes:
178 if vote.positive:
179 vote.comment.like -= 1
180 else:
181 vote.comment.dislike -= 1
182 vote.comment.save()
183 votes.delete()
184 # All contents anonymization
185 Comment.objects.filter(author=current).update(author=anonymous)
186 PrivatePost.objects.filter(author=current).update(author=anonymous)
187 CommentEdit.objects.filter(editor=current).update(editor=anonymous)
188 CommentEdit.objects.filter(deleted_by=current).update(deleted_by=anonymous)
189 # Karma notes, alerts and sanctions anonymization (to keep them)
190 KarmaNote.objects.filter(moderator=current).update(moderator=anonymous)
191 Ban.objects.filter(moderator=current).update(moderator=anonymous)
192 Alert.objects.filter(author=current).update(author=anonymous)
193 Alert.objects.filter(moderator=current).update(moderator=anonymous)
194 BannedEmailProvider.objects.filter(moderator=current).update(moderator=anonymous)
195 # Solved hat requests anonymization
196 HatRequest.objects.filter(moderator=current).update(moderator=anonymous)
197 # In case current user has been moderator in the past
198 Comment.objects.filter(editor=current).update(editor=anonymous)
199 for topic in PrivateTopic.objects.filter(Q(author=current) | Q(participants__in=[current])):
200 if topic.one_participant_remaining():
201 topic.delete()
202 else:
203 topic.remove_participant(current)
204 topic.save()
205 Topic.objects.filter(solved_by=current).update(solved_by=anonymous)
206 Topic.objects.filter(author=current).update(author=anonymous)
207
208 # Any content exclusively owned by the unregistering member will
209 # be deleted just before the User object (using a pre_delete
210 # receiver).
211 #
212 # Regarding galleries, there are two cases:
213 #
214 # - "personal galleries" with one owner (the unregistering
215 # user). The user's ownership is removed and replaced by an
216 # anonymous user in order not to lost the gallery.
217 #
218 # - "personal galleries" with many other owners. It is safe to
219 # remove the user's ownership, the gallery won't be lost.
220
221 galleries = UserGallery.objects.filter(user=current)
222 for gallery in galleries:
223 if gallery.gallery.get_linked_users().count() == 1:
224 anonymous_gallery = UserGallery()
225 anonymous_gallery.user = external
226 anonymous_gallery.mode = "w"
227 anonymous_gallery.gallery = gallery.gallery
228 anonymous_gallery.save()
229 galleries.delete()
230
231 # Remove API access (tokens + applications)
232 for token in AccessToken.objects.filter(user=current):
233 token.revoke()
234
235 logout(request)
236 User.objects.filter(pk=current.pk).delete()
237 return redirect(reverse("homepage"))
238
239
240 def activate_account(request):
241 """Activate an account with a token."""
242 try:
243 token = request.GET["token"]
244 except KeyError:
245 return redirect(reverse("homepage"))
246 token = get_object_or_404(TokenRegister, token=token)
247 usr = token.user
248
249 # User can't confirm their request if their account is already active
250 if usr.is_active:
251 return render(request, "member/register/token_already_used.html")
252
253 # User can't confirm their request if it is too late
254 if datetime.now() > token.date_end:
255 return render(request, "member/register/token_failed.html", {"token": token})
256 usr.is_active = True
257 usr.save()
258
259 # Send welcome message
260 bot = get_object_or_404(User, username=settings.ZDS_APP["member"]["bot_account"])
261 msg = render_to_string(
262 "member/messages/account_activated.md",
263 {
264 "username": usr.username,
265 "site_name": settings.ZDS_APP["site"]["literal_name"],
266 "library_url": settings.ZDS_APP["site"]["url"] + reverse("publication:list"),
267 "opinions_url": settings.ZDS_APP["site"]["url"] + reverse("opinion:list"),
268 "forums_url": settings.ZDS_APP["site"]["url"] + reverse("forum:cats-forums-list"),
269 },
270 )
271
272 send_mp(
273 bot,
274 [usr],
275 _("Bienvenue sur {}").format(settings.ZDS_APP["site"]["literal_name"]),
276 _("Le manuel du nouveau membre"),
277 msg,
278 send_by_mail=False,
279 leave=True,
280 direct=False,
281 hat=get_hat_from_settings("moderation"),
282 )
283 token.delete()
284
285 # Create an alert for the staff if it's a new provider
286 if usr.email:
287 provider = usr.email.split("@")[-1].lower()
288 if (
289 not NewEmailProvider.objects.filter(provider=provider).exists()
290 and not User.objects.filter(email__iendswith=f"@{provider}").exclude(pk=usr.pk).exists()
291 ):
292 NewEmailProvider.objects.create(user=usr, provider=provider, use=NEW_ACCOUNT)
293
294 form = LoginForm(initial={"username": usr.username})
295 return render(request, "member/register/token_success.html", {"usr": usr, "form": form})
296
297
298 def generate_token_account(request):
299 """Generate a token for an account."""
300
301 try:
302 token = request.GET["token"]
303 except KeyError:
304 return redirect(reverse("homepage"))
305 token = get_object_or_404(TokenRegister, token=token)
306
307 # Push date
308
309 date_end = datetime.now() + timedelta(days=0, hours=1, minutes=0, seconds=0)
310 token.date_end = date_end
311 token.save()
312
313 # Send email
314 subject = _("{} - Confirmation d'inscription").format(settings.ZDS_APP["site"]["literal_name"])
315 from_email = "{} <{}>".format(settings.ZDS_APP["site"]["literal_name"], settings.ZDS_APP["site"]["email_noreply"])
316 context = {
317 "username": token.user.username,
318 "site_url": settings.ZDS_APP["site"]["url"],
319 "site_name": settings.ZDS_APP["site"]["literal_name"],
320 "url": settings.ZDS_APP["site"]["url"] + token.get_absolute_url(),
321 }
322 message_html = render_to_string("email/member/confirm_registration.html", context)
323 message_txt = render_to_string("email/member/confirm_registration.txt", context)
324
325 msg = EmailMultiAlternatives(subject, message_txt, from_email, [token.user.email])
326 msg.attach_alternative(message_html, "text/html")
327 try:
328 msg.send()
329 except:
330 msg = None
331 return render(request, "member/register/success.html", {})
332
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zds/member/views/register.py b/zds/member/views/register.py
--- a/zds/member/views/register.py
+++ b/zds/member/views/register.py
@@ -36,6 +36,7 @@
from zds.member.views import get_client_ip
from zds.mp.models import PrivatePost, PrivateTopic
from zds.tutorialv2.models.database import PickListOperation
+from zds.tutorialv2.models.events import Event
from zds.utils.models import (
Comment,
CommentVote,
@@ -172,6 +173,11 @@
# Nota : as of v21 all about content paternity is held by a proper receiver in zds.tutorialv2.models.database
PickListOperation.objects.filter(staff_user=current).update(staff_user=anonymous)
PickListOperation.objects.filter(canceler_user=current).update(canceler_user=anonymous)
+
+ Event.objects.filter(performer=current).update(performer=external)
+ Event.objects.filter(author=current).update(author=external)
+ Event.objects.filter(contributor=current).update(contributor=external)
+
# Comments likes / dislikes
votes = CommentVote.objects.filter(user=current)
for vote in votes:
| {"golden_diff": "diff --git a/zds/member/views/register.py b/zds/member/views/register.py\n--- a/zds/member/views/register.py\n+++ b/zds/member/views/register.py\n@@ -36,6 +36,7 @@\n from zds.member.views import get_client_ip\n from zds.mp.models import PrivatePost, PrivateTopic\n from zds.tutorialv2.models.database import PickListOperation\n+from zds.tutorialv2.models.events import Event\n from zds.utils.models import (\n Comment,\n CommentVote,\n@@ -172,6 +173,11 @@\n # Nota : as of v21 all about content paternity is held by a proper receiver in zds.tutorialv2.models.database\n PickListOperation.objects.filter(staff_user=current).update(staff_user=anonymous)\n PickListOperation.objects.filter(canceler_user=current).update(canceler_user=anonymous)\n+\n+ Event.objects.filter(performer=current).update(performer=external)\n+ Event.objects.filter(author=current).update(author=external)\n+ Event.objects.filter(contributor=current).update(contributor=external)\n+\n # Comments likes / dislikes\n votes = CommentVote.objects.filter(user=current)\n for vote in votes:\n", "issue": "Mauvaise gestion de la d\u00e9sinscription dans le journal des \u00e9v\u00e9nements\n**Description du bug**\r\n\r\nSur la branche de d\u00e9v, quand on a des actions du journal d'\u00e9v\u00e9nements qui ont \u00e9t\u00e9 faites par un utilisateur qui s'est d\u00e9sinscrit, la page ne marche plus avec une erreur comme quoi l'URL d'un profil ne peut pas \u00eatre `reverse`. \u00c7a le fait probablement aussi avec d'autres champs (tout ceux relatifs \u00e0 des utilisateurs probablement).\r\n\r\n**Comment reproduire ?**\r\n\r\n* Se connecter avec un utilisateur.\r\n* Cr\u00e9er un billet et le publier.\r\n* Se d\u00e9sinscrire.\r\n* Se reconnecter en staff et aller voir le journal d'\u00e9v\u00e9nements : la page ne s'affiche pas et on a une erreur \"NoReverseMatch\".\r\n\r\n**Comportement attendu**\r\n\r\nIl faudrait que \u00e7a fonctionne sans erreur malgr\u00e9 la d\u00e9sinscription. 
La solution \u00e0 adopter est probablement la m\u00eame que pour tous les cas de figure similaires : lors de la d\u00e9sinscription, mettre \u00e0 jour les champs des \u00e9v\u00e9nements relatifs \u00e0 l'utilisateur qui se d\u00e9sinscrit vers `anonymous`.\r\n\n", "before_files": [{"content": "from datetime import datetime, timedelta\n\nfrom oauth2_provider.models import AccessToken\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth import logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.urls import reverse\nfrom django.db import transaction\nfrom django.db.models import Q\nfrom django.shortcuts import redirect, render, get_object_or_404\nfrom django.template.loader import render_to_string\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.decorators.http import require_POST\nfrom django.views.generic import CreateView, FormView\n\nfrom zds.forum.models import Topic\nfrom zds.gallery.models import UserGallery\nfrom zds.member import NEW_ACCOUNT\nfrom zds.member.commons import (\n ProfileCreate,\n TokenGenerator,\n)\nfrom zds.member.forms import RegisterForm, UsernameAndEmailForm, LoginForm\nfrom zds.member.models import (\n Profile,\n TokenRegister,\n KarmaNote,\n Ban,\n BannedEmailProvider,\n NewEmailProvider,\n)\nfrom zds.member.views import get_client_ip\nfrom zds.mp.models import PrivatePost, PrivateTopic\nfrom zds.tutorialv2.models.database import PickListOperation\nfrom zds.utils.models import (\n Comment,\n CommentVote,\n Alert,\n CommentEdit,\n HatRequest,\n get_hat_from_settings,\n)\nimport logging\n\nfrom zds.mp.utils import send_mp\n\n\nclass RegisterView(CreateView, ProfileCreate, TokenGenerator):\n \"\"\"Create a profile.\"\"\"\n\n form_class = RegisterForm\n template_name = \"member/register/index.html\"\n\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n def get_object(self, queryset=None):\n return get_object_or_404(Profile, user=self.request.user)\n\n def get_form(self, form_class=RegisterForm):\n return form_class()\n\n def post(self, request, *args, **kwargs):\n form = self.form_class(request.POST)\n\n if form.is_valid():\n return self.form_valid(form)\n return render(request, self.template_name, {\"form\": form})\n\n def form_valid(self, form):\n profile = self.create_profile(form.data)\n profile.last_ip_address = get_client_ip(self.request)\n profile.username_skeleton = Profile.find_username_skeleton(profile.user.username)\n self.save_profile(profile)\n token = self.generate_token(profile.user)\n try:\n self.send_email(token, profile.user)\n except Exception as e:\n logging.getLogger(__name__).warning(\"Mail not sent\", exc_info=e)\n messages.warning(self.request, _(\"Impossible d'envoyer l'email.\"))\n self.object = None\n return self.form_invalid(form)\n return render(self.request, self.get_success_template())\n\n def get_success_template(self):\n return \"member/register/success.html\"\n\n\nclass SendValidationEmailView(FormView, TokenGenerator):\n \"\"\"Send a validation email on demand.\"\"\"\n\n form_class = UsernameAndEmailForm\n template_name = \"member/register/send_validation_email.html\"\n\n usr = None\n\n def get_user(self, username, email):\n\n if username:\n self.usr = get_object_or_404(User, username=username)\n\n elif email:\n self.usr = get_object_or_404(User, email=email)\n\n def get_form(self, 
form_class=UsernameAndEmailForm):\n return form_class()\n\n def post(self, request, *args, **kwargs):\n form = self.form_class(request.POST)\n\n if form.is_valid():\n # Fetch the user\n self.get_user(form.data[\"username\"], form.data[\"email\"])\n\n # User should not already be active\n if not self.usr.is_active:\n return self.form_valid(form)\n else:\n if form.data[\"username\"]:\n form.errors[\"username\"] = form.error_class([self.get_error_message()])\n else:\n form.errors[\"email\"] = form.error_class([self.get_error_message()])\n\n return render(request, self.template_name, {\"form\": form})\n\n def form_valid(self, form):\n # Delete old token\n token = TokenRegister.objects.filter(user=self.usr)\n if token.count() >= 1:\n token.all().delete()\n\n # Generate new token and send email\n token = self.generate_token(self.usr)\n try:\n self.send_email(token, self.usr)\n except Exception as e:\n logging.getLogger(__name__).warning(\"Mail not sent\", exc_info=e)\n messages.warning(_(\"Impossible d'envoyer l'email.\"))\n return self.form_invalid(form)\n\n return render(self.request, self.get_success_template())\n\n def get_success_template(self):\n return \"member/register/send_validation_email_success.html\"\n\n def get_error_message(self):\n return _(\"Le compte est d\u00e9j\u00e0 activ\u00e9.\")\n\n\n@login_required\ndef warning_unregister(request):\n \"\"\"\n Display a warning page showing what will happen when the user\n unregisters.\n \"\"\"\n return render(request, \"member/settings/unregister.html\", {\"user\": request.user})\n\n\n@login_required\n@require_POST\[email protected]\ndef unregister(request):\n \"\"\"Allow members to unregister.\"\"\"\n\n anonymous = get_object_or_404(User, username=settings.ZDS_APP[\"member\"][\"anonymous_account\"])\n external = get_object_or_404(User, username=settings.ZDS_APP[\"member\"][\"external_account\"])\n current = request.user\n # Nota : as of v21 all about content paternity is held by a proper receiver in zds.tutorialv2.models.database\n PickListOperation.objects.filter(staff_user=current).update(staff_user=anonymous)\n PickListOperation.objects.filter(canceler_user=current).update(canceler_user=anonymous)\n # Comments likes / dislikes\n votes = CommentVote.objects.filter(user=current)\n for vote in votes:\n if vote.positive:\n vote.comment.like -= 1\n else:\n vote.comment.dislike -= 1\n vote.comment.save()\n votes.delete()\n # All contents anonymization\n Comment.objects.filter(author=current).update(author=anonymous)\n PrivatePost.objects.filter(author=current).update(author=anonymous)\n CommentEdit.objects.filter(editor=current).update(editor=anonymous)\n CommentEdit.objects.filter(deleted_by=current).update(deleted_by=anonymous)\n # Karma notes, alerts and sanctions anonymization (to keep them)\n KarmaNote.objects.filter(moderator=current).update(moderator=anonymous)\n Ban.objects.filter(moderator=current).update(moderator=anonymous)\n Alert.objects.filter(author=current).update(author=anonymous)\n Alert.objects.filter(moderator=current).update(moderator=anonymous)\n BannedEmailProvider.objects.filter(moderator=current).update(moderator=anonymous)\n # Solved hat requests anonymization\n HatRequest.objects.filter(moderator=current).update(moderator=anonymous)\n # In case current user has been moderator in the past\n Comment.objects.filter(editor=current).update(editor=anonymous)\n for topic in PrivateTopic.objects.filter(Q(author=current) | Q(participants__in=[current])):\n if topic.one_participant_remaining():\n topic.delete()\n else:\n 
topic.remove_participant(current)\n topic.save()\n Topic.objects.filter(solved_by=current).update(solved_by=anonymous)\n Topic.objects.filter(author=current).update(author=anonymous)\n\n # Any content exclusively owned by the unregistering member will\n # be deleted just before the User object (using a pre_delete\n # receiver).\n #\n # Regarding galleries, there are two cases:\n #\n # - \"personal galleries\" with one owner (the unregistering\n # user). The user's ownership is removed and replaced by an\n # anonymous user in order not to lost the gallery.\n #\n # - \"personal galleries\" with many other owners. It is safe to\n # remove the user's ownership, the gallery won't be lost.\n\n galleries = UserGallery.objects.filter(user=current)\n for gallery in galleries:\n if gallery.gallery.get_linked_users().count() == 1:\n anonymous_gallery = UserGallery()\n anonymous_gallery.user = external\n anonymous_gallery.mode = \"w\"\n anonymous_gallery.gallery = gallery.gallery\n anonymous_gallery.save()\n galleries.delete()\n\n # Remove API access (tokens + applications)\n for token in AccessToken.objects.filter(user=current):\n token.revoke()\n\n logout(request)\n User.objects.filter(pk=current.pk).delete()\n return redirect(reverse(\"homepage\"))\n\n\ndef activate_account(request):\n \"\"\"Activate an account with a token.\"\"\"\n try:\n token = request.GET[\"token\"]\n except KeyError:\n return redirect(reverse(\"homepage\"))\n token = get_object_or_404(TokenRegister, token=token)\n usr = token.user\n\n # User can't confirm their request if their account is already active\n if usr.is_active:\n return render(request, \"member/register/token_already_used.html\")\n\n # User can't confirm their request if it is too late\n if datetime.now() > token.date_end:\n return render(request, \"member/register/token_failed.html\", {\"token\": token})\n usr.is_active = True\n usr.save()\n\n # Send welcome message\n bot = get_object_or_404(User, username=settings.ZDS_APP[\"member\"][\"bot_account\"])\n msg = render_to_string(\n \"member/messages/account_activated.md\",\n {\n \"username\": usr.username,\n \"site_name\": settings.ZDS_APP[\"site\"][\"literal_name\"],\n \"library_url\": settings.ZDS_APP[\"site\"][\"url\"] + reverse(\"publication:list\"),\n \"opinions_url\": settings.ZDS_APP[\"site\"][\"url\"] + reverse(\"opinion:list\"),\n \"forums_url\": settings.ZDS_APP[\"site\"][\"url\"] + reverse(\"forum:cats-forums-list\"),\n },\n )\n\n send_mp(\n bot,\n [usr],\n _(\"Bienvenue sur {}\").format(settings.ZDS_APP[\"site\"][\"literal_name\"]),\n _(\"Le manuel du nouveau membre\"),\n msg,\n send_by_mail=False,\n leave=True,\n direct=False,\n hat=get_hat_from_settings(\"moderation\"),\n )\n token.delete()\n\n # Create an alert for the staff if it's a new provider\n if usr.email:\n provider = usr.email.split(\"@\")[-1].lower()\n if (\n not NewEmailProvider.objects.filter(provider=provider).exists()\n and not User.objects.filter(email__iendswith=f\"@{provider}\").exclude(pk=usr.pk).exists()\n ):\n NewEmailProvider.objects.create(user=usr, provider=provider, use=NEW_ACCOUNT)\n\n form = LoginForm(initial={\"username\": usr.username})\n return render(request, \"member/register/token_success.html\", {\"usr\": usr, \"form\": form})\n\n\ndef generate_token_account(request):\n \"\"\"Generate a token for an account.\"\"\"\n\n try:\n token = request.GET[\"token\"]\n except KeyError:\n return redirect(reverse(\"homepage\"))\n token = get_object_or_404(TokenRegister, token=token)\n\n # Push date\n\n date_end = datetime.now() + 
timedelta(days=0, hours=1, minutes=0, seconds=0)\n token.date_end = date_end\n token.save()\n\n # Send email\n subject = _(\"{} - Confirmation d'inscription\").format(settings.ZDS_APP[\"site\"][\"literal_name\"])\n from_email = \"{} <{}>\".format(settings.ZDS_APP[\"site\"][\"literal_name\"], settings.ZDS_APP[\"site\"][\"email_noreply\"])\n context = {\n \"username\": token.user.username,\n \"site_url\": settings.ZDS_APP[\"site\"][\"url\"],\n \"site_name\": settings.ZDS_APP[\"site\"][\"literal_name\"],\n \"url\": settings.ZDS_APP[\"site\"][\"url\"] + token.get_absolute_url(),\n }\n message_html = render_to_string(\"email/member/confirm_registration.html\", context)\n message_txt = render_to_string(\"email/member/confirm_registration.txt\", context)\n\n msg = EmailMultiAlternatives(subject, message_txt, from_email, [token.user.email])\n msg.attach_alternative(message_html, \"text/html\")\n try:\n msg.send()\n except:\n msg = None\n return render(request, \"member/register/success.html\", {})\n", "path": "zds/member/views/register.py"}], "after_files": [{"content": "from datetime import datetime, timedelta\n\nfrom oauth2_provider.models import AccessToken\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth import logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.urls import reverse\nfrom django.db import transaction\nfrom django.db.models import Q\nfrom django.shortcuts import redirect, render, get_object_or_404\nfrom django.template.loader import render_to_string\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.decorators.http import require_POST\nfrom django.views.generic import CreateView, FormView\n\nfrom zds.forum.models import Topic\nfrom zds.gallery.models import UserGallery\nfrom zds.member import NEW_ACCOUNT\nfrom zds.member.commons import (\n ProfileCreate,\n TokenGenerator,\n)\nfrom zds.member.forms import RegisterForm, UsernameAndEmailForm, LoginForm\nfrom zds.member.models import (\n Profile,\n TokenRegister,\n KarmaNote,\n Ban,\n BannedEmailProvider,\n NewEmailProvider,\n)\nfrom zds.member.views import get_client_ip\nfrom zds.mp.models import PrivatePost, PrivateTopic\nfrom zds.tutorialv2.models.database import PickListOperation\nfrom zds.tutorialv2.models.events import Event\nfrom zds.utils.models import (\n Comment,\n CommentVote,\n Alert,\n CommentEdit,\n HatRequest,\n get_hat_from_settings,\n)\nimport logging\n\nfrom zds.mp.utils import send_mp\n\n\nclass RegisterView(CreateView, ProfileCreate, TokenGenerator):\n \"\"\"Create a profile.\"\"\"\n\n form_class = RegisterForm\n template_name = \"member/register/index.html\"\n\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n def get_object(self, queryset=None):\n return get_object_or_404(Profile, user=self.request.user)\n\n def get_form(self, form_class=RegisterForm):\n return form_class()\n\n def post(self, request, *args, **kwargs):\n form = self.form_class(request.POST)\n\n if form.is_valid():\n return self.form_valid(form)\n return render(request, self.template_name, {\"form\": form})\n\n def form_valid(self, form):\n profile = self.create_profile(form.data)\n profile.last_ip_address = get_client_ip(self.request)\n profile.username_skeleton = Profile.find_username_skeleton(profile.user.username)\n self.save_profile(profile)\n token = self.generate_token(profile.user)\n try:\n 
self.send_email(token, profile.user)\n except Exception as e:\n logging.getLogger(__name__).warning(\"Mail not sent\", exc_info=e)\n messages.warning(self.request, _(\"Impossible d'envoyer l'email.\"))\n self.object = None\n return self.form_invalid(form)\n return render(self.request, self.get_success_template())\n\n def get_success_template(self):\n return \"member/register/success.html\"\n\n\nclass SendValidationEmailView(FormView, TokenGenerator):\n \"\"\"Send a validation email on demand.\"\"\"\n\n form_class = UsernameAndEmailForm\n template_name = \"member/register/send_validation_email.html\"\n\n usr = None\n\n def get_user(self, username, email):\n\n if username:\n self.usr = get_object_or_404(User, username=username)\n\n elif email:\n self.usr = get_object_or_404(User, email=email)\n\n def get_form(self, form_class=UsernameAndEmailForm):\n return form_class()\n\n def post(self, request, *args, **kwargs):\n form = self.form_class(request.POST)\n\n if form.is_valid():\n # Fetch the user\n self.get_user(form.data[\"username\"], form.data[\"email\"])\n\n # User should not already be active\n if not self.usr.is_active:\n return self.form_valid(form)\n else:\n if form.data[\"username\"]:\n form.errors[\"username\"] = form.error_class([self.get_error_message()])\n else:\n form.errors[\"email\"] = form.error_class([self.get_error_message()])\n\n return render(request, self.template_name, {\"form\": form})\n\n def form_valid(self, form):\n # Delete old token\n token = TokenRegister.objects.filter(user=self.usr)\n if token.count() >= 1:\n token.all().delete()\n\n # Generate new token and send email\n token = self.generate_token(self.usr)\n try:\n self.send_email(token, self.usr)\n except Exception as e:\n logging.getLogger(__name__).warning(\"Mail not sent\", exc_info=e)\n messages.warning(_(\"Impossible d'envoyer l'email.\"))\n return self.form_invalid(form)\n\n return render(self.request, self.get_success_template())\n\n def get_success_template(self):\n return \"member/register/send_validation_email_success.html\"\n\n def get_error_message(self):\n return _(\"Le compte est d\u00e9j\u00e0 activ\u00e9.\")\n\n\n@login_required\ndef warning_unregister(request):\n \"\"\"\n Display a warning page showing what will happen when the user\n unregisters.\n \"\"\"\n return render(request, \"member/settings/unregister.html\", {\"user\": request.user})\n\n\n@login_required\n@require_POST\[email protected]\ndef unregister(request):\n \"\"\"Allow members to unregister.\"\"\"\n\n anonymous = get_object_or_404(User, username=settings.ZDS_APP[\"member\"][\"anonymous_account\"])\n external = get_object_or_404(User, username=settings.ZDS_APP[\"member\"][\"external_account\"])\n current = request.user\n # Nota : as of v21 all about content paternity is held by a proper receiver in zds.tutorialv2.models.database\n PickListOperation.objects.filter(staff_user=current).update(staff_user=anonymous)\n PickListOperation.objects.filter(canceler_user=current).update(canceler_user=anonymous)\n\n Event.objects.filter(performer=current).update(performer=external)\n Event.objects.filter(author=current).update(author=external)\n Event.objects.filter(contributor=current).update(contributor=external)\n\n # Comments likes / dislikes\n votes = CommentVote.objects.filter(user=current)\n for vote in votes:\n if vote.positive:\n vote.comment.like -= 1\n else:\n vote.comment.dislike -= 1\n vote.comment.save()\n votes.delete()\n # All contents anonymization\n Comment.objects.filter(author=current).update(author=anonymous)\n 
PrivatePost.objects.filter(author=current).update(author=anonymous)\n CommentEdit.objects.filter(editor=current).update(editor=anonymous)\n CommentEdit.objects.filter(deleted_by=current).update(deleted_by=anonymous)\n # Karma notes, alerts and sanctions anonymization (to keep them)\n KarmaNote.objects.filter(moderator=current).update(moderator=anonymous)\n Ban.objects.filter(moderator=current).update(moderator=anonymous)\n Alert.objects.filter(author=current).update(author=anonymous)\n Alert.objects.filter(moderator=current).update(moderator=anonymous)\n BannedEmailProvider.objects.filter(moderator=current).update(moderator=anonymous)\n # Solved hat requests anonymization\n HatRequest.objects.filter(moderator=current).update(moderator=anonymous)\n # In case current user has been moderator in the past\n Comment.objects.filter(editor=current).update(editor=anonymous)\n for topic in PrivateTopic.objects.filter(Q(author=current) | Q(participants__in=[current])):\n if topic.one_participant_remaining():\n topic.delete()\n else:\n topic.remove_participant(current)\n topic.save()\n Topic.objects.filter(solved_by=current).update(solved_by=anonymous)\n Topic.objects.filter(author=current).update(author=anonymous)\n\n # Any content exclusively owned by the unregistering member will\n # be deleted just before the User object (using a pre_delete\n # receiver).\n #\n # Regarding galleries, there are two cases:\n #\n # - \"personal galleries\" with one owner (the unregistering\n # user). The user's ownership is removed and replaced by an\n # anonymous user in order not to lost the gallery.\n #\n # - \"personal galleries\" with many other owners. It is safe to\n # remove the user's ownership, the gallery won't be lost.\n\n galleries = UserGallery.objects.filter(user=current)\n for gallery in galleries:\n if gallery.gallery.get_linked_users().count() == 1:\n anonymous_gallery = UserGallery()\n anonymous_gallery.user = external\n anonymous_gallery.mode = \"w\"\n anonymous_gallery.gallery = gallery.gallery\n anonymous_gallery.save()\n galleries.delete()\n\n # Remove API access (tokens + applications)\n for token in AccessToken.objects.filter(user=current):\n token.revoke()\n\n logout(request)\n User.objects.filter(pk=current.pk).delete()\n return redirect(reverse(\"homepage\"))\n\n\ndef activate_account(request):\n \"\"\"Activate an account with a token.\"\"\"\n try:\n token = request.GET[\"token\"]\n except KeyError:\n return redirect(reverse(\"homepage\"))\n token = get_object_or_404(TokenRegister, token=token)\n usr = token.user\n\n # User can't confirm their request if their account is already active\n if usr.is_active:\n return render(request, \"member/register/token_already_used.html\")\n\n # User can't confirm their request if it is too late\n if datetime.now() > token.date_end:\n return render(request, \"member/register/token_failed.html\", {\"token\": token})\n usr.is_active = True\n usr.save()\n\n # Send welcome message\n bot = get_object_or_404(User, username=settings.ZDS_APP[\"member\"][\"bot_account\"])\n msg = render_to_string(\n \"member/messages/account_activated.md\",\n {\n \"username\": usr.username,\n \"site_name\": settings.ZDS_APP[\"site\"][\"literal_name\"],\n \"library_url\": settings.ZDS_APP[\"site\"][\"url\"] + reverse(\"publication:list\"),\n \"opinions_url\": settings.ZDS_APP[\"site\"][\"url\"] + reverse(\"opinion:list\"),\n \"forums_url\": settings.ZDS_APP[\"site\"][\"url\"] + reverse(\"forum:cats-forums-list\"),\n },\n )\n\n send_mp(\n bot,\n [usr],\n _(\"Bienvenue sur 
{}\").format(settings.ZDS_APP[\"site\"][\"literal_name\"]),\n _(\"Le manuel du nouveau membre\"),\n msg,\n send_by_mail=False,\n leave=True,\n direct=False,\n hat=get_hat_from_settings(\"moderation\"),\n )\n token.delete()\n\n # Create an alert for the staff if it's a new provider\n if usr.email:\n provider = usr.email.split(\"@\")[-1].lower()\n if (\n not NewEmailProvider.objects.filter(provider=provider).exists()\n and not User.objects.filter(email__iendswith=f\"@{provider}\").exclude(pk=usr.pk).exists()\n ):\n NewEmailProvider.objects.create(user=usr, provider=provider, use=NEW_ACCOUNT)\n\n form = LoginForm(initial={\"username\": usr.username})\n return render(request, \"member/register/token_success.html\", {\"usr\": usr, \"form\": form})\n\n\ndef generate_token_account(request):\n \"\"\"Generate a token for an account.\"\"\"\n\n try:\n token = request.GET[\"token\"]\n except KeyError:\n return redirect(reverse(\"homepage\"))\n token = get_object_or_404(TokenRegister, token=token)\n\n # Push date\n\n date_end = datetime.now() + timedelta(days=0, hours=1, minutes=0, seconds=0)\n token.date_end = date_end\n token.save()\n\n # Send email\n subject = _(\"{} - Confirmation d'inscription\").format(settings.ZDS_APP[\"site\"][\"literal_name\"])\n from_email = \"{} <{}>\".format(settings.ZDS_APP[\"site\"][\"literal_name\"], settings.ZDS_APP[\"site\"][\"email_noreply\"])\n context = {\n \"username\": token.user.username,\n \"site_url\": settings.ZDS_APP[\"site\"][\"url\"],\n \"site_name\": settings.ZDS_APP[\"site\"][\"literal_name\"],\n \"url\": settings.ZDS_APP[\"site\"][\"url\"] + token.get_absolute_url(),\n }\n message_html = render_to_string(\"email/member/confirm_registration.html\", context)\n message_txt = render_to_string(\"email/member/confirm_registration.txt\", context)\n\n msg = EmailMultiAlternatives(subject, message_txt, from_email, [token.user.email])\n msg.attach_alternative(message_html, \"text/html\")\n try:\n msg.send()\n except:\n msg = None\n return render(request, \"member/register/success.html\", {})\n", "path": "zds/member/views/register.py"}]} | 4,027 | 265 |
gh_patches_debug_30340 | rasdani/github-patches | git_diff | acl-org__acl-anthology-1422 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Adding Anthology ID 2021.naacl-srw.21
## Revision or erratum: please add the following information
- [x] I have attached the revised PDF or erratum to this issue
Hi! I (one of the NAACL SRW chairs) mistakenly omitted this paper when compiling the proceedings. Would it be possible to add it as a new entry `2021.naacl-srw.21`?
The paper is attached to this issue.
The title is: `Towards Multi-Modal Text-Image Retrieval to improve Human Reading`
The authors are: `Florian Schneider, Özge Alacam, Xintong Wang, Chris Biemann`
[37_Final_Paper_PDF.pdf](https://github.com/acl-org/acl-anthology/files/6704101/37_Final_Paper_PDF.pdf)
Thank you so much!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bin/likely_name_split.py`
Content:
```
1 #!/usr/bin/env python3
2 # Daniel Gildea, 2020
3
4 """Usage: likely_name_split.py [--importdir=DIR]
5
6 Counts first and last names in anthology.
7 Predicts best split into first and last.
8 Checks whether current names match our predictions.
9
10 Options:
11 --importdir=DIR Directory to import XML files from. [default: {scriptdir}/../data/]
12 -h, --help Display this helpful text.
13 """
14
15 from collections import defaultdict
16 from docopt import docopt
17 import pickle, json
18 import sys
19 import re
20 import os
21 from math import *
22
23 from anthology import Anthology
24 from anthology.people import PersonName
25
26
27 class NameSplitter:
28 def __init__(self, anthology=None, anthology_dir=None):
29 # counts of how often each name appears
30 self.first_count = defaultdict(lambda: 0) # "Maria" "Victoria"
31 self.first_full_count = defaultdict(lambda: 0) # "Maria Victoria"
32 self.last_count = defaultdict(lambda: 0) # "van" "den" "Bosch"
33 self.last_full_count = defaultdict(lambda: 0) # "van den Bosch"
34 self.first_total = 0
35 self.last_total = 0
36
37 if os.path.exists("names.cache"):
38 self.load_cache()
39 else:
40 if anthology is None and anthology_dir is not None:
41 anthology = Anthology(os.path.join(anthology_dir, "data"))
42 self.count_names(anthology)
43 self.dump_cache()
44
45 def load_cache(self):
46 with open("names.cache", "r") as cache:
47 p = json.load(cache)
48 self.first_count = defaultdict(int, p["first_count"])
49 self.first_full_count = defaultdict(int, p["first_full_count"])
50 self.first_total = p["first_total"]
51 self.last_count = defaultdict(int, p["last_count"])
52 self.last_full_count = defaultdict(int, p["last_full_count"])
53 self.last_total = p["last_total"]
54 print(f"Loaded cache from names.cache", file=sys.stderr)
55
56 def dump_cache(self):
57 with open("names.cache", "w") as cache:
58 p = {
59 "first_count": self.first_count,
60 "first_full_count": self.first_full_count,
61 "first_total": self.first_total,
62 "last_count": self.last_count,
63 "last_full_count": self.last_full_count,
64 "last_total": self.last_total,
65 }
66 print(json.dumps(p), file=cache)
67 print(f"Dumped counts to names.cache", file=sys.stderr)
68
69 # counts names in anthology database into global vars
70 # first_count last_count (dicts)
71 # first_full_count last_full_count (dicts)
72 # first_total last_total (floats)
73 def count_names(self, anthology):
74 for person in anthology.people.personids():
75 name = anthology.people.get_canonical_name(person)
76 num_papers = len(anthology.people.get_papers(person)) + 0.0
77 # print(name.last, ", ", name.first, num_papers)
78 for w in name.first.split(" "):
79 self.first_count[w] += num_papers
80 self.first_full_count[name.first] += num_papers
81 self.first_total += num_papers
82
83 for w in name.last.split(" "):
84 self.last_count[w] += num_papers
85 self.last_full_count[name.last] += num_papers
86 self.last_total += num_papers
87
88 # takes "Maria Victoria Lopez Gonzalez"
89 # returns ("Lopez Gonzalez", "Maria Victoria")
90 # uses counts of words in first and last names in current database
91 def best_split(self, name):
92 if "," in name and not "Jr." in name:
93 # Short-circuit names that are already split
94 # comma in "William Baumgartner, Jr." does not count as a split
95 surname, given_names = name.split(",")
96 return (surname.strip(), given_names.strip())
97
98 words = name.split(" ")
99 best_score = -inf
100 best = ("", "")
101 # loop over possible split points between first/last
102 for i in range(1, len(words)): # at least one word in each part
103 first = " ".join(words[0:i])
104 last = " ".join(words[i:])
105 # max of log prob of "Maria Victoria" and
106 # log prob of "Maria" + log prob of "Victoria"
107 first_probs = [
108 log((self.first_count[x] + 0.01) / self.first_total) for x in words[0:i]
109 ]
110 first_score = max(
111 log((self.first_full_count[first] + 0.000001) / self.first_total),
112 sum(first_probs),
113 )
114 last_probs = [
115 log((self.last_count[x] + 0.01) / self.last_total) for x in words[i:]
116 ]
117 last_score = max(
118 log((self.last_full_count[last] + 0.000001) / self.last_total),
119 sum(last_probs),
120 )
121
122 if first_score + last_score > best_score:
123 best_score = first_score + last_score
124 best = (last, first)
125 # end of loop over split points
126 return best
127
128
129 if __name__ == "__main__":
130 args = docopt(__doc__)
131 scriptdir = os.path.dirname(os.path.abspath(__file__))
132 if "{scriptdir}" in args["--importdir"]:
133 args["--importdir"] = os.path.abspath(
134 args["--importdir"].format(scriptdir=scriptdir)
135 )
136
137 anthology = Anthology(importdir=args["--importdir"])
138 splitter = NameSplitter(anthology)
139
140 # for all names currently in anthology,
141 # see if they match what we predict
142 for person in anthology.people.personids():
143 name = anthology.people.get_canonical_name(person)
144
145 # find our prediction of split
146 best = splitter.best_split(name.first + " " + name.last)
147
148 # if current split does not match our prediction
149 if not (best[0] == name.last and best[1] == name.first):
150 # print suggested replacement
151 print(name.last, ",", name.first, " ==> ", best[0], ",", best[1])
152
```
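For orientation, a brief usage sketch of the splitter defined above; the checkout path and the exact return values are illustrative assumptions:

```python
# Assumes either an existing names.cache file or an anthology checkout with a
# data/ subdirectory at the given path.
splitter = NameSplitter(anthology_dir="../acl-anthology")

print(splitter.best_split("Lopez Gonzalez, Maria Victoria"))
# ('Lopez Gonzalez', 'Maria Victoria')  -- a comma short-circuits the search

print(splitter.best_split("Maria Victoria Lopez Gonzalez"))
# e.g. ('Lopez Gonzalez', 'Maria Victoria') when the counts favour that split
```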
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bin/likely_name_split.py b/bin/likely_name_split.py
--- a/bin/likely_name_split.py
+++ b/bin/likely_name_split.py
@@ -24,6 +24,13 @@
from anthology.people import PersonName
+def log0(x):
+ if x == 0:
+ return -inf
+ else:
+ return log(x)
+
+
class NameSplitter:
def __init__(self, anthology=None, anthology_dir=None):
# counts of how often each name appears
@@ -105,18 +112,21 @@
# max of log prob of "Maria Victoria" and
# log prob of "Maria" + log prob of "Victoria"
first_probs = [
- log((self.first_count[x] + 0.01) / self.first_total) for x in words[0:i]
+ # more smoothing for first than last name,
+ # so that default is one-word last name when all counts are zero
+ log((self.first_count[x] + 0.1) / self.first_total)
+ for x in words[0:i]
]
first_score = max(
- log((self.first_full_count[first] + 0.000001) / self.first_total),
+ # no smoothing for multiword name: log(0) => -inf
+ log0((self.first_full_count[first]) / self.first_total),
sum(first_probs),
)
last_probs = [
log((self.last_count[x] + 0.01) / self.last_total) for x in words[i:]
]
last_score = max(
- log((self.last_full_count[last] + 0.000001) / self.last_total),
- sum(last_probs),
+ log0((self.last_full_count[last]) / self.last_total), sum(last_probs)
)
if first_score + last_score > best_score:
| {"golden_diff": "diff --git a/bin/likely_name_split.py b/bin/likely_name_split.py\n--- a/bin/likely_name_split.py\n+++ b/bin/likely_name_split.py\n@@ -24,6 +24,13 @@\n from anthology.people import PersonName\n \n \n+def log0(x):\n+ if x == 0:\n+ return -inf\n+ else:\n+ return log(x)\n+\n+\n class NameSplitter:\n def __init__(self, anthology=None, anthology_dir=None):\n # counts of how often each name appears\n@@ -105,18 +112,21 @@\n # max of log prob of \"Maria Victoria\" and\n # log prob of \"Maria\" + log prob of \"Victoria\"\n first_probs = [\n- log((self.first_count[x] + 0.01) / self.first_total) for x in words[0:i]\n+ # more smoothing for first than last name,\n+ # so that default is one-word last name when all counts are zero\n+ log((self.first_count[x] + 0.1) / self.first_total)\n+ for x in words[0:i]\n ]\n first_score = max(\n- log((self.first_full_count[first] + 0.000001) / self.first_total),\n+ # no smoothing for multiword name: log(0) => -inf\n+ log0((self.first_full_count[first]) / self.first_total),\n sum(first_probs),\n )\n last_probs = [\n log((self.last_count[x] + 0.01) / self.last_total) for x in words[i:]\n ]\n last_score = max(\n- log((self.last_full_count[last] + 0.000001) / self.last_total),\n- sum(last_probs),\n+ log0((self.last_full_count[last]) / self.last_total), sum(last_probs)\n )\n \n if first_score + last_score > best_score:\n", "issue": "Adding Anthology ID 2021.naacl-srw.21\n## Revision or erratum: please add the following information**\r\n\r\n- [x] I have attached the revised PDF or erratum to this issue\r\n\r\nHi! I (one of the NAACL SRW chairs) mistakenly omitted this paper when compiling the proceedings. Would it be possible to add it as a new entry `2021.naacl-srw.21` ?\r\n\r\nThe paper is attached to this issue.\r\nThe title is: `Towards Multi-Modal Text-Image Retrieval to improve Human Reading`\r\nThe authors are: `Florian Schneider, \u00d6zge Alacam, Xintong Wang, Chris Biemann`\r\n[37_Final_Paper_PDF.pdf](https://github.com/acl-org/acl-anthology/files/6704101/37_Final_Paper_PDF.pdf)\r\n\r\nThank you so much!\n", "before_files": [{"content": "#!/usr/bin/env python3\n# Daniel Gildea, 2020\n\n\"\"\"Usage: likely_name_split.py [--importdir=DIR]\n\nCounts first and last names in anthology.\nPredicts best split into first and last.\nChecks whether current names match our predictions.\n\nOptions:\n --importdir=DIR Directory to import XML files from. 
[default: {scriptdir}/../data/]\n -h, --help Display this helpful text.\n\"\"\"\n\nfrom collections import defaultdict\nfrom docopt import docopt\nimport pickle, json\nimport sys\nimport re\nimport os\nfrom math import *\n\nfrom anthology import Anthology\nfrom anthology.people import PersonName\n\n\nclass NameSplitter:\n def __init__(self, anthology=None, anthology_dir=None):\n # counts of how often each name appears\n self.first_count = defaultdict(lambda: 0) # \"Maria\" \"Victoria\"\n self.first_full_count = defaultdict(lambda: 0) # \"Maria Victoria\"\n self.last_count = defaultdict(lambda: 0) # \"van\" \"den\" \"Bosch\"\n self.last_full_count = defaultdict(lambda: 0) # \"van den Bosch\"\n self.first_total = 0\n self.last_total = 0\n\n if os.path.exists(\"names.cache\"):\n self.load_cache()\n else:\n if anthology is None and anthology_dir is not None:\n anthology = Anthology(os.path.join(anthology_dir, \"data\"))\n self.count_names(anthology)\n self.dump_cache()\n\n def load_cache(self):\n with open(\"names.cache\", \"r\") as cache:\n p = json.load(cache)\n self.first_count = defaultdict(int, p[\"first_count\"])\n self.first_full_count = defaultdict(int, p[\"first_full_count\"])\n self.first_total = p[\"first_total\"]\n self.last_count = defaultdict(int, p[\"last_count\"])\n self.last_full_count = defaultdict(int, p[\"last_full_count\"])\n self.last_total = p[\"last_total\"]\n print(f\"Loaded cache from names.cache\", file=sys.stderr)\n\n def dump_cache(self):\n with open(\"names.cache\", \"w\") as cache:\n p = {\n \"first_count\": self.first_count,\n \"first_full_count\": self.first_full_count,\n \"first_total\": self.first_total,\n \"last_count\": self.last_count,\n \"last_full_count\": self.last_full_count,\n \"last_total\": self.last_total,\n }\n print(json.dumps(p), file=cache)\n print(f\"Dumped counts to names.cache\", file=sys.stderr)\n\n # counts names in anthology database into global vars\n # first_count last_count (dicts)\n # first_full_count last_full_count (dicts)\n # first_total last_total (floats)\n def count_names(self, anthology):\n for person in anthology.people.personids():\n name = anthology.people.get_canonical_name(person)\n num_papers = len(anthology.people.get_papers(person)) + 0.0\n # print(name.last, \", \", name.first, num_papers)\n for w in name.first.split(\" \"):\n self.first_count[w] += num_papers\n self.first_full_count[name.first] += num_papers\n self.first_total += num_papers\n\n for w in name.last.split(\" \"):\n self.last_count[w] += num_papers\n self.last_full_count[name.last] += num_papers\n self.last_total += num_papers\n\n # takes \"Maria Victoria Lopez Gonzalez\"\n # returns (\"Lopez Gonzalez\", \"Maria Victoria\")\n # uses counts of words in first and last names in current database\n def best_split(self, name):\n if \",\" in name and not \"Jr.\" in name:\n # Short-circuit names that are already split\n # comma in \"William Baumgartner, Jr.\" does not count as a split\n surname, given_names = name.split(\",\")\n return (surname.strip(), given_names.strip())\n\n words = name.split(\" \")\n best_score = -inf\n best = (\"\", \"\")\n # loop over possible split points between first/last\n for i in range(1, len(words)): # at least one word in each part\n first = \" \".join(words[0:i])\n last = \" \".join(words[i:])\n # max of log prob of \"Maria Victoria\" and\n # log prob of \"Maria\" + log prob of \"Victoria\"\n first_probs = [\n log((self.first_count[x] + 0.01) / self.first_total) for x in words[0:i]\n ]\n first_score = max(\n 
log((self.first_full_count[first] + 0.000001) / self.first_total),\n sum(first_probs),\n )\n last_probs = [\n log((self.last_count[x] + 0.01) / self.last_total) for x in words[i:]\n ]\n last_score = max(\n log((self.last_full_count[last] + 0.000001) / self.last_total),\n sum(last_probs),\n )\n\n if first_score + last_score > best_score:\n best_score = first_score + last_score\n best = (last, first)\n # end of loop over split points\n return best\n\n\nif __name__ == \"__main__\":\n args = docopt(__doc__)\n scriptdir = os.path.dirname(os.path.abspath(__file__))\n if \"{scriptdir}\" in args[\"--importdir\"]:\n args[\"--importdir\"] = os.path.abspath(\n args[\"--importdir\"].format(scriptdir=scriptdir)\n )\n\n anthology = Anthology(importdir=args[\"--importdir\"])\n splitter = NameSplitter(anthology)\n\n # for all names currently in anthology,\n # see if they match what we predict\n for person in anthology.people.personids():\n name = anthology.people.get_canonical_name(person)\n\n # find our prediction of split\n best = splitter.best_split(name.first + \" \" + name.last)\n\n # if current split does not match our prediction\n if not (best[0] == name.last and best[1] == name.first):\n # print suggested replacement\n print(name.last, \",\", name.first, \" ==> \", best[0], \",\", best[1])\n", "path": "bin/likely_name_split.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# Daniel Gildea, 2020\n\n\"\"\"Usage: likely_name_split.py [--importdir=DIR]\n\nCounts first and last names in anthology.\nPredicts best split into first and last.\nChecks whether current names match our predictions.\n\nOptions:\n --importdir=DIR Directory to import XML files from. [default: {scriptdir}/../data/]\n -h, --help Display this helpful text.\n\"\"\"\n\nfrom collections import defaultdict\nfrom docopt import docopt\nimport pickle, json\nimport sys\nimport re\nimport os\nfrom math import *\n\nfrom anthology import Anthology\nfrom anthology.people import PersonName\n\n\ndef log0(x):\n if x == 0:\n return -inf\n else:\n return log(x)\n\n\nclass NameSplitter:\n def __init__(self, anthology=None, anthology_dir=None):\n # counts of how often each name appears\n self.first_count = defaultdict(lambda: 0) # \"Maria\" \"Victoria\"\n self.first_full_count = defaultdict(lambda: 0) # \"Maria Victoria\"\n self.last_count = defaultdict(lambda: 0) # \"van\" \"den\" \"Bosch\"\n self.last_full_count = defaultdict(lambda: 0) # \"van den Bosch\"\n self.first_total = 0\n self.last_total = 0\n\n if os.path.exists(\"names.cache\"):\n self.load_cache()\n else:\n if anthology is None and anthology_dir is not None:\n anthology = Anthology(os.path.join(anthology_dir, \"data\"))\n self.count_names(anthology)\n self.dump_cache()\n\n def load_cache(self):\n with open(\"names.cache\", \"r\") as cache:\n p = json.load(cache)\n self.first_count = defaultdict(int, p[\"first_count\"])\n self.first_full_count = defaultdict(int, p[\"first_full_count\"])\n self.first_total = p[\"first_total\"]\n self.last_count = defaultdict(int, p[\"last_count\"])\n self.last_full_count = defaultdict(int, p[\"last_full_count\"])\n self.last_total = p[\"last_total\"]\n print(f\"Loaded cache from names.cache\", file=sys.stderr)\n\n def dump_cache(self):\n with open(\"names.cache\", \"w\") as cache:\n p = {\n \"first_count\": self.first_count,\n \"first_full_count\": self.first_full_count,\n \"first_total\": self.first_total,\n \"last_count\": self.last_count,\n \"last_full_count\": self.last_full_count,\n \"last_total\": self.last_total,\n }\n print(json.dumps(p), 
file=cache)\n print(f\"Dumped counts to names.cache\", file=sys.stderr)\n\n # counts names in anthology database into global vars\n # first_count last_count (dicts)\n # first_full_count last_full_count (dicts)\n # first_total last_total (floats)\n def count_names(self, anthology):\n for person in anthology.people.personids():\n name = anthology.people.get_canonical_name(person)\n num_papers = len(anthology.people.get_papers(person)) + 0.0\n # print(name.last, \", \", name.first, num_papers)\n for w in name.first.split(\" \"):\n self.first_count[w] += num_papers\n self.first_full_count[name.first] += num_papers\n self.first_total += num_papers\n\n for w in name.last.split(\" \"):\n self.last_count[w] += num_papers\n self.last_full_count[name.last] += num_papers\n self.last_total += num_papers\n\n # takes \"Maria Victoria Lopez Gonzalez\"\n # returns (\"Lopez Gonzalez\", \"Maria Victoria\")\n # uses counts of words in first and last names in current database\n def best_split(self, name):\n if \",\" in name and not \"Jr.\" in name:\n # Short-circuit names that are already split\n # comma in \"William Baumgartner, Jr.\" does not count as a split\n surname, given_names = name.split(\",\")\n return (surname.strip(), given_names.strip())\n\n words = name.split(\" \")\n best_score = -inf\n best = (\"\", \"\")\n # loop over possible split points between first/last\n for i in range(1, len(words)): # at least one word in each part\n first = \" \".join(words[0:i])\n last = \" \".join(words[i:])\n # max of log prob of \"Maria Victoria\" and\n # log prob of \"Maria\" + log prob of \"Victoria\"\n first_probs = [\n # more smoothing for first than last name,\n # so that default is one-word last name when all counts are zero\n log((self.first_count[x] + 0.1) / self.first_total)\n for x in words[0:i]\n ]\n first_score = max(\n # no smoothing for multiword name: log(0) => -inf\n log0((self.first_full_count[first]) / self.first_total),\n sum(first_probs),\n )\n last_probs = [\n log((self.last_count[x] + 0.01) / self.last_total) for x in words[i:]\n ]\n last_score = max(\n log0((self.last_full_count[last]) / self.last_total), sum(last_probs)\n )\n\n if first_score + last_score > best_score:\n best_score = first_score + last_score\n best = (last, first)\n # end of loop over split points\n return best\n\n\nif __name__ == \"__main__\":\n args = docopt(__doc__)\n scriptdir = os.path.dirname(os.path.abspath(__file__))\n if \"{scriptdir}\" in args[\"--importdir\"]:\n args[\"--importdir\"] = os.path.abspath(\n args[\"--importdir\"].format(scriptdir=scriptdir)\n )\n\n anthology = Anthology(importdir=args[\"--importdir\"])\n splitter = NameSplitter(anthology)\n\n # for all names currently in anthology,\n # see if they match what we predict\n for person in anthology.people.personids():\n name = anthology.people.get_canonical_name(person)\n\n # find our prediction of split\n best = splitter.best_split(name.first + \" \" + name.last)\n\n # if current split does not match our prediction\n if not (best[0] == name.last and best[1] == name.first):\n # print suggested replacement\n print(name.last, \",\", name.first, \" ==> \", best[0], \",\", best[1])\n", "path": "bin/likely_name_split.py"}]} | 2,171 | 434 |
gh_patches_debug_15957 | rasdani/github-patches | git_diff | ansible__ansible-16239 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Chroot doesn't work with busybox
With busybox, `/bin/sh` is a symlink to `/bin/busybox`, and that link doesn't resolve correctly prior to `chroot()`: the target is looked up against the host's root, where it may not exist.
```
PLAY ***************************************************************************
TASK [setup] *******************************************************************
An exception occurred during task execution. To see the full traceback, use -vvv. The error was: OSError: [Errno 2] No such file or directory: '/rootfs/bin/sh'
fatal: [/rootfs]: FAILED! => {"failed": true, "stdout": ""}
NO MORE HOSTS LEFT *************************************************************
PLAY RECAP *********************************************************************
/rootfs : ok=0 changed=0 unreachable=0 failed=1
```
```
# ls -la /rootfs/bin/sh
lrwxrwxrwx 1 root root 12 Jun 1 22:17 /rootfs/bin/sh -> /bin/busybox
```
A sensible solution would be an option to override [the hardcoded shell path](https://github.com/ansible/ansible/blob/v2.0.0.1-1/lib/ansible/plugins/connection/chroot.py#L67-L68), or a similar workaround.
--- END ISSUE ---
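The failure above comes down to the connection plugin probing `/rootfs/bin/sh` from the host, where the symlink's target cannot be followed yet. A minimal sketch of a symlink-tolerant check, assuming only standard-library semantics (an illustrative approximation rather than Ansible's `is_executable` helper):

```python
import os

def looks_chrootable(chroot):
    chrootsh = os.path.join(chroot, 'bin/sh')
    # Executable when resolved from the host: good enough as-is.
    if os.access(chrootsh, os.X_OK):
        return True
    # Otherwise accept a symlink (e.g. /bin/sh -> /bin/busybox) whose
    # target only becomes resolvable after chroot() into the directory.
    return os.path.islink(chrootsh)

print(looks_chrootable('/rootfs'))
```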
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/ansible/plugins/connection/chroot.py`
Content:
```
1 # Based on local.py (c) 2012, Michael DeHaan <[email protected]>
2 # (c) 2013, Maykel Moya <[email protected]>
3 # (c) 2015, Toshio Kuratomi <[email protected]>
4 #
5 # This file is part of Ansible
6 #
7 # Ansible is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # Ansible is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
19 from __future__ import (absolute_import, division, print_function)
20 __metaclass__ = type
21
22 import distutils.spawn
23 import os
24 import os.path
25 import pipes
26 import subprocess
27 import traceback
28
29 from ansible import constants as C
30 from ansible.errors import AnsibleError
31 from ansible.plugins.connection import ConnectionBase, BUFSIZE
32 from ansible.module_utils.basic import is_executable
33 from ansible.utils.unicode import to_bytes
34
35 try:
36 from __main__ import display
37 except ImportError:
38 from ansible.utils.display import Display
39 display = Display()
40
41
42 class Connection(ConnectionBase):
43 ''' Local chroot based connections '''
44
45 transport = 'chroot'
46 has_pipelining = True
47 # su currently has an undiagnosed issue with calculating the file
48 # checksums (so copy, for instance, doesn't work right)
49 # Have to look into that before re-enabling this
50 become_methods = frozenset(C.BECOME_METHODS).difference(('su',))
51
52 def __init__(self, play_context, new_stdin, *args, **kwargs):
53 super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
54
55 self.chroot = self._play_context.remote_addr
56
57 if os.geteuid() != 0:
58 raise AnsibleError("chroot connection requires running as root")
59
60 # we're running as root on the local system so do some
61 # trivial checks for ensuring 'host' is actually a chroot'able dir
62 if not os.path.isdir(self.chroot):
63 raise AnsibleError("%s is not a directory" % self.chroot)
64
65 chrootsh = os.path.join(self.chroot, 'bin/sh')
66 if not is_executable(chrootsh):
67 raise AnsibleError("%s does not look like a chrootable dir (/bin/sh missing)" % self.chroot)
68
69 self.chroot_cmd = distutils.spawn.find_executable('chroot')
70 if not self.chroot_cmd:
71 raise AnsibleError("chroot command not found in PATH")
72
73 def _connect(self):
74 ''' connect to the chroot; nothing to do here '''
75 super(Connection, self)._connect()
76 if not self._connected:
77 display.vvv("THIS IS A LOCAL CHROOT DIR", host=self.chroot)
78 self._connected = True
79
80 def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE):
81 ''' run a command on the chroot. This is only needed for implementing
82 put_file() get_file() so that we don't have to read the whole file
83 into memory.
84
85 compared to exec_command() it looses some niceties like being able to
86 return the process's exit code immediately.
87 '''
88 executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh'
89 local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd]
90
91 display.vvv("EXEC %s" % (local_cmd), host=self.chroot)
92 local_cmd = [to_bytes(i, errors='strict') for i in local_cmd]
93 p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
94 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
95
96 return p
97
98 def exec_command(self, cmd, in_data=None, sudoable=False):
99 ''' run a command on the chroot '''
100 super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
101
102 p = self._buffered_exec_command(cmd)
103
104 stdout, stderr = p.communicate(in_data)
105 return (p.returncode, stdout, stderr)
106
107 def _prefix_login_path(self, remote_path):
108 ''' Make sure that we put files into a standard path
109
110 If a path is relative, then we need to choose where to put it.
111 ssh chooses $HOME but we aren't guaranteed that a home dir will
112 exist in any given chroot. So for now we're choosing "/" instead.
113 This also happens to be the former default.
114
115 Can revisit using $HOME instead if it's a problem
116 '''
117 if not remote_path.startswith(os.path.sep):
118 remote_path = os.path.join(os.path.sep, remote_path)
119 return os.path.normpath(remote_path)
120
121 def put_file(self, in_path, out_path):
122 ''' transfer a file from local to chroot '''
123 super(Connection, self).put_file(in_path, out_path)
124 display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.chroot)
125
126 out_path = pipes.quote(self._prefix_login_path(out_path))
127 try:
128 with open(to_bytes(in_path, errors='strict'), 'rb') as in_file:
129 try:
130 p = self._buffered_exec_command('dd of=%s bs=%s' % (out_path, BUFSIZE), stdin=in_file)
131 except OSError:
132 raise AnsibleError("chroot connection requires dd command in the chroot")
133 try:
134 stdout, stderr = p.communicate()
135 except:
136 traceback.print_exc()
137 raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
138 if p.returncode != 0:
139 raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
140 except IOError:
141 raise AnsibleError("file or module does not exist at: %s" % in_path)
142
143 def fetch_file(self, in_path, out_path):
144 ''' fetch a file from chroot to local '''
145 super(Connection, self).fetch_file(in_path, out_path)
146 display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.chroot)
147
148 in_path = pipes.quote(self._prefix_login_path(in_path))
149 try:
150 p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE))
151 except OSError:
152 raise AnsibleError("chroot connection requires dd command in the chroot")
153
154 with open(to_bytes(out_path, errors='strict'), 'wb+') as out_file:
155 try:
156 chunk = p.stdout.read(BUFSIZE)
157 while chunk:
158 out_file.write(chunk)
159 chunk = p.stdout.read(BUFSIZE)
160 except:
161 traceback.print_exc()
162 raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
163 stdout, stderr = p.communicate()
164 if p.returncode != 0:
165 raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
166
167 def close(self):
168 ''' terminate the connection; nothing to do here '''
169 super(Connection, self).close()
170 self._connected = False
171
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/ansible/plugins/connection/chroot.py b/lib/ansible/plugins/connection/chroot.py
--- a/lib/ansible/plugins/connection/chroot.py
+++ b/lib/ansible/plugins/connection/chroot.py
@@ -63,7 +63,11 @@
raise AnsibleError("%s is not a directory" % self.chroot)
chrootsh = os.path.join(self.chroot, 'bin/sh')
- if not is_executable(chrootsh):
+ # Want to check for a usable bourne shell inside the chroot.
+ # is_executable() == True is sufficient. For symlinks it
+ # gets really complicated really fast. So we punt on finding that
+ # out. As long as it's a symlink we assume that it will work
+ if not (is_executable(chrootsh) or (os.path.lexists(chrootsh) and os.path.islink(chrootsh))):
raise AnsibleError("%s does not look like a chrootable dir (/bin/sh missing)" % self.chroot)
self.chroot_cmd = distutils.spawn.find_executable('chroot')
| {"golden_diff": "diff --git a/lib/ansible/plugins/connection/chroot.py b/lib/ansible/plugins/connection/chroot.py\n--- a/lib/ansible/plugins/connection/chroot.py\n+++ b/lib/ansible/plugins/connection/chroot.py\n@@ -63,7 +63,11 @@\n raise AnsibleError(\"%s is not a directory\" % self.chroot)\n \n chrootsh = os.path.join(self.chroot, 'bin/sh')\n- if not is_executable(chrootsh):\n+ # Want to check for a usable bourne shell inside the chroot.\n+ # is_executable() == True is sufficient. For symlinks it\n+ # gets really complicated really fast. So we punt on finding that\n+ # out. As long as it's a symlink we assume that it will work\n+ if not (is_executable(chrootsh) or (os.path.lexists(chrootsh) and os.path.islink(chrootsh))):\n raise AnsibleError(\"%s does not look like a chrootable dir (/bin/sh missing)\" % self.chroot)\n \n self.chroot_cmd = distutils.spawn.find_executable('chroot')\n", "issue": "Chroot doesn't work with busybox\nWith busybox, `/bin/sh` can be a relative symlink, and won't resolve correctly prior to `chroot()`.\n\n```\nPLAY ***************************************************************************\n\nTASK [setup] *******************************************************************\nAn exception occurred during task execution. To see the full traceback, use -vvv. The error was: OSError: [Errno 2] No such file or directory: '/rootfs/bin/sh'\nfatal: [/rootfs]: FAILED! => {\"failed\": true, \"stdout\": \"\"}\n\nNO MORE HOSTS LEFT *************************************************************\n\nPLAY RECAP *********************************************************************\n/rootfs : ok=0 changed=0 unreachable=0 failed=1 \n```\n\n```\n# ls -la /rootfs/bin/sh\nlrwxrwxrwx 1 root root 12 Jun 1 22:17 /rootfs/bin/sh -> /bin/busybox\n```\n\nA sensible solution for this would be through an option to override [the hardcoded shell path](https://github.com/ansible/ansible/blob/v2.0.0.1-1/lib/ansible/plugins/connection/chroot.py#L67-L68), or something like that.\n\n", "before_files": [{"content": "# Based on local.py (c) 2012, Michael DeHaan <[email protected]>\n# (c) 2013, Maykel Moya <[email protected]>\n# (c) 2015, Toshio Kuratomi <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see <http://www.gnu.org/licenses/>.\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nimport distutils.spawn\nimport os\nimport os.path\nimport pipes\nimport subprocess\nimport traceback\n\nfrom ansible import constants as C\nfrom ansible.errors import AnsibleError\nfrom ansible.plugins.connection import ConnectionBase, BUFSIZE\nfrom ansible.module_utils.basic import is_executable\nfrom ansible.utils.unicode import to_bytes\n\ntry:\n from __main__ import display\nexcept ImportError:\n from ansible.utils.display import Display\n display = Display()\n\n\nclass Connection(ConnectionBase):\n ''' Local chroot based connections '''\n\n transport = 'chroot'\n has_pipelining = True\n # su currently has an undiagnosed issue with calculating the file\n # checksums (so copy, for instance, doesn't work right)\n # Have to look into that before re-enabling this\n become_methods = frozenset(C.BECOME_METHODS).difference(('su',))\n\n def __init__(self, play_context, new_stdin, *args, **kwargs):\n super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)\n\n self.chroot = self._play_context.remote_addr\n\n if os.geteuid() != 0:\n raise AnsibleError(\"chroot connection requires running as root\")\n\n # we're running as root on the local system so do some\n # trivial checks for ensuring 'host' is actually a chroot'able dir\n if not os.path.isdir(self.chroot):\n raise AnsibleError(\"%s is not a directory\" % self.chroot)\n\n chrootsh = os.path.join(self.chroot, 'bin/sh')\n if not is_executable(chrootsh):\n raise AnsibleError(\"%s does not look like a chrootable dir (/bin/sh missing)\" % self.chroot)\n\n self.chroot_cmd = distutils.spawn.find_executable('chroot')\n if not self.chroot_cmd:\n raise AnsibleError(\"chroot command not found in PATH\")\n\n def _connect(self):\n ''' connect to the chroot; nothing to do here '''\n super(Connection, self)._connect()\n if not self._connected:\n display.vvv(\"THIS IS A LOCAL CHROOT DIR\", host=self.chroot)\n self._connected = True\n\n def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE):\n ''' run a command on the chroot. This is only needed for implementing\n put_file() get_file() so that we don't have to read the whole file\n into memory.\n\n compared to exec_command() it looses some niceties like being able to\n return the process's exit code immediately.\n '''\n executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh'\n local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd]\n\n display.vvv(\"EXEC %s\" % (local_cmd), host=self.chroot)\n local_cmd = [to_bytes(i, errors='strict') for i in local_cmd]\n p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n return p\n\n def exec_command(self, cmd, in_data=None, sudoable=False):\n ''' run a command on the chroot '''\n super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)\n\n p = self._buffered_exec_command(cmd)\n\n stdout, stderr = p.communicate(in_data)\n return (p.returncode, stdout, stderr)\n\n def _prefix_login_path(self, remote_path):\n ''' Make sure that we put files into a standard path\n\n If a path is relative, then we need to choose where to put it.\n ssh chooses $HOME but we aren't guaranteed that a home dir will\n exist in any given chroot. 
So for now we're choosing \"/\" instead.\n This also happens to be the former default.\n\n Can revisit using $HOME instead if it's a problem\n '''\n if not remote_path.startswith(os.path.sep):\n remote_path = os.path.join(os.path.sep, remote_path)\n return os.path.normpath(remote_path)\n\n def put_file(self, in_path, out_path):\n ''' transfer a file from local to chroot '''\n super(Connection, self).put_file(in_path, out_path)\n display.vvv(\"PUT %s TO %s\" % (in_path, out_path), host=self.chroot)\n\n out_path = pipes.quote(self._prefix_login_path(out_path))\n try:\n with open(to_bytes(in_path, errors='strict'), 'rb') as in_file:\n try:\n p = self._buffered_exec_command('dd of=%s bs=%s' % (out_path, BUFSIZE), stdin=in_file)\n except OSError:\n raise AnsibleError(\"chroot connection requires dd command in the chroot\")\n try:\n stdout, stderr = p.communicate()\n except:\n traceback.print_exc()\n raise AnsibleError(\"failed to transfer file %s to %s\" % (in_path, out_path))\n if p.returncode != 0:\n raise AnsibleError(\"failed to transfer file %s to %s:\\n%s\\n%s\" % (in_path, out_path, stdout, stderr))\n except IOError:\n raise AnsibleError(\"file or module does not exist at: %s\" % in_path)\n\n def fetch_file(self, in_path, out_path):\n ''' fetch a file from chroot to local '''\n super(Connection, self).fetch_file(in_path, out_path)\n display.vvv(\"FETCH %s TO %s\" % (in_path, out_path), host=self.chroot)\n\n in_path = pipes.quote(self._prefix_login_path(in_path))\n try:\n p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE))\n except OSError:\n raise AnsibleError(\"chroot connection requires dd command in the chroot\")\n\n with open(to_bytes(out_path, errors='strict'), 'wb+') as out_file:\n try:\n chunk = p.stdout.read(BUFSIZE)\n while chunk:\n out_file.write(chunk)\n chunk = p.stdout.read(BUFSIZE)\n except:\n traceback.print_exc()\n raise AnsibleError(\"failed to transfer file %s to %s\" % (in_path, out_path))\n stdout, stderr = p.communicate()\n if p.returncode != 0:\n raise AnsibleError(\"failed to transfer file %s to %s:\\n%s\\n%s\" % (in_path, out_path, stdout, stderr))\n\n def close(self):\n ''' terminate the connection; nothing to do here '''\n super(Connection, self).close()\n self._connected = False\n", "path": "lib/ansible/plugins/connection/chroot.py"}], "after_files": [{"content": "# Based on local.py (c) 2012, Michael DeHaan <[email protected]>\n# (c) 2013, Maykel Moya <[email protected]>\n# (c) 2015, Toshio Kuratomi <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see <http://www.gnu.org/licenses/>.\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nimport distutils.spawn\nimport os\nimport os.path\nimport pipes\nimport subprocess\nimport traceback\n\nfrom ansible import constants as C\nfrom ansible.errors import AnsibleError\nfrom ansible.plugins.connection import ConnectionBase, BUFSIZE\nfrom ansible.module_utils.basic import is_executable\nfrom ansible.utils.unicode import to_bytes\n\ntry:\n from __main__ import display\nexcept ImportError:\n from ansible.utils.display import Display\n display = Display()\n\n\nclass Connection(ConnectionBase):\n ''' Local chroot based connections '''\n\n transport = 'chroot'\n has_pipelining = True\n # su currently has an undiagnosed issue with calculating the file\n # checksums (so copy, for instance, doesn't work right)\n # Have to look into that before re-enabling this\n become_methods = frozenset(C.BECOME_METHODS).difference(('su',))\n\n def __init__(self, play_context, new_stdin, *args, **kwargs):\n super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)\n\n self.chroot = self._play_context.remote_addr\n\n if os.geteuid() != 0:\n raise AnsibleError(\"chroot connection requires running as root\")\n\n # we're running as root on the local system so do some\n # trivial checks for ensuring 'host' is actually a chroot'able dir\n if not os.path.isdir(self.chroot):\n raise AnsibleError(\"%s is not a directory\" % self.chroot)\n\n chrootsh = os.path.join(self.chroot, 'bin/sh')\n # Want to check for a usable bourne shell inside the chroot.\n # is_executable() == True is sufficient. For symlinks it\n # gets really complicated really fast. So we punt on finding that\n # out. As long as it's a symlink we assume that it will work\n if not (is_executable(chrootsh) or (os.path.lexists(chrootsh) and os.path.islink(chrootsh))):\n raise AnsibleError(\"%s does not look like a chrootable dir (/bin/sh missing)\" % self.chroot)\n\n self.chroot_cmd = distutils.spawn.find_executable('chroot')\n if not self.chroot_cmd:\n raise AnsibleError(\"chroot command not found in PATH\")\n\n def _connect(self):\n ''' connect to the chroot; nothing to do here '''\n super(Connection, self)._connect()\n if not self._connected:\n display.vvv(\"THIS IS A LOCAL CHROOT DIR\", host=self.chroot)\n self._connected = True\n\n def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE):\n ''' run a command on the chroot. 
This is only needed for implementing\n put_file() get_file() so that we don't have to read the whole file\n into memory.\n\n compared to exec_command() it looses some niceties like being able to\n return the process's exit code immediately.\n '''\n executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh'\n local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd]\n\n display.vvv(\"EXEC %s\" % (local_cmd), host=self.chroot)\n local_cmd = [to_bytes(i, errors='strict') for i in local_cmd]\n p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n return p\n\n def exec_command(self, cmd, in_data=None, sudoable=False):\n ''' run a command on the chroot '''\n super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)\n\n p = self._buffered_exec_command(cmd)\n\n stdout, stderr = p.communicate(in_data)\n return (p.returncode, stdout, stderr)\n\n def _prefix_login_path(self, remote_path):\n ''' Make sure that we put files into a standard path\n\n If a path is relative, then we need to choose where to put it.\n ssh chooses $HOME but we aren't guaranteed that a home dir will\n exist in any given chroot. So for now we're choosing \"/\" instead.\n This also happens to be the former default.\n\n Can revisit using $HOME instead if it's a problem\n '''\n if not remote_path.startswith(os.path.sep):\n remote_path = os.path.join(os.path.sep, remote_path)\n return os.path.normpath(remote_path)\n\n def put_file(self, in_path, out_path):\n ''' transfer a file from local to chroot '''\n super(Connection, self).put_file(in_path, out_path)\n display.vvv(\"PUT %s TO %s\" % (in_path, out_path), host=self.chroot)\n\n out_path = pipes.quote(self._prefix_login_path(out_path))\n try:\n with open(to_bytes(in_path, errors='strict'), 'rb') as in_file:\n try:\n p = self._buffered_exec_command('dd of=%s bs=%s' % (out_path, BUFSIZE), stdin=in_file)\n except OSError:\n raise AnsibleError(\"chroot connection requires dd command in the chroot\")\n try:\n stdout, stderr = p.communicate()\n except:\n traceback.print_exc()\n raise AnsibleError(\"failed to transfer file %s to %s\" % (in_path, out_path))\n if p.returncode != 0:\n raise AnsibleError(\"failed to transfer file %s to %s:\\n%s\\n%s\" % (in_path, out_path, stdout, stderr))\n except IOError:\n raise AnsibleError(\"file or module does not exist at: %s\" % in_path)\n\n def fetch_file(self, in_path, out_path):\n ''' fetch a file from chroot to local '''\n super(Connection, self).fetch_file(in_path, out_path)\n display.vvv(\"FETCH %s TO %s\" % (in_path, out_path), host=self.chroot)\n\n in_path = pipes.quote(self._prefix_login_path(in_path))\n try:\n p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE))\n except OSError:\n raise AnsibleError(\"chroot connection requires dd command in the chroot\")\n\n with open(to_bytes(out_path, errors='strict'), 'wb+') as out_file:\n try:\n chunk = p.stdout.read(BUFSIZE)\n while chunk:\n out_file.write(chunk)\n chunk = p.stdout.read(BUFSIZE)\n except:\n traceback.print_exc()\n raise AnsibleError(\"failed to transfer file %s to %s\" % (in_path, out_path))\n stdout, stderr = p.communicate()\n if p.returncode != 0:\n raise AnsibleError(\"failed to transfer file %s to %s:\\n%s\\n%s\" % (in_path, out_path, stdout, stderr))\n\n def close(self):\n ''' terminate the connection; nothing to do here '''\n super(Connection, self).close()\n self._connected = False\n", "path": "lib/ansible/plugins/connection/chroot.py"}]} | 2,623 | 
249 |
gh_patches_debug_3456 | rasdani/github-patches | git_diff | CTFd__CTFd-1827 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Set plugin migration version in between each migration
https://github.com/CTFd/CTFd/blob/e1991e16963b10302baa7cc50d52071a5053bf2f/CTFd/plugins/migrations.py#L72-L77
This code should probably set the plugin version after each individual migration, so that if a later migration fails the upgrade doesn't have to be restarted from the beginning.
--- END ISSUE ---
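In other words, the revision bookkeeping should move inside the loop. A minimal sketch of that idea, simplified from the upgrade loop shown in the file below:

```python
try:
    for r in revs:
        with context.begin_transaction():
            r.module.upgrade(op=op)
        # Record each revision as soon as it succeeds, so a failure in a
        # later migration resumes from here rather than from the old base.
        set_config(plugin_name + "_alembic_version", r.revision)
finally:
    conn.close()
```

The existing `set_config(plugin_name + "_alembic_version", upper)` after the loop can stay; on success it simply becomes redundant.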
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `CTFd/plugins/migrations.py`
Content:
```
1 import inspect
2 import os
3
4 from alembic.config import Config
5 from alembic.migration import MigrationContext
6 from alembic.operations import Operations
7 from alembic.script import ScriptDirectory
8 from flask import current_app
9 from sqlalchemy import create_engine, pool
10
11 from CTFd.utils import get_config, set_config
12
13
14 def current(plugin_name=None):
15 if plugin_name is None:
16 # Get the directory name of the plugin if unspecified
17 # Doing it this way doesn't waste the rest of the inspect.stack call
18 frame = inspect.currentframe()
19 caller_info = inspect.getframeinfo(frame.f_back)
20 caller_path = caller_info[0]
21 plugin_name = os.path.basename(os.path.dirname(caller_path))
22
23 return get_config(plugin_name + "_alembic_version")
24
25
26 def upgrade(plugin_name=None, revision=None, lower="current"):
27 database_url = current_app.config.get("SQLALCHEMY_DATABASE_URI")
28 if database_url.startswith("sqlite"):
29 current_app.db.create_all()
30 return
31
32 if plugin_name is None:
33 # Get the directory name of the plugin if unspecified
34 # Doing it this way doesn't waste the rest of the inspect.stack call
35 frame = inspect.currentframe()
36 caller_info = inspect.getframeinfo(frame.f_back)
37 caller_path = caller_info[0]
38 plugin_name = os.path.basename(os.path.dirname(caller_path))
39
40 # Check if the plugin has migraitons
41 migrations_path = os.path.join(current_app.plugins_dir, plugin_name, "migrations")
42 if os.path.isdir(migrations_path) is False:
43 return
44
45 engine = create_engine(database_url, poolclass=pool.NullPool)
46 conn = engine.connect()
47 context = MigrationContext.configure(conn)
48 op = Operations(context)
49
50 # Find the list of migrations to run
51 config = Config()
52 config.set_main_option("script_location", migrations_path)
53 config.set_main_option("version_locations", migrations_path)
54 script = ScriptDirectory.from_config(config)
55
56 # Choose base revision for plugin upgrade
57 # "current" points to the current plugin version stored in config
58 # None represents the absolute base layer (e.g. first installation)
59 if lower == "current":
60 lower = get_config(plugin_name + "_alembic_version")
61
62 # Do we upgrade to head or to a specific revision
63 if revision is None:
64 upper = script.get_current_head()
65 else:
66 upper = revision
67
68 # Apply from lower to upper
69 revs = list(script.iterate_revisions(lower=lower, upper=upper))
70 revs.reverse()
71
72 try:
73 for r in revs:
74 with context.begin_transaction():
75 r.module.upgrade(op=op)
76 finally:
77 conn.close()
78
79 # Set the new latest revision
80 set_config(plugin_name + "_alembic_version", upper)
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/CTFd/plugins/migrations.py b/CTFd/plugins/migrations.py
--- a/CTFd/plugins/migrations.py
+++ b/CTFd/plugins/migrations.py
@@ -73,6 +73,9 @@
for r in revs:
with context.begin_transaction():
r.module.upgrade(op=op)
+ # Set revision that succeeded so we don't need
+ # to start from the beginning on failure
+ set_config(plugin_name + "_alembic_version", r.revision)
finally:
conn.close()
| {"golden_diff": "diff --git a/CTFd/plugins/migrations.py b/CTFd/plugins/migrations.py\n--- a/CTFd/plugins/migrations.py\n+++ b/CTFd/plugins/migrations.py\n@@ -73,6 +73,9 @@\n for r in revs:\n with context.begin_transaction():\n r.module.upgrade(op=op)\n+ # Set revision that succeeded so we don't need\n+ # to start from the beginning on failure\n+ set_config(plugin_name + \"_alembic_version\", r.revision)\n finally:\n conn.close()\n", "issue": "Set plugin migration version in between each migration\nhttps://github.com/CTFd/CTFd/blob/e1991e16963b10302baa7cc50d52071a5053bf2f/CTFd/plugins/migrations.py#L72-L77\r\n\r\nThis code here probably should be setting the plugin version in between each migration so that if a migration fails it doesn't need to be started from the beginning again. \n", "before_files": [{"content": "import inspect\nimport os\n\nfrom alembic.config import Config\nfrom alembic.migration import MigrationContext\nfrom alembic.operations import Operations\nfrom alembic.script import ScriptDirectory\nfrom flask import current_app\nfrom sqlalchemy import create_engine, pool\n\nfrom CTFd.utils import get_config, set_config\n\n\ndef current(plugin_name=None):\n if plugin_name is None:\n # Get the directory name of the plugin if unspecified\n # Doing it this way doesn't waste the rest of the inspect.stack call\n frame = inspect.currentframe()\n caller_info = inspect.getframeinfo(frame.f_back)\n caller_path = caller_info[0]\n plugin_name = os.path.basename(os.path.dirname(caller_path))\n\n return get_config(plugin_name + \"_alembic_version\")\n\n\ndef upgrade(plugin_name=None, revision=None, lower=\"current\"):\n database_url = current_app.config.get(\"SQLALCHEMY_DATABASE_URI\")\n if database_url.startswith(\"sqlite\"):\n current_app.db.create_all()\n return\n\n if plugin_name is None:\n # Get the directory name of the plugin if unspecified\n # Doing it this way doesn't waste the rest of the inspect.stack call\n frame = inspect.currentframe()\n caller_info = inspect.getframeinfo(frame.f_back)\n caller_path = caller_info[0]\n plugin_name = os.path.basename(os.path.dirname(caller_path))\n\n # Check if the plugin has migraitons\n migrations_path = os.path.join(current_app.plugins_dir, plugin_name, \"migrations\")\n if os.path.isdir(migrations_path) is False:\n return\n\n engine = create_engine(database_url, poolclass=pool.NullPool)\n conn = engine.connect()\n context = MigrationContext.configure(conn)\n op = Operations(context)\n\n # Find the list of migrations to run\n config = Config()\n config.set_main_option(\"script_location\", migrations_path)\n config.set_main_option(\"version_locations\", migrations_path)\n script = ScriptDirectory.from_config(config)\n\n # Choose base revision for plugin upgrade\n # \"current\" points to the current plugin version stored in config\n # None represents the absolute base layer (e.g. 
first installation)\n if lower == \"current\":\n lower = get_config(plugin_name + \"_alembic_version\")\n\n # Do we upgrade to head or to a specific revision\n if revision is None:\n upper = script.get_current_head()\n else:\n upper = revision\n\n # Apply from lower to upper\n revs = list(script.iterate_revisions(lower=lower, upper=upper))\n revs.reverse()\n\n try:\n for r in revs:\n with context.begin_transaction():\n r.module.upgrade(op=op)\n finally:\n conn.close()\n\n # Set the new latest revision\n set_config(plugin_name + \"_alembic_version\", upper)\n", "path": "CTFd/plugins/migrations.py"}], "after_files": [{"content": "import inspect\nimport os\n\nfrom alembic.config import Config\nfrom alembic.migration import MigrationContext\nfrom alembic.operations import Operations\nfrom alembic.script import ScriptDirectory\nfrom flask import current_app\nfrom sqlalchemy import create_engine, pool\n\nfrom CTFd.utils import get_config, set_config\n\n\ndef current(plugin_name=None):\n if plugin_name is None:\n # Get the directory name of the plugin if unspecified\n # Doing it this way doesn't waste the rest of the inspect.stack call\n frame = inspect.currentframe()\n caller_info = inspect.getframeinfo(frame.f_back)\n caller_path = caller_info[0]\n plugin_name = os.path.basename(os.path.dirname(caller_path))\n\n return get_config(plugin_name + \"_alembic_version\")\n\n\ndef upgrade(plugin_name=None, revision=None, lower=\"current\"):\n database_url = current_app.config.get(\"SQLALCHEMY_DATABASE_URI\")\n if database_url.startswith(\"sqlite\"):\n current_app.db.create_all()\n return\n\n if plugin_name is None:\n # Get the directory name of the plugin if unspecified\n # Doing it this way doesn't waste the rest of the inspect.stack call\n frame = inspect.currentframe()\n caller_info = inspect.getframeinfo(frame.f_back)\n caller_path = caller_info[0]\n plugin_name = os.path.basename(os.path.dirname(caller_path))\n\n # Check if the plugin has migraitons\n migrations_path = os.path.join(current_app.plugins_dir, plugin_name, \"migrations\")\n if os.path.isdir(migrations_path) is False:\n return\n\n engine = create_engine(database_url, poolclass=pool.NullPool)\n conn = engine.connect()\n context = MigrationContext.configure(conn)\n op = Operations(context)\n\n # Find the list of migrations to run\n config = Config()\n config.set_main_option(\"script_location\", migrations_path)\n config.set_main_option(\"version_locations\", migrations_path)\n script = ScriptDirectory.from_config(config)\n\n # Choose base revision for plugin upgrade\n # \"current\" points to the current plugin version stored in config\n # None represents the absolute base layer (e.g. first installation)\n if lower == \"current\":\n lower = get_config(plugin_name + \"_alembic_version\")\n\n # Do we upgrade to head or to a specific revision\n if revision is None:\n upper = script.get_current_head()\n else:\n upper = revision\n\n # Apply from lower to upper\n revs = list(script.iterate_revisions(lower=lower, upper=upper))\n revs.reverse()\n\n try:\n for r in revs:\n with context.begin_transaction():\n r.module.upgrade(op=op)\n # Set revision that succeeded so we don't need\n # to start from the beginning on failure\n set_config(plugin_name + \"_alembic_version\", r.revision)\n finally:\n conn.close()\n\n # Set the new latest revision\n set_config(plugin_name + \"_alembic_version\", upper)\n", "path": "CTFd/plugins/migrations.py"}]} | 1,129 | 122 |
gh_patches_debug_23213 | rasdani/github-patches | git_diff | microsoft__lisa-1567 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Command not found (PATH does not contain /usr/sbin)
Getting errors when using LISAv3 to deploy and test CentOS 7_9 on Azure
`[ERROR] lisa.env[generated_0].node[0].cmd[7289] not found command: Command not found: modinfo. Check that modinfo is installed and on $PATH`
`[ERROR] lisa.env[generated_0].node[0].cmd[1038] not found command: Command not found: waagent. Check that waagent is installed and on $PATH`
`[ERROR] lisa.env[generated_0].node[0].cmd[8629] not found command: Command not found: lsmod. Check that lsmod is installed and on $PATH`
SSHing into the node confirms that all three of these commands are present and runnable on the node.
The error about modinfo missing appears to occur before any tests start running. These errors do not occur when deploying and testing Ubuntu 18.04-LTS.
--- END ISSUE ---
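The pattern in the log is consistent with a PATH problem rather than missing packages: `modinfo` and `lsmod` live in `/usr/sbin` on CentOS, which may be missing from the PATH of a non-interactive SSH command. A small illustrative probe, using only the standard library (the extra sbin directories are typical locations, not taken from the report):

```python
import os
import shutil

SBIN_DIRS = ":/usr/sbin:/usr/local/sbin:/sbin"

for cmd in ("modinfo", "lsmod", "waagent"):
    on_path = shutil.which(cmd)
    with_sbin = shutil.which(cmd, path=os.environ.get("PATH", "") + SBIN_DIRS)
    print(f"{cmd}: on PATH={bool(on_path)}, with sbin dirs={bool(with_sbin)}")
```

This is also why running the same tools through `sudo` helps: sudo's `secure_path` normally includes the sbin directories.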
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lisa/tools/modinfo.py`
Content:
```
1 # Copyright (c) Microsoft Corporation.
2 # Licensed under the MIT license.
3
4 import re
5 from typing import Any
6
7 from lisa.executable import Tool
8 from lisa.util import find_patterns_in_lines
9
10
11 class Modinfo(Tool):
12 __version_pattern = re.compile(r"^version:[ \t]*([^ \n]*)")
13
14 @property
15 def command(self) -> str:
16 return self._command
17
18 def _check_exists(self) -> bool:
19 return True
20
21 def _initialize(self, *args: Any, **kwargs: Any) -> None:
22 self._command = "modinfo"
23
24 def get_info(
25 self,
26 mod_name: str,
27 force_run: bool = False,
28 no_info_log: bool = True,
29 no_error_log: bool = True,
30 ) -> str:
31 result = self.run(
32 mod_name,
33 force_run=force_run,
34 no_info_log=no_info_log,
35 no_error_log=no_error_log,
36 )
37 if result.exit_code != 0:
38 # CentOS may not include the path when started,
39 # specify path and try again.
40 self._command = "/usr/sbin/modinfo"
41 result = self.run(
42 mod_name,
43 force_run=force_run,
44 no_info_log=no_info_log,
45 no_error_log=no_error_log,
46 )
47 return result.stdout
48
49 def get_version(
50 self,
51 mod_name: str,
52 force_run: bool = False,
53 no_info_log: bool = True,
54 no_error_log: bool = True,
55 ) -> str:
56 output = self.get_info(
57 mod_name=mod_name,
58 force_run=force_run,
59 no_info_log=no_info_log,
60 no_error_log=no_error_log,
61 )
62 found_version = find_patterns_in_lines(output, [self.__version_pattern])
63 return found_version[0][0] if found_version[0] else ""
64
```
Path: `lisa/tools/lsmod.py`
Content:
```
1 # Copyright (c) Microsoft Corporation.
2 # Licensed under the MIT license.
3
4 import re
5 from typing import Any
6
7 from lisa.executable import Tool
8 from lisa.util import LisaException, find_patterns_in_lines
9
10
11 class Lsmod(Tool):
12 # The output of lsmod command is in below format -
13 # Module Size Used by
14 # fuse 52176 3
15 # cryptd 14125 0
16 # aes_generic 32970 1 aes_i586
17 __output_pattern = re.compile(
18 r"^(?P<name>[^\s]+)\s+(?P<size>[^\s]+)\s+(?P<usedby>.*)?$", re.MULTILINE
19 )
20
21 @property
22 def command(self) -> str:
23 return self._command
24
25 def _check_exists(self) -> bool:
26 return True
27
28 def _initialize(self, *args: Any, **kwargs: Any) -> None:
29 self._command = "lsmod"
30
31 def module_exists(
32 self,
33 mod_name: str = "",
34 force_run: bool = False,
35 no_info_log: bool = True,
36 no_error_log: bool = True,
37 ) -> bool:
38 result = self.run(
39 force_run=force_run,
40 no_info_log=no_info_log,
41 no_error_log=no_error_log,
42 )
43 if result.exit_code != 0:
44 raise LisaException(
45 f"{self._command} command got non-zero exit code: {result.exit_code}"
46 )
47
48 module_info = find_patterns_in_lines(result.stdout, [self.__output_pattern])
49 if any(mod_name in info for sublist in module_info for info in sublist):
50 return True
51
52 return False
53
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lisa/tools/lsmod.py b/lisa/tools/lsmod.py
--- a/lisa/tools/lsmod.py
+++ b/lisa/tools/lsmod.py
@@ -36,6 +36,7 @@
no_error_log: bool = True,
) -> bool:
result = self.run(
+ sudo=True,
force_run=force_run,
no_info_log=no_info_log,
no_error_log=no_error_log,
diff --git a/lisa/tools/modinfo.py b/lisa/tools/modinfo.py
--- a/lisa/tools/modinfo.py
+++ b/lisa/tools/modinfo.py
@@ -30,20 +30,13 @@
) -> str:
result = self.run(
mod_name,
+ sudo=True,
force_run=force_run,
no_info_log=no_info_log,
no_error_log=no_error_log,
+ expected_exit_code=0,
+ expected_exit_code_failure_message=f"Modinfo failed for module {mod_name}",
)
- if result.exit_code != 0:
- # CentOS may not include the path when started,
- # specify path and try again.
- self._command = "/usr/sbin/modinfo"
- result = self.run(
- mod_name,
- force_run=force_run,
- no_info_log=no_info_log,
- no_error_log=no_error_log,
- )
return result.stdout
def get_version(
| {"golden_diff": "diff --git a/lisa/tools/lsmod.py b/lisa/tools/lsmod.py\n--- a/lisa/tools/lsmod.py\n+++ b/lisa/tools/lsmod.py\n@@ -36,6 +36,7 @@\n no_error_log: bool = True,\n ) -> bool:\n result = self.run(\n+ sudo=True,\n force_run=force_run,\n no_info_log=no_info_log,\n no_error_log=no_error_log,\ndiff --git a/lisa/tools/modinfo.py b/lisa/tools/modinfo.py\n--- a/lisa/tools/modinfo.py\n+++ b/lisa/tools/modinfo.py\n@@ -30,20 +30,13 @@\n ) -> str:\n result = self.run(\n mod_name,\n+ sudo=True,\n force_run=force_run,\n no_info_log=no_info_log,\n no_error_log=no_error_log,\n+ expected_exit_code=0,\n+ expected_exit_code_failure_message=f\"Modinfo failed for module {mod_name}\",\n )\n- if result.exit_code != 0:\n- # CentOS may not include the path when started,\n- # specify path and try again.\n- self._command = \"/usr/sbin/modinfo\"\n- result = self.run(\n- mod_name,\n- force_run=force_run,\n- no_info_log=no_info_log,\n- no_error_log=no_error_log,\n- )\n return result.stdout\n \n def get_version(\n", "issue": "Command not found (PATH does not contain /usr/sbin)\nGetting errors when using LISAv3 to deploy and test CentOS 7_9 on Azure\r\n\r\n`[ERROR] lisa.env[generated_0].node[0].cmd[7289] not found command: Command not found: modinfo. Check that modinfo is installed and on $PATH`\r\n\r\n`[ERROR] lisa.env[generated_0].node[0].cmd[1038] not found command: Command not found: waagent. Check that waagent is installed and on $PATH`\r\n\r\n`[ERROR] lisa.env[generated_0].node[0].cmd[8629] not found command: Command not found: lsmod. Check that lsmod is installed and on $PATH`\r\n\r\nSSHing into the node confirms that all three of these commands are present and runnable on the node.\r\n\r\nThe error about modinfo missing appears to occur before any tests start running. 
These errors do not occur when deploying and testing Ubuntu 18.04-LTS.\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport re\nfrom typing import Any\n\nfrom lisa.executable import Tool\nfrom lisa.util import find_patterns_in_lines\n\n\nclass Modinfo(Tool):\n __version_pattern = re.compile(r\"^version:[ \\t]*([^ \\n]*)\")\n\n @property\n def command(self) -> str:\n return self._command\n\n def _check_exists(self) -> bool:\n return True\n\n def _initialize(self, *args: Any, **kwargs: Any) -> None:\n self._command = \"modinfo\"\n\n def get_info(\n self,\n mod_name: str,\n force_run: bool = False,\n no_info_log: bool = True,\n no_error_log: bool = True,\n ) -> str:\n result = self.run(\n mod_name,\n force_run=force_run,\n no_info_log=no_info_log,\n no_error_log=no_error_log,\n )\n if result.exit_code != 0:\n # CentOS may not include the path when started,\n # specify path and try again.\n self._command = \"/usr/sbin/modinfo\"\n result = self.run(\n mod_name,\n force_run=force_run,\n no_info_log=no_info_log,\n no_error_log=no_error_log,\n )\n return result.stdout\n\n def get_version(\n self,\n mod_name: str,\n force_run: bool = False,\n no_info_log: bool = True,\n no_error_log: bool = True,\n ) -> str:\n output = self.get_info(\n mod_name=mod_name,\n force_run=force_run,\n no_info_log=no_info_log,\n no_error_log=no_error_log,\n )\n found_version = find_patterns_in_lines(output, [self.__version_pattern])\n return found_version[0][0] if found_version[0] else \"\"\n", "path": "lisa/tools/modinfo.py"}, {"content": "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport re\nfrom typing import Any\n\nfrom lisa.executable import Tool\nfrom lisa.util import LisaException, find_patterns_in_lines\n\n\nclass Lsmod(Tool):\n # The output of lsmod command is in below format -\n # Module Size Used by\n # fuse 52176 3\n # cryptd 14125 0\n # aes_generic 32970 1 aes_i586\n __output_pattern = re.compile(\n r\"^(?P<name>[^\\s]+)\\s+(?P<size>[^\\s]+)\\s+(?P<usedby>.*)?$\", re.MULTILINE\n )\n\n @property\n def command(self) -> str:\n return self._command\n\n def _check_exists(self) -> bool:\n return True\n\n def _initialize(self, *args: Any, **kwargs: Any) -> None:\n self._command = \"lsmod\"\n\n def module_exists(\n self,\n mod_name: str = \"\",\n force_run: bool = False,\n no_info_log: bool = True,\n no_error_log: bool = True,\n ) -> bool:\n result = self.run(\n force_run=force_run,\n no_info_log=no_info_log,\n no_error_log=no_error_log,\n )\n if result.exit_code != 0:\n raise LisaException(\n f\"{self._command} command got non-zero exit code: {result.exit_code}\"\n )\n\n module_info = find_patterns_in_lines(result.stdout, [self.__output_pattern])\n if any(mod_name in info for sublist in module_info for info in sublist):\n return True\n\n return False\n", "path": "lisa/tools/lsmod.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport re\nfrom typing import Any\n\nfrom lisa.executable import Tool\nfrom lisa.util import find_patterns_in_lines\n\n\nclass Modinfo(Tool):\n __version_pattern = re.compile(r\"^version:[ \\t]*([^ \\n]*)\")\n\n @property\n def command(self) -> str:\n return self._command\n\n def _check_exists(self) -> bool:\n return True\n\n def _initialize(self, *args: Any, **kwargs: Any) -> None:\n self._command = \"modinfo\"\n\n def get_info(\n self,\n mod_name: str,\n force_run: bool = False,\n no_info_log: bool = True,\n no_error_log: bool = 
True,\n ) -> str:\n result = self.run(\n mod_name,\n sudo=True,\n force_run=force_run,\n no_info_log=no_info_log,\n no_error_log=no_error_log,\n expected_exit_code=0,\n expected_exit_code_failure_message=f\"Modinfo failed for module {mod_name}\",\n )\n return result.stdout\n\n def get_version(\n self,\n mod_name: str,\n force_run: bool = False,\n no_info_log: bool = True,\n no_error_log: bool = True,\n ) -> str:\n output = self.get_info(\n mod_name=mod_name,\n force_run=force_run,\n no_info_log=no_info_log,\n no_error_log=no_error_log,\n )\n found_version = find_patterns_in_lines(output, [self.__version_pattern])\n return found_version[0][0] if found_version[0] else \"\"\n", "path": "lisa/tools/modinfo.py"}, {"content": "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport re\nfrom typing import Any\n\nfrom lisa.executable import Tool\nfrom lisa.util import LisaException, find_patterns_in_lines\n\n\nclass Lsmod(Tool):\n # The output of lsmod command is in below format -\n # Module Size Used by\n # fuse 52176 3\n # cryptd 14125 0\n # aes_generic 32970 1 aes_i586\n __output_pattern = re.compile(\n r\"^(?P<name>[^\\s]+)\\s+(?P<size>[^\\s]+)\\s+(?P<usedby>.*)?$\", re.MULTILINE\n )\n\n @property\n def command(self) -> str:\n return self._command\n\n def _check_exists(self) -> bool:\n return True\n\n def _initialize(self, *args: Any, **kwargs: Any) -> None:\n self._command = \"lsmod\"\n\n def module_exists(\n self,\n mod_name: str = \"\",\n force_run: bool = False,\n no_info_log: bool = True,\n no_error_log: bool = True,\n ) -> bool:\n result = self.run(\n sudo=True,\n force_run=force_run,\n no_info_log=no_info_log,\n no_error_log=no_error_log,\n )\n if result.exit_code != 0:\n raise LisaException(\n f\"{self._command} command got non-zero exit code: {result.exit_code}\"\n )\n\n module_info = find_patterns_in_lines(result.stdout, [self.__output_pattern])\n if any(mod_name in info for sublist in module_info for info in sublist):\n return True\n\n return False\n", "path": "lisa/tools/lsmod.py"}]} | 1,526 | 315 |
gh_patches_debug_13297 | rasdani/github-patches | git_diff | celery__celery-1206 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
MongoDB and BROKER_USE_SSL=True
I've recently started using MongoDB with BROKER_USE_SSL=True, and it doesn't seem to work: Celery keeps trying to reconnect, logging the "Re-establishing connection" messages below. With BROKER_USE_SSL=False it works well.
``` python
[2013-02-21 14:57:45,708: DEBUG/MainProcess] consumer: Re-establishing connection to the broker...
[2013-02-21 14:57:45,710: INFO/MainProcess] consumer: Connected to mongodb://localhost%3A27017%2Fdata/data.
[2013-02-21 14:57:45,714: ERROR/MainProcess] consumer: Connection to broker lost. Trying to re-establish the connection...
Traceback (most recent call last):
File "/stuff/eggs/celery-3.0.13-py2.7.egg/celery/worker/consumer.py", line 392, in start
self.reset_connection()
File "/stuff/eggs/celery-3.0.13-py2.7.egg/celery/worker/consumer.py", line 741, in reset_connection
self.connection, on_decode_error=self.on_decode_error,
File "/stuff/eggs/celery-3.0.13-py2.7.egg/celery/app/amqp.py", line 291, in __init__
queues or self.app.amqp.queues.consume_from.values(), **kw
File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/messaging.py", line 338, in __init__
self.revive(self.channel)
File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/messaging.py", line 350, in revive
self.declare()
File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/messaging.py", line 360, in declare
queue.declare()
File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/entity.py", line 471, in declare
self.queue_declare(nowait, passive=False)
File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/entity.py", line 497, in queue_declare
nowait=nowait)
File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/transport/virtual/__init__.py", line 398, in queue_declare
return queue, self._size(queue), 0
File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/transport/mongodb.py", line 76, in _size
return self.client.messages.find({'queue': queue}).count()
File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/transport/mongodb.py", line 204, in client
self._client = self._open()
File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/transport/mongodb.py", line 133, in _open
mongoconn = Connection(host=hostname)
File "/stuff/eggs/pymongo-2.4.2-py2.7-linux-x86_64.egg/pymongo/connection.py", line 180, in __init__
max_pool_size, document_class, tz_aware, _connect, **kwargs)
File "/stuff/eggs/pymongo-2.4.2-py2.7-linux-x86_64.egg/pymongo/mongo_client.py", line 269, in __init__
raise ConnectionFailure(str(e))
ConnectionFailure: [Errno 104] Connection reset by peer
[2013-02-21 14:57:45,716: DEBUG/MainProcess] consumer: Re-establishing connection to the broker...
[2013-02-21 14:57:45,718: INFO/MainProcess] consumer: Connected to mongodb://localhost%3A27017%2Fdata/data.
[2013-02-21 14:57:45,721: ERROR/MainProcess] consumer: Connection to broker lost. Trying to re-establish the connection...
```
The problem seems to be caused by this line
https://github.com/celery/kombu/blob/master/kombu/transport/mongodb.py#L135
which should pass an ssl=True parameter for SSL connections.
I know this is a kombu component, but the setting that leads to the problem (BROKER_USE_SSL) is part of the celery library.
--- END ISSUE ---
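For context, a minimal sketch of the kind of change being asked for is shown below: propagate the broker SSL flag down to the pymongo connection. The helper name is made up for illustration; it assumes pymongo 2.x, whose `Connection` accepts `ssl` and `max_pool_size` keyword arguments, and it is not the project's actual patch.
```python
# Sketch only: honour a BROKER_USE_SSL-style flag when opening the connection.
from pymongo.connection import Connection  # pymongo 2.x


def connect_with_broker_ssl(host, port, max_pool_size, use_ssl):
    args = [host]
    kwargs = {'max_pool_size': max_pool_size, 'ssl': use_ssl}
    # A 'mongodb://' URI already carries its own port(s); only append the
    # port when a bare hostname was given.
    if isinstance(host, str) and not host.startswith('mongodb://'):
        args.append(port)
    return Connection(*args, **kwargs)
```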
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `celery/backends/mongodb.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 celery.backends.mongodb
4 ~~~~~~~~~~~~~~~~~~~~~~~
5
6 MongoDB result store backend.
7
8 """
9 from __future__ import absolute_import
10
11 from datetime import datetime
12
13 try:
14 import pymongo
15 except ImportError: # pragma: no cover
16 pymongo = None # noqa
17
18 if pymongo:
19 try:
20 from bson.binary import Binary
21 except ImportError: # pragma: no cover
22 from pymongo.binary import Binary # noqa
23 else: # pragma: no cover
24 Binary = None # noqa
25
26 from kombu.utils import cached_property
27
28 from celery import states
29 from celery.exceptions import ImproperlyConfigured
30 from celery.five import string_t
31 from celery.utils.timeutils import maybe_timedelta
32
33 from .base import BaseBackend
34
35
36 class Bunch(object):
37
38 def __init__(self, **kw):
39 self.__dict__.update(kw)
40
41
42 class MongoBackend(BaseBackend):
43 mongodb_host = 'localhost'
44 mongodb_port = 27017
45 mongodb_user = None
46 mongodb_password = None
47 mongodb_database = 'celery'
48 mongodb_taskmeta_collection = 'celery_taskmeta'
49 mongodb_max_pool_size = 10
50
51 def __init__(self, *args, **kwargs):
52 """Initialize MongoDB backend instance.
53
54 :raises celery.exceptions.ImproperlyConfigured: if
55 module :mod:`pymongo` is not available.
56
57 """
58 super(MongoBackend, self).__init__(*args, **kwargs)
59 self.expires = kwargs.get('expires') or maybe_timedelta(
60 self.app.conf.CELERY_TASK_RESULT_EXPIRES)
61
62 if not pymongo:
63 raise ImproperlyConfigured(
64 'You need to install the pymongo library to use the '
65 'MongoDB backend.')
66
67 config = self.app.conf.get('CELERY_MONGODB_BACKEND_SETTINGS', None)
68 if config is not None:
69 if not isinstance(config, dict):
70 raise ImproperlyConfigured(
71 'MongoDB backend settings should be grouped in a dict')
72
73 self.mongodb_host = config.get('host', self.mongodb_host)
74 self.mongodb_port = int(config.get('port', self.mongodb_port))
75 self.mongodb_user = config.get('user', self.mongodb_user)
76 self.mongodb_password = config.get(
77 'password', self.mongodb_password)
78 self.mongodb_database = config.get(
79 'database', self.mongodb_database)
80 self.mongodb_taskmeta_collection = config.get(
81 'taskmeta_collection', self.mongodb_taskmeta_collection)
82 self.mongodb_max_pool_size = config.get(
83 'max_pool_size', self.mongodb_max_pool_size)
84
85 self._connection = None
86
87 def _get_connection(self):
88 """Connect to the MongoDB server."""
89 if self._connection is None:
90 from pymongo.connection import Connection
91
92 # The first pymongo.Connection() argument (host) can be
93 # a list of ['host:port'] elements or a mongodb connection
94 # URI. If this is the case, don't use self.mongodb_port
95 # but let pymongo get the port(s) from the URI instead.
96 # This enables the use of replica sets and sharding.
97 # See pymongo.Connection() for more info.
98 args = [self.mongodb_host]
99 kwargs = {'max_pool_size': self.mongodb_max_pool_size}
100 if isinstance(self.mongodb_host, string_t) \
101 and not self.mongodb_host.startswith('mongodb://'):
102 args.append(self.mongodb_port)
103
104 self._connection = Connection(*args, **kwargs)
105
106 return self._connection
107
108 def process_cleanup(self):
109 if self._connection is not None:
110 # MongoDB connection will be closed automatically when object
111 # goes out of scope
112 self._connection = None
113
114 def _store_result(self, task_id, result, status, traceback=None):
115 """Store return value and status of an executed task."""
116 meta = {'_id': task_id,
117 'status': status,
118 'result': Binary(self.encode(result)),
119 'date_done': datetime.utcnow(),
120 'traceback': Binary(self.encode(traceback)),
121 'children': Binary(self.encode(self.current_task_children()))}
122 self.collection.save(meta, safe=True)
123
124 return result
125
126 def _get_task_meta_for(self, task_id):
127 """Get task metadata for a task by id."""
128
129 obj = self.collection.find_one({'_id': task_id})
130 if not obj:
131 return {'status': states.PENDING, 'result': None}
132
133 meta = {
134 'task_id': obj['_id'],
135 'status': obj['status'],
136 'result': self.decode(obj['result']),
137 'date_done': obj['date_done'],
138 'traceback': self.decode(obj['traceback']),
139 'children': self.decode(obj['children']),
140 }
141
142 return meta
143
144 def _save_group(self, group_id, result):
145 """Save the group result."""
146 meta = {'_id': group_id,
147 'result': Binary(self.encode(result)),
148 'date_done': datetime.utcnow()}
149 self.collection.save(meta, safe=True)
150
151 return result
152
153 def _restore_group(self, group_id):
154 """Get the result for a group by id."""
155 obj = self.collection.find_one({'_id': group_id})
156 if not obj:
157 return
158
159 meta = {
160 'task_id': obj['_id'],
161 'result': self.decode(obj['result']),
162 'date_done': obj['date_done'],
163 }
164
165 return meta
166
167 def _delete_group(self, group_id):
168 """Delete a group by id."""
169 self.collection.remove({'_id': group_id})
170
171 def _forget(self, task_id):
172 """
173 Remove result from MongoDB.
174
175 :raises celery.exceptions.OperationsError: if the task_id could not be
176 removed.
177 """
178 # By using safe=True, this will wait until it receives a response from
179 # the server. Likewise, it will raise an OperationsError if the
180 # response was unable to be completed.
181 self.collection.remove({'_id': task_id}, safe=True)
182
183 def cleanup(self):
184 """Delete expired metadata."""
185 self.collection.remove(
186 {'date_done': {'$lt': self.app.now() - self.expires}},
187 )
188
189 def __reduce__(self, args=(), kwargs={}):
190 kwargs.update(
191 dict(expires=self.expires))
192 return super(MongoBackend, self).__reduce__(args, kwargs)
193
194 def _get_database(self):
195 conn = self._get_connection()
196 db = conn[self.mongodb_database]
197 if self.mongodb_user and self.mongodb_password:
198 if not db.authenticate(self.mongodb_user,
199 self.mongodb_password):
200 raise ImproperlyConfigured(
201 'Invalid MongoDB username or password.')
202 return db
203
204 @cached_property
205 def database(self):
206 """Get database from MongoDB connection and perform authentication
207 if necessary."""
208 return self._get_database()
209
210 @cached_property
211 def collection(self):
212 """Get the metadata task collection."""
213 collection = self.database[self.mongodb_taskmeta_collection]
214
215 # Ensure an index on date_done is there, if not process the index
216 # in the background. Once completed cleanup will be much faster
217 collection.ensure_index('date_done', background='true')
218 return collection
219
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/celery/backends/mongodb.py b/celery/backends/mongodb.py
--- a/celery/backends/mongodb.py
+++ b/celery/backends/mongodb.py
@@ -96,7 +96,10 @@
# This enables the use of replica sets and sharding.
# See pymongo.Connection() for more info.
args = [self.mongodb_host]
- kwargs = {'max_pool_size': self.mongodb_max_pool_size}
+ kwargs = {
+ 'max_pool_size': self.mongodb_max_pool_size,
+ 'ssl': self.app.conf.BROKER_USE_SSL
+ }
if isinstance(self.mongodb_host, string_t) \
and not self.mongodb_host.startswith('mongodb://'):
args.append(self.mongodb_port)
| {"golden_diff": "diff --git a/celery/backends/mongodb.py b/celery/backends/mongodb.py\n--- a/celery/backends/mongodb.py\n+++ b/celery/backends/mongodb.py\n@@ -96,7 +96,10 @@\n # This enables the use of replica sets and sharding.\n # See pymongo.Connection() for more info.\n args = [self.mongodb_host]\n- kwargs = {'max_pool_size': self.mongodb_max_pool_size}\n+ kwargs = {\n+ 'max_pool_size': self.mongodb_max_pool_size,\n+ 'ssl': self.app.conf.BROKER_USE_SSL\n+ }\n if isinstance(self.mongodb_host, string_t) \\\n and not self.mongodb_host.startswith('mongodb://'):\n args.append(self.mongodb_port)\n", "issue": "MongoDB and BROKER_USE_SSL=True\nI've recently started with mongodb and BROKER_USE_SSL=True, this doesn't seem to work. Celery is trying to reconnect with Re-establishing connection message. BROKER_USE_SSL=False works well. \n\n``` python\n[2013-02-21 14:57:45,708: DEBUG/MainProcess] consumer: Re-establishing connection to the broker...\n[2013-02-21 14:57:45,710: INFO/MainProcess] consumer: Connected to mongodb://localhost%3A27017%2Fdata/data.\n[2013-02-21 14:57:45,714: ERROR/MainProcess] consumer: Connection to broker lost. Trying to re-establish the connection...\nTraceback (most recent call last):\n File \"/stuff/eggs/celery-3.0.13-py2.7.egg/celery/worker/consumer.py\", line 392, in start\n self.reset_connection()\n File \"/stuff/eggs/celery-3.0.13-py2.7.egg/celery/worker/consumer.py\", line 741, in reset_connection\n self.connection, on_decode_error=self.on_decode_error,\n File \"/stuff/eggs/celery-3.0.13-py2.7.egg/celery/app/amqp.py\", line 291, in __init__\n queues or self.app.amqp.queues.consume_from.values(), **kw\n File \"/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/messaging.py\", line 338, in __init__\n self.revive(self.channel)\n File \"/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/messaging.py\", line 350, in revive\n self.declare()\n File \"/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/messaging.py\", line 360, in declare\n queue.declare()\n File \"/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/entity.py\", line 471, in declare\n self.queue_declare(nowait, passive=False)\n File \"/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/entity.py\", line 497, in queue_declare\n nowait=nowait)\n File \"/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/transport/virtual/__init__.py\", line 398, in queue_declare\n return queue, self._size(queue), 0\n File \"/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/transport/mongodb.py\", line 76, in _size\n return self.client.messages.find({'queue': queue}).count()\n File \"/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/transport/mongodb.py\", line 204, in client\n self._client = self._open()\n File \"/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/transport/mongodb.py\", line 133, in _open\n mongoconn = Connection(host=hostname)\n File \"/stuff/eggs/pymongo-2.4.2-py2.7-linux-x86_64.egg/pymongo/connection.py\", line 180, in __init__\n max_pool_size, document_class, tz_aware, _connect, **kwargs)\n File \"/stuff/eggs/pymongo-2.4.2-py2.7-linux-x86_64.egg/pymongo/mongo_client.py\", line 269, in __init__\n raise ConnectionFailure(str(e))\nConnectionFailure: [Errno 104] Connection reset by peer\n[2013-02-21 14:57:45,716: DEBUG/MainProcess] consumer: Re-establishing connection to the broker...\n[2013-02-21 14:57:45,718: INFO/MainProcess] consumer: Connected to mongodb://localhost%3A27017%2Fdata/data.\n[2013-02-21 14:57:45,721: ERROR/MainProcess] consumer: Connection to broker lost. 
Trying to re-establish the connection...\n```\n\nProblem seems to be generated by this line \nhttps://github.com/celery/kombu/blob/master/kombu/transport/mongodb.py#L135\nwhich should take ssl=True parameter for SSL connections.\n\nI know it's kombu component, but setting (BROKER_USE_SSL) which is leading to this problem is part of celery library. \n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\n celery.backends.mongodb\n ~~~~~~~~~~~~~~~~~~~~~~~\n\n MongoDB result store backend.\n\n\"\"\"\nfrom __future__ import absolute_import\n\nfrom datetime import datetime\n\ntry:\n import pymongo\nexcept ImportError: # pragma: no cover\n pymongo = None # noqa\n\nif pymongo:\n try:\n from bson.binary import Binary\n except ImportError: # pragma: no cover\n from pymongo.binary import Binary # noqa\nelse: # pragma: no cover\n Binary = None # noqa\n\nfrom kombu.utils import cached_property\n\nfrom celery import states\nfrom celery.exceptions import ImproperlyConfigured\nfrom celery.five import string_t\nfrom celery.utils.timeutils import maybe_timedelta\n\nfrom .base import BaseBackend\n\n\nclass Bunch(object):\n\n def __init__(self, **kw):\n self.__dict__.update(kw)\n\n\nclass MongoBackend(BaseBackend):\n mongodb_host = 'localhost'\n mongodb_port = 27017\n mongodb_user = None\n mongodb_password = None\n mongodb_database = 'celery'\n mongodb_taskmeta_collection = 'celery_taskmeta'\n mongodb_max_pool_size = 10\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize MongoDB backend instance.\n\n :raises celery.exceptions.ImproperlyConfigured: if\n module :mod:`pymongo` is not available.\n\n \"\"\"\n super(MongoBackend, self).__init__(*args, **kwargs)\n self.expires = kwargs.get('expires') or maybe_timedelta(\n self.app.conf.CELERY_TASK_RESULT_EXPIRES)\n\n if not pymongo:\n raise ImproperlyConfigured(\n 'You need to install the pymongo library to use the '\n 'MongoDB backend.')\n\n config = self.app.conf.get('CELERY_MONGODB_BACKEND_SETTINGS', None)\n if config is not None:\n if not isinstance(config, dict):\n raise ImproperlyConfigured(\n 'MongoDB backend settings should be grouped in a dict')\n\n self.mongodb_host = config.get('host', self.mongodb_host)\n self.mongodb_port = int(config.get('port', self.mongodb_port))\n self.mongodb_user = config.get('user', self.mongodb_user)\n self.mongodb_password = config.get(\n 'password', self.mongodb_password)\n self.mongodb_database = config.get(\n 'database', self.mongodb_database)\n self.mongodb_taskmeta_collection = config.get(\n 'taskmeta_collection', self.mongodb_taskmeta_collection)\n self.mongodb_max_pool_size = config.get(\n 'max_pool_size', self.mongodb_max_pool_size)\n\n self._connection = None\n\n def _get_connection(self):\n \"\"\"Connect to the MongoDB server.\"\"\"\n if self._connection is None:\n from pymongo.connection import Connection\n\n # The first pymongo.Connection() argument (host) can be\n # a list of ['host:port'] elements or a mongodb connection\n # URI. 
If this is the case, don't use self.mongodb_port\n # but let pymongo get the port(s) from the URI instead.\n # This enables the use of replica sets and sharding.\n # See pymongo.Connection() for more info.\n args = [self.mongodb_host]\n kwargs = {'max_pool_size': self.mongodb_max_pool_size}\n if isinstance(self.mongodb_host, string_t) \\\n and not self.mongodb_host.startswith('mongodb://'):\n args.append(self.mongodb_port)\n\n self._connection = Connection(*args, **kwargs)\n\n return self._connection\n\n def process_cleanup(self):\n if self._connection is not None:\n # MongoDB connection will be closed automatically when object\n # goes out of scope\n self._connection = None\n\n def _store_result(self, task_id, result, status, traceback=None):\n \"\"\"Store return value and status of an executed task.\"\"\"\n meta = {'_id': task_id,\n 'status': status,\n 'result': Binary(self.encode(result)),\n 'date_done': datetime.utcnow(),\n 'traceback': Binary(self.encode(traceback)),\n 'children': Binary(self.encode(self.current_task_children()))}\n self.collection.save(meta, safe=True)\n\n return result\n\n def _get_task_meta_for(self, task_id):\n \"\"\"Get task metadata for a task by id.\"\"\"\n\n obj = self.collection.find_one({'_id': task_id})\n if not obj:\n return {'status': states.PENDING, 'result': None}\n\n meta = {\n 'task_id': obj['_id'],\n 'status': obj['status'],\n 'result': self.decode(obj['result']),\n 'date_done': obj['date_done'],\n 'traceback': self.decode(obj['traceback']),\n 'children': self.decode(obj['children']),\n }\n\n return meta\n\n def _save_group(self, group_id, result):\n \"\"\"Save the group result.\"\"\"\n meta = {'_id': group_id,\n 'result': Binary(self.encode(result)),\n 'date_done': datetime.utcnow()}\n self.collection.save(meta, safe=True)\n\n return result\n\n def _restore_group(self, group_id):\n \"\"\"Get the result for a group by id.\"\"\"\n obj = self.collection.find_one({'_id': group_id})\n if not obj:\n return\n\n meta = {\n 'task_id': obj['_id'],\n 'result': self.decode(obj['result']),\n 'date_done': obj['date_done'],\n }\n\n return meta\n\n def _delete_group(self, group_id):\n \"\"\"Delete a group by id.\"\"\"\n self.collection.remove({'_id': group_id})\n\n def _forget(self, task_id):\n \"\"\"\n Remove result from MongoDB.\n\n :raises celery.exceptions.OperationsError: if the task_id could not be\n removed.\n \"\"\"\n # By using safe=True, this will wait until it receives a response from\n # the server. 
Likewise, it will raise an OperationsError if the\n # response was unable to be completed.\n self.collection.remove({'_id': task_id}, safe=True)\n\n def cleanup(self):\n \"\"\"Delete expired metadata.\"\"\"\n self.collection.remove(\n {'date_done': {'$lt': self.app.now() - self.expires}},\n )\n\n def __reduce__(self, args=(), kwargs={}):\n kwargs.update(\n dict(expires=self.expires))\n return super(MongoBackend, self).__reduce__(args, kwargs)\n\n def _get_database(self):\n conn = self._get_connection()\n db = conn[self.mongodb_database]\n if self.mongodb_user and self.mongodb_password:\n if not db.authenticate(self.mongodb_user,\n self.mongodb_password):\n raise ImproperlyConfigured(\n 'Invalid MongoDB username or password.')\n return db\n\n @cached_property\n def database(self):\n \"\"\"Get database from MongoDB connection and perform authentication\n if necessary.\"\"\"\n return self._get_database()\n\n @cached_property\n def collection(self):\n \"\"\"Get the metadata task collection.\"\"\"\n collection = self.database[self.mongodb_taskmeta_collection]\n\n # Ensure an index on date_done is there, if not process the index\n # in the background. Once completed cleanup will be much faster\n collection.ensure_index('date_done', background='true')\n return collection\n", "path": "celery/backends/mongodb.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\n celery.backends.mongodb\n ~~~~~~~~~~~~~~~~~~~~~~~\n\n MongoDB result store backend.\n\n\"\"\"\nfrom __future__ import absolute_import\n\nfrom datetime import datetime\n\ntry:\n import pymongo\nexcept ImportError: # pragma: no cover\n pymongo = None # noqa\n\nif pymongo:\n try:\n from bson.binary import Binary\n except ImportError: # pragma: no cover\n from pymongo.binary import Binary # noqa\nelse: # pragma: no cover\n Binary = None # noqa\n\nfrom kombu.utils import cached_property\n\nfrom celery import states\nfrom celery.exceptions import ImproperlyConfigured\nfrom celery.five import string_t\nfrom celery.utils.timeutils import maybe_timedelta\n\nfrom .base import BaseBackend\n\n\nclass Bunch(object):\n\n def __init__(self, **kw):\n self.__dict__.update(kw)\n\n\nclass MongoBackend(BaseBackend):\n mongodb_host = 'localhost'\n mongodb_port = 27017\n mongodb_user = None\n mongodb_password = None\n mongodb_database = 'celery'\n mongodb_taskmeta_collection = 'celery_taskmeta'\n mongodb_max_pool_size = 10\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize MongoDB backend instance.\n\n :raises celery.exceptions.ImproperlyConfigured: if\n module :mod:`pymongo` is not available.\n\n \"\"\"\n super(MongoBackend, self).__init__(*args, **kwargs)\n self.expires = kwargs.get('expires') or maybe_timedelta(\n self.app.conf.CELERY_TASK_RESULT_EXPIRES)\n\n if not pymongo:\n raise ImproperlyConfigured(\n 'You need to install the pymongo library to use the '\n 'MongoDB backend.')\n\n config = self.app.conf.get('CELERY_MONGODB_BACKEND_SETTINGS', None)\n if config is not None:\n if not isinstance(config, dict):\n raise ImproperlyConfigured(\n 'MongoDB backend settings should be grouped in a dict')\n\n self.mongodb_host = config.get('host', self.mongodb_host)\n self.mongodb_port = int(config.get('port', self.mongodb_port))\n self.mongodb_user = config.get('user', self.mongodb_user)\n self.mongodb_password = config.get(\n 'password', self.mongodb_password)\n self.mongodb_database = config.get(\n 'database', self.mongodb_database)\n self.mongodb_taskmeta_collection = config.get(\n 'taskmeta_collection', self.mongodb_taskmeta_collection)\n 
self.mongodb_max_pool_size = config.get(\n 'max_pool_size', self.mongodb_max_pool_size)\n\n self._connection = None\n\n def _get_connection(self):\n \"\"\"Connect to the MongoDB server.\"\"\"\n if self._connection is None:\n from pymongo.connection import Connection\n\n # The first pymongo.Connection() argument (host) can be\n # a list of ['host:port'] elements or a mongodb connection\n # URI. If this is the case, don't use self.mongodb_port\n # but let pymongo get the port(s) from the URI instead.\n # This enables the use of replica sets and sharding.\n # See pymongo.Connection() for more info.\n args = [self.mongodb_host]\n kwargs = {\n 'max_pool_size': self.mongodb_max_pool_size,\n 'ssl': self.app.conf.BROKER_USE_SSL\n }\n if isinstance(self.mongodb_host, string_t) \\\n and not self.mongodb_host.startswith('mongodb://'):\n args.append(self.mongodb_port)\n\n self._connection = Connection(*args, **kwargs)\n\n return self._connection\n\n def process_cleanup(self):\n if self._connection is not None:\n # MongoDB connection will be closed automatically when object\n # goes out of scope\n self._connection = None\n\n def _store_result(self, task_id, result, status, traceback=None):\n \"\"\"Store return value and status of an executed task.\"\"\"\n meta = {'_id': task_id,\n 'status': status,\n 'result': Binary(self.encode(result)),\n 'date_done': datetime.utcnow(),\n 'traceback': Binary(self.encode(traceback)),\n 'children': Binary(self.encode(self.current_task_children()))}\n self.collection.save(meta, safe=True)\n\n return result\n\n def _get_task_meta_for(self, task_id):\n \"\"\"Get task metadata for a task by id.\"\"\"\n\n obj = self.collection.find_one({'_id': task_id})\n if not obj:\n return {'status': states.PENDING, 'result': None}\n\n meta = {\n 'task_id': obj['_id'],\n 'status': obj['status'],\n 'result': self.decode(obj['result']),\n 'date_done': obj['date_done'],\n 'traceback': self.decode(obj['traceback']),\n 'children': self.decode(obj['children']),\n }\n\n return meta\n\n def _save_group(self, group_id, result):\n \"\"\"Save the group result.\"\"\"\n meta = {'_id': group_id,\n 'result': Binary(self.encode(result)),\n 'date_done': datetime.utcnow()}\n self.collection.save(meta, safe=True)\n\n return result\n\n def _restore_group(self, group_id):\n \"\"\"Get the result for a group by id.\"\"\"\n obj = self.collection.find_one({'_id': group_id})\n if not obj:\n return\n\n meta = {\n 'task_id': obj['_id'],\n 'result': self.decode(obj['result']),\n 'date_done': obj['date_done'],\n }\n\n return meta\n\n def _delete_group(self, group_id):\n \"\"\"Delete a group by id.\"\"\"\n self.collection.remove({'_id': group_id})\n\n def _forget(self, task_id):\n \"\"\"\n Remove result from MongoDB.\n\n :raises celery.exceptions.OperationsError: if the task_id could not be\n removed.\n \"\"\"\n # By using safe=True, this will wait until it receives a response from\n # the server. 
Likewise, it will raise an OperationsError if the\n # response was unable to be completed.\n self.collection.remove({'_id': task_id}, safe=True)\n\n def cleanup(self):\n \"\"\"Delete expired metadata.\"\"\"\n self.collection.remove(\n {'date_done': {'$lt': self.app.now() - self.expires}},\n )\n\n def __reduce__(self, args=(), kwargs={}):\n kwargs.update(\n dict(expires=self.expires))\n return super(MongoBackend, self).__reduce__(args, kwargs)\n\n def _get_database(self):\n conn = self._get_connection()\n db = conn[self.mongodb_database]\n if self.mongodb_user and self.mongodb_password:\n if not db.authenticate(self.mongodb_user,\n self.mongodb_password):\n raise ImproperlyConfigured(\n 'Invalid MongoDB username or password.')\n return db\n\n @cached_property\n def database(self):\n \"\"\"Get database from MongoDB connection and perform authentication\n if necessary.\"\"\"\n return self._get_database()\n\n @cached_property\n def collection(self):\n \"\"\"Get the metadata task collection.\"\"\"\n collection = self.database[self.mongodb_taskmeta_collection]\n\n # Ensure an index on date_done is there, if not process the index\n # in the background. Once completed cleanup will be much faster\n collection.ensure_index('date_done', background='true')\n return collection\n", "path": "celery/backends/mongodb.py"}]} | 3,513 | 167 |
gh_patches_debug_39443 | rasdani/github-patches | git_diff | DataDog__dd-agent-1263 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
system.mem.pct_usable is missing on Windows
We don't compute this very useful metric on Windows. We should, for more consistency with Unix.
We need to hack around here:
https://github.com/DataDog/dd-agent/blob/master/checks/system/win32.py#L166
--- END ISSUE ---
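The Unix checks report this as usable memory (free plus cached) divided by total memory, so a Windows version only needs numbers the existing WMI queries already return. The function below is a hypothetical illustration of that arithmetic, with all values in megabytes; it is not dd-agent code.
```python
# Hypothetical helper showing the arithmetic behind the missing metrics.
def usable_memory_metrics(total_mb, free_mb, cached_mb):
    usable = free_mb + cached_mb
    metrics = {'system.mem.usable': usable}
    if total_mb > 0:
        # Fraction between 0 and 1, matching the Unix checks.
        metrics['system.mem.pct_usable'] = float(usable) / total_mb
    return metrics


# Example: 8 GB total, 1 GB free, 2 GB cached -> pct_usable == 0.375
print(usable_memory_metrics(8192, 1024, 2048))
```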
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checks/system/win32.py`
Content:
```
1 # project
2 from checks import Check
3
4 # 3rd party
5 try:
6 import psutil
7 except ImportError:
8 psutil = None
9
10 try:
11 import wmi
12 w = wmi.WMI()
13 except Exception:
14 wmi, w = None, None
15
16 # Device WMI drive types
17 class DriveType(object):
18 UNKNOWN, NOROOT, REMOVEABLE, LOCAL, NETWORK, CD, RAM = (0, 1, 2, 3, 4, 5, 6)
19 B2MB = float(1048576)
20 KB2MB = B2KB = float(1024)
21
22 def should_ignore_disk(name, blacklist_re):
23 # blacklist_re is a compiled regex, compilation done at config loading time
24 return name =='_total' or blacklist_re is not None and blacklist_re.match(name)
25
26 class Processes(Check):
27 def __init__(self, logger):
28 Check.__init__(self, logger)
29 self.gauge('system.proc.queue_length')
30 self.gauge('system.proc.count')
31
32 def check(self, agentConfig):
33 try:
34 os = w.Win32_PerfFormattedData_PerfOS_System()[0]
35 except AttributeError:
36 self.logger.info('Missing Win32_PerfFormattedData_PerfOS_System WMI class.' \
37 ' No process metrics will be returned.')
38 return
39
40 try:
41 cpu = w.Win32_PerfFormattedData_PerfOS_Processor(name="_Total")[0]
42 except AttributeError:
43 self.logger.info('Missing Win32_PerfFormattedData_PerfOS_Processor WMI class.' \
44 ' No process metrics will be returned.')
45 return
46 if os.ProcessorQueueLength is not None:
47 self.save_sample('system.proc.queue_length', os.ProcessorQueueLength)
48 if os.Processes is not None:
49 self.save_sample('system.proc.count', os.Processes)
50
51 return self.get_metrics()
52
53 class Memory(Check):
54 def __init__(self, logger):
55 Check.__init__(self, logger)
56 self.logger = logger
57 self.gauge('system.mem.free')
58 self.gauge('system.mem.used')
59 self.gauge('system.mem.total')
60 self.gauge('system.mem.cached')
61 self.gauge('system.mem.committed')
62 self.gauge('system.mem.paged')
63 self.gauge('system.mem.nonpaged')
64
65 def check(self, agentConfig):
66 try:
67 os = w.Win32_OperatingSystem()[0]
68 except AttributeError:
69 self.logger.info('Missing Win32_OperatingSystem. No memory metrics will be returned.')
70 return
71
72 if os.TotalVisibleMemorySize is not None and os.FreePhysicalMemory is not None:
73 total = int(os.TotalVisibleMemorySize) / KB2MB
74 free = int(os.FreePhysicalMemory) / KB2MB
75 self.save_sample('system.mem.total', total)
76 self.save_sample('system.mem.free', free)
77 self.save_sample('system.mem.used', total - free)
78
79 mem = w.Win32_PerfFormattedData_PerfOS_Memory()[0]
80 if mem.CacheBytes is not None:
81 self.save_sample('system.mem.cached', int(mem.CacheBytes) / B2MB)
82 if mem.CommittedBytes is not None:
83 self.save_sample('system.mem.committed', int(mem.CommittedBytes) / B2MB)
84 if mem.PoolPagedBytes is not None:
85 self.save_sample('system.mem.paged', int(mem.PoolPagedBytes) / B2MB)
86 if mem.PoolNonpagedBytes is not None:
87 self.save_sample('system.mem.nonpaged', int(mem.PoolNonpagedBytes) / B2MB)
88
89 return self.get_metrics()
90
91 class Cpu(Check):
92 def __init__(self, logger):
93 Check.__init__(self, logger)
94 self.logger = logger
95 self.counter('system.cpu.user')
96 self.counter('system.cpu.idle')
97 self.gauge('system.cpu.interrupt')
98 self.counter('system.cpu.system')
99
100 def check(self, agentConfig):
101 try:
102 cpu = w.Win32_PerfFormattedData_PerfOS_Processor()
103 except AttributeError:
104 self.logger.info('Missing Win32_PerfFormattedData_PerfOS_Processor WMI class.' \
105 ' No CPU metrics will be returned.')
106 return
107
108 cpu_interrupt = self._average_metric(cpu, 'PercentInterruptTime')
109 if cpu_interrupt is not None:
110 self.save_sample('system.cpu.interrupt', cpu_interrupt)
111
112 cpu_percent = psutil.cpu_times()
113
114 self.save_sample('system.cpu.user', 100 * cpu_percent.user / psutil.NUM_CPUS)
115 self.save_sample('system.cpu.idle', 100 * cpu_percent.idle / psutil.NUM_CPUS)
116 self.save_sample('system.cpu.system', 100 * cpu_percent.system/ psutil.NUM_CPUS)
117
118 return self.get_metrics()
119
120 def _average_metric(self, wmi_class, wmi_prop):
121 ''' Sum all of the values of a metric from a WMI class object, excluding
122 the value for "_Total"
123 '''
124 val = 0
125 counter = 0
126 for wmi_object in wmi_class:
127 if wmi_object.Name == '_Total':
128 # Skip the _Total value
129 continue
130
131 if getattr(wmi_object, wmi_prop) is not None:
132 counter += 1
133 val += float(getattr(wmi_object, wmi_prop))
134
135 if counter > 0:
136 return val / counter
137
138 return val
139
140
141 class Network(Check):
142 def __init__(self, logger):
143 Check.__init__(self, logger)
144 self.logger = logger
145 self.gauge('system.net.bytes_rcvd')
146 self.gauge('system.net.bytes_sent')
147
148 def check(self, agentConfig):
149 try:
150 net = w.Win32_PerfFormattedData_Tcpip_NetworkInterface()
151 except AttributeError:
152 self.logger.info('Missing Win32_PerfFormattedData_Tcpip_NetworkInterface WMI class.' \
153 ' No network metrics will be returned')
154 return
155
156 for iface in net:
157 name = self.normalize_device_name(iface.name)
158 if iface.BytesReceivedPerSec is not None:
159 self.save_sample('system.net.bytes_rcvd', iface.BytesReceivedPerSec,
160 device_name=name)
161 if iface.BytesSentPerSec is not None:
162 self.save_sample('system.net.bytes_sent', iface.BytesSentPerSec,
163 device_name=name)
164 return self.get_metrics()
165
166 class Disk(Check):
167 def __init__(self, logger):
168 Check.__init__(self, logger)
169 self.logger = logger
170 self.gauge('system.disk.free')
171 self.gauge('system.disk.total')
172 self.gauge('system.disk.in_use')
173 self.gauge('system.disk.used')
174
175 def check(self, agentConfig):
176 try:
177 disk = w.Win32_LogicalDisk()
178 except AttributeError:
179 self.logger.info('Missing Win32_LogicalDisk WMI class.' \
180 ' No disk metrics will be returned.')
181 return
182
183 blacklist_re = agentConfig.get('device_blacklist_re', None)
184 for device in disk:
185 name = self.normalize_device_name(device.name)
186 if device.DriveType in (DriveType.CD, DriveType.UNKNOWN) or should_ignore_disk(name, blacklist_re):
187 continue
188 if device.FreeSpace is not None and device.Size is not None:
189 free = float(device.FreeSpace) / B2KB
190 total = float(device.Size) / B2KB
191 used = total - free
192 self.save_sample('system.disk.free', free, device_name=name)
193 self.save_sample('system.disk.total', total, device_name=name)
194 self.save_sample('system.disk.used', used, device_name=name)
195 self.save_sample('system.disk.in_use', (used / total),
196 device_name=name)
197 return self.get_metrics()
198
199 class IO(Check):
200 def __init__(self, logger):
201 Check.__init__(self, logger)
202 self.logger = logger
203 self.gauge('system.io.wkb_s')
204 self.gauge('system.io.w_s')
205 self.gauge('system.io.rkb_s')
206 self.gauge('system.io.r_s')
207 self.gauge('system.io.avg_q_sz')
208
209 def check(self, agentConfig):
210 try:
211 disk = w.Win32_PerfFormattedData_PerfDisk_LogicalDisk()
212 except AttributeError:
213 self.logger.info('Missing Win32_PerfFormattedData_PerfDisk_LogicalDiskUnable WMI class.' \
214 ' No I/O metrics will be returned.')
215 return
216 blacklist_re = agentConfig.get('device_blacklist_re', None)
217 for device in disk:
218 name = self.normalize_device_name(device.name)
219 if should_ignore_disk(name, blacklist_re):
220 continue
221 if device.DiskWriteBytesPerSec is not None:
222 self.save_sample('system.io.wkb_s', int(device.DiskWriteBytesPerSec) / B2KB,
223 device_name=name)
224 if device.DiskWritesPerSec is not None:
225 self.save_sample('system.io.w_s', int(device.DiskWritesPerSec),
226 device_name=name)
227 if device.DiskReadBytesPerSec is not None:
228 self.save_sample('system.io.rkb_s', int(device.DiskReadBytesPerSec) / B2KB,
229 device_name=name)
230 if device.DiskReadsPerSec is not None:
231 self.save_sample('system.io.r_s', int(device.DiskReadsPerSec),
232 device_name=name)
233 if device.CurrentDiskQueueLength is not None:
234 self.save_sample('system.io.avg_q_sz', device.CurrentDiskQueueLength,
235 device_name=name)
236 return self.get_metrics()
237
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checks/system/win32.py b/checks/system/win32.py
--- a/checks/system/win32.py
+++ b/checks/system/win32.py
@@ -57,10 +57,23 @@
self.gauge('system.mem.free')
self.gauge('system.mem.used')
self.gauge('system.mem.total')
+ # area of physical memory that stores recently used pages of data
+ # for applications
self.gauge('system.mem.cached')
+ # Committed memory is physical memory for which space has been
+ # reserved on the disk paging file in case it must be written
+ # back to disk
self.gauge('system.mem.committed')
+ # physical memory used by the operating system, for objects
+ # that can be written to disk when they are not being used
self.gauge('system.mem.paged')
+ # physical memory used by the operating system for objects that
+ # cannot be written to disk, but must remain in physical memory
+ # as long as they are allocated.
self.gauge('system.mem.nonpaged')
+ # usable = free + cached
+ self.gauge('system.mem.usable')
+ self.gauge('system.mem.pct_usable')
def check(self, agentConfig):
try:
@@ -69,6 +82,10 @@
self.logger.info('Missing Win32_OperatingSystem. No memory metrics will be returned.')
return
+ total = 0
+ free = 0
+ cached = 0
+
if os.TotalVisibleMemorySize is not None and os.FreePhysicalMemory is not None:
total = int(os.TotalVisibleMemorySize) / KB2MB
free = int(os.FreePhysicalMemory) / KB2MB
@@ -76,9 +93,11 @@
self.save_sample('system.mem.free', free)
self.save_sample('system.mem.used', total - free)
+
mem = w.Win32_PerfFormattedData_PerfOS_Memory()[0]
if mem.CacheBytes is not None:
- self.save_sample('system.mem.cached', int(mem.CacheBytes) / B2MB)
+ cached = int(mem.CacheBytes) / B2MB
+ self.save_sample('system.mem.cached', cached)
if mem.CommittedBytes is not None:
self.save_sample('system.mem.committed', int(mem.CommittedBytes) / B2MB)
if mem.PoolPagedBytes is not None:
@@ -86,6 +105,12 @@
if mem.PoolNonpagedBytes is not None:
self.save_sample('system.mem.nonpaged', int(mem.PoolNonpagedBytes) / B2MB)
+ usable = free + cached
+ self.save_sample('system.mem.usable', usable)
+ if total > 0:
+ pct_usable = float(usable) / total
+ self.save_sample('system.mem.pct_usable', pct_usable)
+
return self.get_metrics()
class Cpu(Check):
| {"golden_diff": "diff --git a/checks/system/win32.py b/checks/system/win32.py\n--- a/checks/system/win32.py\n+++ b/checks/system/win32.py\n@@ -57,10 +57,23 @@\n self.gauge('system.mem.free')\n self.gauge('system.mem.used')\n self.gauge('system.mem.total')\n+ # area of physical memory that stores recently used pages of data\n+ # for applications\n self.gauge('system.mem.cached')\n+ # Committed memory is physical memory for which space has been\n+ # reserved on the disk paging file in case it must be written\n+ # back to disk\n self.gauge('system.mem.committed')\n+ # physical memory used by the operating system, for objects\n+ # that can be written to disk when they are not being used\n self.gauge('system.mem.paged')\n+ # physical memory used by the operating system for objects that\n+ # cannot be written to disk, but must remain in physical memory\n+ # as long as they are allocated.\n self.gauge('system.mem.nonpaged')\n+ # usable = free + cached\n+ self.gauge('system.mem.usable')\n+ self.gauge('system.mem.pct_usable')\n \n def check(self, agentConfig):\n try:\n@@ -69,6 +82,10 @@\n self.logger.info('Missing Win32_OperatingSystem. No memory metrics will be returned.')\n return\n \n+ total = 0\n+ free = 0\n+ cached = 0\n+\n if os.TotalVisibleMemorySize is not None and os.FreePhysicalMemory is not None:\n total = int(os.TotalVisibleMemorySize) / KB2MB\n free = int(os.FreePhysicalMemory) / KB2MB\n@@ -76,9 +93,11 @@\n self.save_sample('system.mem.free', free)\n self.save_sample('system.mem.used', total - free)\n \n+\n mem = w.Win32_PerfFormattedData_PerfOS_Memory()[0]\n if mem.CacheBytes is not None:\n- self.save_sample('system.mem.cached', int(mem.CacheBytes) / B2MB)\n+ cached = int(mem.CacheBytes) / B2MB\n+ self.save_sample('system.mem.cached', cached)\n if mem.CommittedBytes is not None:\n self.save_sample('system.mem.committed', int(mem.CommittedBytes) / B2MB)\n if mem.PoolPagedBytes is not None:\n@@ -86,6 +105,12 @@\n if mem.PoolNonpagedBytes is not None:\n self.save_sample('system.mem.nonpaged', int(mem.PoolNonpagedBytes) / B2MB)\n \n+ usable = free + cached\n+ self.save_sample('system.mem.usable', usable)\n+ if total > 0:\n+ pct_usable = float(usable) / total\n+ self.save_sample('system.mem.pct_usable', pct_usable)\n+\n return self.get_metrics()\n \n class Cpu(Check):\n", "issue": "system.mem.pct_usable is missing on Windows\nWe don't compute this very useful metric on Windows. We should to have more consistency with Unix.\nNeed to hack around there: \nhttps://github.com/DataDog/dd-agent/blob/master/checks/system/win32.py#L166\n\n", "before_files": [{"content": "# project\nfrom checks import Check\n\n# 3rd party\ntry:\n import psutil\nexcept ImportError:\n psutil = None\n\ntry:\n import wmi\n w = wmi.WMI()\nexcept Exception:\n wmi, w = None, None\n\n# Device WMI drive types\nclass DriveType(object):\n UNKNOWN, NOROOT, REMOVEABLE, LOCAL, NETWORK, CD, RAM = (0, 1, 2, 3, 4, 5, 6)\nB2MB = float(1048576)\nKB2MB = B2KB = float(1024)\n\ndef should_ignore_disk(name, blacklist_re):\n # blacklist_re is a compiled regex, compilation done at config loading time\n return name =='_total' or blacklist_re is not None and blacklist_re.match(name)\n\nclass Processes(Check):\n def __init__(self, logger):\n Check.__init__(self, logger)\n self.gauge('system.proc.queue_length')\n self.gauge('system.proc.count')\n\n def check(self, agentConfig):\n try:\n os = w.Win32_PerfFormattedData_PerfOS_System()[0]\n except AttributeError:\n self.logger.info('Missing Win32_PerfFormattedData_PerfOS_System WMI class.' 
\\\n ' No process metrics will be returned.')\n return\n\n try:\n cpu = w.Win32_PerfFormattedData_PerfOS_Processor(name=\"_Total\")[0]\n except AttributeError:\n self.logger.info('Missing Win32_PerfFormattedData_PerfOS_Processor WMI class.' \\\n ' No process metrics will be returned.')\n return\n if os.ProcessorQueueLength is not None:\n self.save_sample('system.proc.queue_length', os.ProcessorQueueLength)\n if os.Processes is not None:\n self.save_sample('system.proc.count', os.Processes)\n\n return self.get_metrics()\n\nclass Memory(Check):\n def __init__(self, logger):\n Check.__init__(self, logger)\n self.logger = logger\n self.gauge('system.mem.free')\n self.gauge('system.mem.used')\n self.gauge('system.mem.total')\n self.gauge('system.mem.cached')\n self.gauge('system.mem.committed')\n self.gauge('system.mem.paged')\n self.gauge('system.mem.nonpaged')\n\n def check(self, agentConfig):\n try:\n os = w.Win32_OperatingSystem()[0]\n except AttributeError:\n self.logger.info('Missing Win32_OperatingSystem. No memory metrics will be returned.')\n return\n\n if os.TotalVisibleMemorySize is not None and os.FreePhysicalMemory is not None:\n total = int(os.TotalVisibleMemorySize) / KB2MB\n free = int(os.FreePhysicalMemory) / KB2MB\n self.save_sample('system.mem.total', total)\n self.save_sample('system.mem.free', free)\n self.save_sample('system.mem.used', total - free)\n\n mem = w.Win32_PerfFormattedData_PerfOS_Memory()[0]\n if mem.CacheBytes is not None:\n self.save_sample('system.mem.cached', int(mem.CacheBytes) / B2MB)\n if mem.CommittedBytes is not None:\n self.save_sample('system.mem.committed', int(mem.CommittedBytes) / B2MB)\n if mem.PoolPagedBytes is not None:\n self.save_sample('system.mem.paged', int(mem.PoolPagedBytes) / B2MB)\n if mem.PoolNonpagedBytes is not None:\n self.save_sample('system.mem.nonpaged', int(mem.PoolNonpagedBytes) / B2MB)\n\n return self.get_metrics()\n\nclass Cpu(Check):\n def __init__(self, logger):\n Check.__init__(self, logger)\n self.logger = logger\n self.counter('system.cpu.user')\n self.counter('system.cpu.idle')\n self.gauge('system.cpu.interrupt')\n self.counter('system.cpu.system')\n\n def check(self, agentConfig):\n try:\n cpu = w.Win32_PerfFormattedData_PerfOS_Processor()\n except AttributeError:\n self.logger.info('Missing Win32_PerfFormattedData_PerfOS_Processor WMI class.' 
\\\n ' No CPU metrics will be returned.')\n return\n\n cpu_interrupt = self._average_metric(cpu, 'PercentInterruptTime')\n if cpu_interrupt is not None:\n self.save_sample('system.cpu.interrupt', cpu_interrupt)\n\n cpu_percent = psutil.cpu_times()\n\n self.save_sample('system.cpu.user', 100 * cpu_percent.user / psutil.NUM_CPUS)\n self.save_sample('system.cpu.idle', 100 * cpu_percent.idle / psutil.NUM_CPUS)\n self.save_sample('system.cpu.system', 100 * cpu_percent.system/ psutil.NUM_CPUS)\n\n return self.get_metrics()\n\n def _average_metric(self, wmi_class, wmi_prop):\n ''' Sum all of the values of a metric from a WMI class object, excluding\n the value for \"_Total\"\n '''\n val = 0\n counter = 0\n for wmi_object in wmi_class:\n if wmi_object.Name == '_Total':\n # Skip the _Total value\n continue\n\n if getattr(wmi_object, wmi_prop) is not None:\n counter += 1\n val += float(getattr(wmi_object, wmi_prop))\n\n if counter > 0:\n return val / counter\n\n return val\n\n\nclass Network(Check):\n def __init__(self, logger):\n Check.__init__(self, logger)\n self.logger = logger\n self.gauge('system.net.bytes_rcvd')\n self.gauge('system.net.bytes_sent')\n\n def check(self, agentConfig):\n try:\n net = w.Win32_PerfFormattedData_Tcpip_NetworkInterface()\n except AttributeError:\n self.logger.info('Missing Win32_PerfFormattedData_Tcpip_NetworkInterface WMI class.' \\\n ' No network metrics will be returned')\n return\n\n for iface in net:\n name = self.normalize_device_name(iface.name)\n if iface.BytesReceivedPerSec is not None:\n self.save_sample('system.net.bytes_rcvd', iface.BytesReceivedPerSec,\n device_name=name)\n if iface.BytesSentPerSec is not None:\n self.save_sample('system.net.bytes_sent', iface.BytesSentPerSec,\n device_name=name)\n return self.get_metrics()\n\nclass Disk(Check):\n def __init__(self, logger):\n Check.__init__(self, logger)\n self.logger = logger\n self.gauge('system.disk.free')\n self.gauge('system.disk.total')\n self.gauge('system.disk.in_use')\n self.gauge('system.disk.used')\n\n def check(self, agentConfig):\n try:\n disk = w.Win32_LogicalDisk()\n except AttributeError:\n self.logger.info('Missing Win32_LogicalDisk WMI class.' \\\n ' No disk metrics will be returned.')\n return\n\n blacklist_re = agentConfig.get('device_blacklist_re', None)\n for device in disk:\n name = self.normalize_device_name(device.name)\n if device.DriveType in (DriveType.CD, DriveType.UNKNOWN) or should_ignore_disk(name, blacklist_re):\n continue\n if device.FreeSpace is not None and device.Size is not None:\n free = float(device.FreeSpace) / B2KB\n total = float(device.Size) / B2KB\n used = total - free\n self.save_sample('system.disk.free', free, device_name=name)\n self.save_sample('system.disk.total', total, device_name=name)\n self.save_sample('system.disk.used', used, device_name=name)\n self.save_sample('system.disk.in_use', (used / total),\n device_name=name)\n return self.get_metrics()\n\nclass IO(Check):\n def __init__(self, logger):\n Check.__init__(self, logger)\n self.logger = logger\n self.gauge('system.io.wkb_s')\n self.gauge('system.io.w_s')\n self.gauge('system.io.rkb_s')\n self.gauge('system.io.r_s')\n self.gauge('system.io.avg_q_sz')\n\n def check(self, agentConfig):\n try:\n disk = w.Win32_PerfFormattedData_PerfDisk_LogicalDisk()\n except AttributeError:\n self.logger.info('Missing Win32_PerfFormattedData_PerfDisk_LogicalDiskUnable WMI class.' 
\\\n ' No I/O metrics will be returned.')\n return\n blacklist_re = agentConfig.get('device_blacklist_re', None)\n for device in disk:\n name = self.normalize_device_name(device.name)\n if should_ignore_disk(name, blacklist_re):\n continue\n if device.DiskWriteBytesPerSec is not None:\n self.save_sample('system.io.wkb_s', int(device.DiskWriteBytesPerSec) / B2KB,\n device_name=name)\n if device.DiskWritesPerSec is not None:\n self.save_sample('system.io.w_s', int(device.DiskWritesPerSec),\n device_name=name)\n if device.DiskReadBytesPerSec is not None:\n self.save_sample('system.io.rkb_s', int(device.DiskReadBytesPerSec) / B2KB,\n device_name=name)\n if device.DiskReadsPerSec is not None:\n self.save_sample('system.io.r_s', int(device.DiskReadsPerSec),\n device_name=name)\n if device.CurrentDiskQueueLength is not None:\n self.save_sample('system.io.avg_q_sz', device.CurrentDiskQueueLength,\n device_name=name)\n return self.get_metrics()\n", "path": "checks/system/win32.py"}], "after_files": [{"content": "# project\nfrom checks import Check\n\n# 3rd party\ntry:\n import psutil\nexcept ImportError:\n psutil = None\n\ntry:\n import wmi\n w = wmi.WMI()\nexcept Exception:\n wmi, w = None, None\n\n# Device WMI drive types\nclass DriveType(object):\n UNKNOWN, NOROOT, REMOVEABLE, LOCAL, NETWORK, CD, RAM = (0, 1, 2, 3, 4, 5, 6)\nB2MB = float(1048576)\nKB2MB = B2KB = float(1024)\n\ndef should_ignore_disk(name, blacklist_re):\n # blacklist_re is a compiled regex, compilation done at config loading time\n return name =='_total' or blacklist_re is not None and blacklist_re.match(name)\n\nclass Processes(Check):\n def __init__(self, logger):\n Check.__init__(self, logger)\n self.gauge('system.proc.queue_length')\n self.gauge('system.proc.count')\n\n def check(self, agentConfig):\n try:\n os = w.Win32_PerfFormattedData_PerfOS_System()[0]\n except AttributeError:\n self.logger.info('Missing Win32_PerfFormattedData_PerfOS_System WMI class.' \\\n ' No process metrics will be returned.')\n return\n\n try:\n cpu = w.Win32_PerfFormattedData_PerfOS_Processor(name=\"_Total\")[0]\n except AttributeError:\n self.logger.info('Missing Win32_PerfFormattedData_PerfOS_Processor WMI class.' 
\\\n ' No process metrics will be returned.')\n return\n if os.ProcessorQueueLength is not None:\n self.save_sample('system.proc.queue_length', os.ProcessorQueueLength)\n if os.Processes is not None:\n self.save_sample('system.proc.count', os.Processes)\n\n return self.get_metrics()\n\nclass Memory(Check):\n def __init__(self, logger):\n Check.__init__(self, logger)\n self.logger = logger\n self.gauge('system.mem.free')\n self.gauge('system.mem.used')\n self.gauge('system.mem.total')\n # area of physical memory that stores recently used pages of data\n # for applications\n self.gauge('system.mem.cached')\n # Committed memory is physical memory for which space has been\n # reserved on the disk paging file in case it must be written\n # back to disk\n self.gauge('system.mem.committed')\n # physical memory used by the operating system, for objects\n # that can be written to disk when they are not being used\n self.gauge('system.mem.paged')\n # physical memory used by the operating system for objects that\n # cannot be written to disk, but must remain in physical memory\n # as long as they are allocated.\n self.gauge('system.mem.nonpaged')\n # usable = free + cached\n self.gauge('system.mem.usable')\n self.gauge('system.mem.pct_usable')\n\n def check(self, agentConfig):\n try:\n os = w.Win32_OperatingSystem()[0]\n except AttributeError:\n self.logger.info('Missing Win32_OperatingSystem. No memory metrics will be returned.')\n return\n\n total = 0\n free = 0\n cached = 0\n\n if os.TotalVisibleMemorySize is not None and os.FreePhysicalMemory is not None:\n total = int(os.TotalVisibleMemorySize) / KB2MB\n free = int(os.FreePhysicalMemory) / KB2MB\n self.save_sample('system.mem.total', total)\n self.save_sample('system.mem.free', free)\n self.save_sample('system.mem.used', total - free)\n\n\n mem = w.Win32_PerfFormattedData_PerfOS_Memory()[0]\n if mem.CacheBytes is not None:\n cached = int(mem.CacheBytes) / B2MB\n self.save_sample('system.mem.cached', cached)\n if mem.CommittedBytes is not None:\n self.save_sample('system.mem.committed', int(mem.CommittedBytes) / B2MB)\n if mem.PoolPagedBytes is not None:\n self.save_sample('system.mem.paged', int(mem.PoolPagedBytes) / B2MB)\n if mem.PoolNonpagedBytes is not None:\n self.save_sample('system.mem.nonpaged', int(mem.PoolNonpagedBytes) / B2MB)\n\n usable = free + cached\n self.save_sample('system.mem.usable', usable)\n if total > 0:\n pct_usable = float(usable) / total\n self.save_sample('system.mem.pct_usable', pct_usable)\n\n return self.get_metrics()\n\nclass Cpu(Check):\n def __init__(self, logger):\n Check.__init__(self, logger)\n self.logger = logger\n self.counter('system.cpu.user')\n self.counter('system.cpu.idle')\n self.gauge('system.cpu.interrupt')\n self.counter('system.cpu.system')\n\n def check(self, agentConfig):\n try:\n cpu = w.Win32_PerfFormattedData_PerfOS_Processor()\n except AttributeError:\n self.logger.info('Missing Win32_PerfFormattedData_PerfOS_Processor WMI class.' 
\\\n ' No CPU metrics will be returned.')\n return\n\n cpu_interrupt = self._average_metric(cpu, 'PercentInterruptTime')\n if cpu_interrupt is not None:\n self.save_sample('system.cpu.interrupt', cpu_interrupt)\n\n cpu_percent = psutil.cpu_times()\n\n self.save_sample('system.cpu.user', 100 * cpu_percent.user / psutil.NUM_CPUS)\n self.save_sample('system.cpu.idle', 100 * cpu_percent.idle / psutil.NUM_CPUS)\n self.save_sample('system.cpu.system', 100 * cpu_percent.system/ psutil.NUM_CPUS)\n\n return self.get_metrics()\n\n def _average_metric(self, wmi_class, wmi_prop):\n ''' Sum all of the values of a metric from a WMI class object, excluding\n the value for \"_Total\"\n '''\n val = 0\n counter = 0\n for wmi_object in wmi_class:\n if wmi_object.Name == '_Total':\n # Skip the _Total value\n continue\n\n if getattr(wmi_object, wmi_prop) is not None:\n counter += 1\n val += float(getattr(wmi_object, wmi_prop))\n\n if counter > 0:\n return val / counter\n\n return val\n\n\nclass Network(Check):\n def __init__(self, logger):\n Check.__init__(self, logger)\n self.logger = logger\n self.gauge('system.net.bytes_rcvd')\n self.gauge('system.net.bytes_sent')\n\n def check(self, agentConfig):\n try:\n net = w.Win32_PerfFormattedData_Tcpip_NetworkInterface()\n except AttributeError:\n self.logger.info('Missing Win32_PerfFormattedData_Tcpip_NetworkInterface WMI class.' \\\n ' No network metrics will be returned')\n return\n\n for iface in net:\n name = self.normalize_device_name(iface.name)\n if iface.BytesReceivedPerSec is not None:\n self.save_sample('system.net.bytes_rcvd', iface.BytesReceivedPerSec,\n device_name=name)\n if iface.BytesSentPerSec is not None:\n self.save_sample('system.net.bytes_sent', iface.BytesSentPerSec,\n device_name=name)\n return self.get_metrics()\n\nclass Disk(Check):\n def __init__(self, logger):\n Check.__init__(self, logger)\n self.logger = logger\n self.gauge('system.disk.free')\n self.gauge('system.disk.total')\n self.gauge('system.disk.in_use')\n self.gauge('system.disk.used')\n\n def check(self, agentConfig):\n try:\n disk = w.Win32_LogicalDisk()\n except AttributeError:\n self.logger.info('Missing Win32_LogicalDisk WMI class.' \\\n ' No disk metrics will be returned.')\n return\n\n blacklist_re = agentConfig.get('device_blacklist_re', None)\n for device in disk:\n name = self.normalize_device_name(device.name)\n if device.DriveType in (DriveType.CD, DriveType.UNKNOWN) or should_ignore_disk(name, blacklist_re):\n continue\n if device.FreeSpace is not None and device.Size is not None:\n free = float(device.FreeSpace) / B2KB\n total = float(device.Size) / B2KB\n used = total - free\n self.save_sample('system.disk.free', free, device_name=name)\n self.save_sample('system.disk.total', total, device_name=name)\n self.save_sample('system.disk.used', used, device_name=name)\n self.save_sample('system.disk.in_use', (used / total),\n device_name=name)\n return self.get_metrics()\n\nclass IO(Check):\n def __init__(self, logger):\n Check.__init__(self, logger)\n self.logger = logger\n self.gauge('system.io.wkb_s')\n self.gauge('system.io.w_s')\n self.gauge('system.io.rkb_s')\n self.gauge('system.io.r_s')\n self.gauge('system.io.avg_q_sz')\n\n def check(self, agentConfig):\n try:\n disk = w.Win32_PerfFormattedData_PerfDisk_LogicalDisk()\n except AttributeError:\n self.logger.info('Missing Win32_PerfFormattedData_PerfDisk_LogicalDiskUnable WMI class.' 
\\\n ' No I/O metrics will be returned.')\n return\n blacklist_re = agentConfig.get('device_blacklist_re', None)\n for device in disk:\n name = self.normalize_device_name(device.name)\n if should_ignore_disk(name, blacklist_re):\n continue\n if device.DiskWriteBytesPerSec is not None:\n self.save_sample('system.io.wkb_s', int(device.DiskWriteBytesPerSec) / B2KB,\n device_name=name)\n if device.DiskWritesPerSec is not None:\n self.save_sample('system.io.w_s', int(device.DiskWritesPerSec),\n device_name=name)\n if device.DiskReadBytesPerSec is not None:\n self.save_sample('system.io.rkb_s', int(device.DiskReadBytesPerSec) / B2KB,\n device_name=name)\n if device.DiskReadsPerSec is not None:\n self.save_sample('system.io.r_s', int(device.DiskReadsPerSec),\n device_name=name)\n if device.CurrentDiskQueueLength is not None:\n self.save_sample('system.io.avg_q_sz', device.CurrentDiskQueueLength,\n device_name=name)\n return self.get_metrics()\n", "path": "checks/system/win32.py"}]} | 3,045 | 680 |
gh_patches_debug_36985 | rasdani/github-patches | git_diff | scoutapp__scout_apm_python-332 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve tastypie support
Currently some [Django Tastypie](https://django-tastypie.readthedocs.io/en/latest/) views are named simply `tastypie.resources` in their operation name. We can probably find better names, and maybe support some Tastypie specific tags!
--- END ISSUE ---
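For context, one way to recover a more descriptive operation name is to look inside the generic wrapper function that tastypie installs around each resource view. The sketch below is an illustration only, not code from the repository: it assumes the tastypie wrapper keeps the resource instance and the view method name in its closure (as `Resource.wrap_view` appears to do), which may not hold across all tastypie or Python versions.

```python
# Illustrative sketch: derive "Controller/<module>.<ResourceClass>.<method>" from a
# tastypie-wrapped view. The closure layout is an assumption about tastypie internals.
from tastypie.resources import Resource


def describe_tastypie_view(view_func, http_method="GET"):
    # On Python 3, functools.wraps records the wrapped callable as __wrapped__.
    wrapper = getattr(view_func, "__wrapped__", None)
    if wrapper is None or not getattr(wrapper, "__closure__", None):
        return None

    cells = [cell.cell_contents for cell in wrapper.__closure__]
    instance = next((c for c in cells if isinstance(c, Resource)), None)
    method_name = next((c for c in cells if isinstance(c, str)), None)
    if instance is None or method_name is None:
        return None

    # dispatch_list / dispatch_detail only reveal the HTTP verb at request time.
    if method_name.startswith("dispatch_"):
        method_name = http_method.lower() + method_name[len("dispatch"):]

    return "Controller/{}.{}.{}".format(
        instance.__module__, instance.__class__.__name__, method_name
    )
```

For a hypothetical `UserResource` list endpoint handling a GET request, this would produce something like `Controller/myapp.api.UserResource.get_list` (module and class names here are made up for illustration).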
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/scout_apm/compat.py`
Content:
```
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import datetime as dt
5 import inspect
6 import sys
7 from functools import wraps
8
9 string_type = str if sys.version_info[0] >= 3 else basestring # noqa: F821
10 text_type = str if sys.version_info[0] >= 3 else unicode # noqa: F821
11
12 # Python 2 (and very early 3.x) didn't have ContextDecorator, so define it for ourselves
13 if sys.version_info < (3, 2):
14 import functools
15
16 class ContextDecorator(object):
17 def __call__(self, f):
18 @functools.wraps(f)
19 def decorated(*args, **kwds):
20 with self:
21 return f(*args, **kwds)
22
23 return decorated
24
25
26 else:
27 from contextlib import ContextDecorator
28
29 try:
30 # Python 3.x
31 import queue
32 except ImportError:
33 # Python 2.x
34 import Queue as queue
35
36 # datetime_to_timestamp converts a naive UTC datetime to a unix timestamp
37 if sys.version_info >= (3, 3):
38
39 def datetime_to_timestamp(datetime_obj):
40 return datetime_obj.replace(tzinfo=dt.timezone.utc).timestamp()
41
42
43 else:
44 _EPOCH = dt.datetime(1970, 1, 1)
45
46 def datetime_to_timestamp(datetime_obj):
47 return (datetime_obj - _EPOCH).total_seconds()
48
49
50 def text(value, encoding="utf-8", errors="strict"):
51 """
52 Convert a value to str on Python 3 and unicode on Python 2.
53 """
54 if isinstance(value, text_type):
55 return value
56 elif isinstance(value, bytes):
57 return text_type(value, encoding, errors)
58 else:
59 return text_type(value)
60
61
62 try:
63 from urllib.parse import urlencode
64 except ImportError:
65 from urllib import urlencode
66
67
68 def kwargs_only(func):
69 """
70 Make a function only accept keyword arguments.
71 This can be dropped in Python 3 in lieu of:
72 def foo(*, bar=default):
73 Source: https://pypi.org/project/kwargs-only/
74 """
75 if hasattr(inspect, "signature"): # pragma: no cover
76 # Python 3
77 signature = inspect.signature(func)
78 first_arg_name = list(signature.parameters.keys())[0]
79 else: # pragma: no cover
80 # Python 2
81 signature = inspect.getargspec(func)
82 first_arg_name = signature.args[0]
83
84 if first_arg_name in ("self", "cls"):
85 allowable_args = 1
86 else:
87 allowable_args = 0
88
89 @wraps(func)
90 def wrapper(*args, **kwargs):
91 if len(args) > allowable_args:
92 raise TypeError(
93 "{} should only be called with keyword args".format(func.__name__)
94 )
95 return func(*args, **kwargs)
96
97 return wrapper
98
99
100 __all__ = [
101 "ContextDecorator",
102 "datetime_to_timestamp",
103 "kwargs_only",
104 "queue",
105 "string_type",
106 "text",
107 "text_type",
108 "urlencode",
109 ]
110
```
Path: `src/scout_apm/django/middleware.py`
Content:
```
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import django
5 from django.conf import settings
6
7 from scout_apm.core.config import scout_config
8 from scout_apm.core.tracked_request import TrackedRequest
9 from scout_apm.core.web_requests import (
10 create_filtered_path,
11 ignore_path,
12 track_amazon_request_queue_time,
13 track_request_queue_time,
14 )
15
16 if django.VERSION >= (2, 0):
17 from django.urls import get_urlconf
18 else:
19 from django.core.urlresolvers import get_urlconf
20
21
22 def get_operation_name(request):
23 view_func = request.resolver_match.func
24 view_name = request.resolver_match._func_path
25
26 if hasattr(view_func, "model_admin"):
27 # Seems to comes from Django admin (attribute only set on Django 1.9+)
28 admin_class = view_func.model_admin.__class__
29 view_name = (
30 admin_class.__module__
31 + "."
32 + admin_class.__name__
33 + "."
34 + view_func.__name__
35 )
36
37 return "Controller/" + view_name
38
39
40 def track_request_view_data(request, tracked_request):
41 path = request.path
42 tracked_request.tag(
43 "path",
44 create_filtered_path(
45 path, [(k, v) for k, vs in request.GET.lists() for v in vs]
46 ),
47 )
48 if ignore_path(path):
49 tracked_request.tag("ignore_transaction", True)
50
51 try:
52 # Determine a remote IP to associate with the request. The value is
53 # spoofable by the requester so this is not suitable to use in any
54 # security sensitive context.
55 user_ip = (
56 request.META.get("HTTP_X_FORWARDED_FOR", "").split(",")[0]
57 or request.META.get("HTTP_CLIENT_IP", "").split(",")[0]
58 or request.META.get("REMOTE_ADDR", None)
59 )
60 tracked_request.tag("user_ip", user_ip)
61 except Exception:
62 pass
63
64 user = getattr(request, "user", None)
65 if user is not None:
66 try:
67 tracked_request.tag("username", user.get_username())
68 except Exception:
69 pass
70
71 tracked_request.tag("urlconf", get_urlconf(settings.ROOT_URLCONF))
72
73
74 class MiddlewareTimingMiddleware(object):
75 """
76 Insert as early into the Middleware stack as possible (outermost layers),
77 so that other middlewares called after can be timed.
78 """
79
80 def __init__(self, get_response):
81 self.get_response = get_response
82
83 def __call__(self, request):
84 if not scout_config.value("monitor"):
85 return self.get_response(request)
86
87 tracked_request = TrackedRequest.instance()
88
89 tracked_request.start_span(
90 operation="Middleware", should_capture_backtrace=False
91 )
92 queue_time = request.META.get("HTTP_X_QUEUE_START") or request.META.get(
93 "HTTP_X_REQUEST_START", ""
94 )
95 queue_time_tracked = track_request_queue_time(queue_time, tracked_request)
96 if not queue_time_tracked:
97 track_amazon_request_queue_time(
98 request.META.get("HTTP_X_AMZN_TRACE_ID", ""), tracked_request
99 )
100
101 try:
102 return self.get_response(request)
103 finally:
104 tracked_request.stop_span()
105
106
107 class ViewTimingMiddleware(object):
108 """
109 Insert as deep into the middleware stack as possible, ideally wrapping no
110 other middleware. Designed to time the View itself
111 """
112
113 def __init__(self, get_response):
114 self.get_response = get_response
115
116 def __call__(self, request):
117 """
118 Wrap a single incoming request with start and stop calls.
119 This will start timing, but relies on the process_view callback to
120 capture more details about what view was really called, and other
121 similar info.
122
123 If process_view isn't called, then the request will not
124 be recorded. This can happen if a middleware further along the stack
125 doesn't call onward, and instead returns a response directly.
126 """
127 if not scout_config.value("monitor"):
128 return self.get_response(request)
129
130 tracked_request = TrackedRequest.instance()
131
132 # This operation name won't be recorded unless changed later in
133 # process_view
134 tracked_request.start_span(operation="Unknown", should_capture_backtrace=False)
135 try:
136 return self.get_response(request)
137 finally:
138 tracked_request.stop_span()
139
140 def process_view(self, request, view_func, view_args, view_kwargs):
141 """
142 Capture details about the view_func that is about to execute
143 """
144 if not scout_config.value("monitor"):
145 return
146 tracked_request = TrackedRequest.instance()
147 tracked_request.mark_real_request()
148
149 track_request_view_data(request, tracked_request)
150
151 span = tracked_request.current_span()
152 if span is not None:
153 span.operation = get_operation_name(request)
154
155 def process_exception(self, request, exception):
156 """
157 Mark this request as having errored out
158
159 Does not modify or catch or otherwise change the exception thrown
160 """
161 if not scout_config.value("monitor"):
162 return
163 TrackedRequest.instance().tag("error", "true")
164
165
166 class OldStyleMiddlewareTimingMiddleware(object):
167 """
168 Insert as early into the Middleware stack as possible (outermost layers),
169 so that other middlewares called after can be timed.
170 """
171
172 def process_request(self, request):
173 if not scout_config.value("monitor"):
174 return
175 tracked_request = TrackedRequest.instance()
176 request._scout_tracked_request = tracked_request
177
178 queue_time = request.META.get("HTTP_X_QUEUE_START") or request.META.get(
179 "HTTP_X_REQUEST_START", ""
180 )
181 queue_time_tracked = track_request_queue_time(queue_time, tracked_request)
182 if not queue_time_tracked:
183 track_amazon_request_queue_time(
184 request.META.get("HTTP_X_AMZN_TRACE_ID", ""), tracked_request
185 )
186
187 tracked_request.start_span(
188 operation="Middleware", should_capture_backtrace=False
189 )
190
191 def process_response(self, request, response):
192 # Only stop span if there's a request, but presume we are balanced,
193 # i.e. that custom instrumentation within the application is not
194 # causing errors
195 tracked_request = getattr(request, "_scout_tracked_request", None)
196 if tracked_request is not None:
197 tracked_request.stop_span()
198 return response
199
200
201 class OldStyleViewMiddleware(object):
202 def process_view(self, request, view_func, view_func_args, view_func_kwargs):
203 tracked_request = getattr(request, "_scout_tracked_request", None)
204 if tracked_request is None:
205 # Looks like OldStyleMiddlewareTimingMiddleware didn't run, so
206 # don't do anything
207 return
208
209 tracked_request.mark_real_request()
210
211 track_request_view_data(request, tracked_request)
212
213 span = tracked_request.start_span(
214 operation=get_operation_name(request), should_capture_backtrace=False
215 )
216 # Save the span into the request, so we can check
217 # if we're matched up when stopping
218 request._scout_view_span = span
219
220 def process_response(self, request, response):
221 tracked_request = getattr(request, "_scout_tracked_request", None)
222 if tracked_request is None:
223 # Looks like OldStyleMiddlewareTimingMiddleware didn't run, so
224 # don't do anything
225 return response
226
227 # Only stop span if we started, but presume we are balanced, i.e. that
228 # custom instrumentation within the application is not causing errors
229 span = getattr(request, "_scout_view_span", None)
230 if span is not None:
231 tracked_request.stop_span()
232 return response
233
234 def process_exception(self, request, exception):
235 tracked_request = getattr(request, "_scout_tracked_request", None)
236 if tracked_request is None:
237 # Looks like OldStyleMiddlewareTimingMiddleware didn't run, so
238 # don't do anything
239 return
240
241 tracked_request.tag("error", "true")
242
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/scout_apm/compat.py b/src/scout_apm/compat.py
--- a/src/scout_apm/compat.py
+++ b/src/scout_apm/compat.py
@@ -8,6 +8,7 @@
string_type = str if sys.version_info[0] >= 3 else basestring # noqa: F821
text_type = str if sys.version_info[0] >= 3 else unicode # noqa: F821
+string_types = tuple({string_type, text_type})
# Python 2 (and very early 3.x) didn't have ContextDecorator, so define it for ourselves
if sys.version_info < (3, 2):
diff --git a/src/scout_apm/django/middleware.py b/src/scout_apm/django/middleware.py
--- a/src/scout_apm/django/middleware.py
+++ b/src/scout_apm/django/middleware.py
@@ -1,9 +1,12 @@
# coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
+import sys
+
import django
from django.conf import settings
+from scout_apm.compat import string_types
from scout_apm.core.config import scout_config
from scout_apm.core.tracked_request import TrackedRequest
from scout_apm.core.web_requests import (
@@ -34,9 +37,52 @@
+ view_func.__name__
)
+ # Seems to be a Tastypie Resource. Need to resort to some stack inspection
+ # to find a better name since its decorators don't wrap very well
+ if view_name == "tastypie.resources.wrapper":
+ tastypie_name = _get_tastypie_operation_name(request, view_func)
+ if tastypie_name is not None:
+ return tastypie_name
+
return "Controller/" + view_name
+def _get_tastypie_operation_name(request, view_func):
+ try:
+ from tastypie.resources import Resource
+ except ImportError:
+ return None
+
+ if sys.version_info[0] == 2: # pragma: no cover
+ try:
+ wrapper = view_func.__closure__[0].cell_contents
+ except (AttributeError, IndexError):
+ return None
+ elif sys.version_info[0] == 3:
+ try:
+ wrapper = view_func.__wrapped__
+ except AttributeError:
+ return None
+
+ if not hasattr(wrapper, "__closure__") or len(wrapper.__closure__) != 2:
+ return None
+
+ instance = wrapper.__closure__[0].cell_contents
+ if not isinstance(instance, Resource): # pragma: no cover
+ return None
+
+ method_name = wrapper.__closure__[1].cell_contents
+ if not isinstance(method_name, string_types): # pragma: no cover
+ return None
+
+ if method_name.startswith("dispatch_"): # pragma: no cover
+ method_name = request.method.lower() + method_name.split("dispatch", 1)[1]
+
+ return "Controller/{}.{}.{}".format(
+ instance.__module__, instance.__class__.__name__, method_name
+ )
+
+
def track_request_view_data(request, tracked_request):
path = request.path
tracked_request.tag(
| {"golden_diff": "diff --git a/src/scout_apm/compat.py b/src/scout_apm/compat.py\n--- a/src/scout_apm/compat.py\n+++ b/src/scout_apm/compat.py\n@@ -8,6 +8,7 @@\n \n string_type = str if sys.version_info[0] >= 3 else basestring # noqa: F821\n text_type = str if sys.version_info[0] >= 3 else unicode # noqa: F821\n+string_types = tuple({string_type, text_type})\n \n # Python 2 (and very early 3.x) didn't have ContextDecorator, so define it for ourselves\n if sys.version_info < (3, 2):\ndiff --git a/src/scout_apm/django/middleware.py b/src/scout_apm/django/middleware.py\n--- a/src/scout_apm/django/middleware.py\n+++ b/src/scout_apm/django/middleware.py\n@@ -1,9 +1,12 @@\n # coding=utf-8\n from __future__ import absolute_import, division, print_function, unicode_literals\n \n+import sys\n+\n import django\n from django.conf import settings\n \n+from scout_apm.compat import string_types\n from scout_apm.core.config import scout_config\n from scout_apm.core.tracked_request import TrackedRequest\n from scout_apm.core.web_requests import (\n@@ -34,9 +37,52 @@\n + view_func.__name__\n )\n \n+ # Seems to be a Tastypie Resource. Need to resort to some stack inspection\n+ # to find a better name since its decorators don't wrap very well\n+ if view_name == \"tastypie.resources.wrapper\":\n+ tastypie_name = _get_tastypie_operation_name(request, view_func)\n+ if tastypie_name is not None:\n+ return tastypie_name\n+\n return \"Controller/\" + view_name\n \n \n+def _get_tastypie_operation_name(request, view_func):\n+ try:\n+ from tastypie.resources import Resource\n+ except ImportError:\n+ return None\n+\n+ if sys.version_info[0] == 2: # pragma: no cover\n+ try:\n+ wrapper = view_func.__closure__[0].cell_contents\n+ except (AttributeError, IndexError):\n+ return None\n+ elif sys.version_info[0] == 3:\n+ try:\n+ wrapper = view_func.__wrapped__\n+ except AttributeError:\n+ return None\n+\n+ if not hasattr(wrapper, \"__closure__\") or len(wrapper.__closure__) != 2:\n+ return None\n+\n+ instance = wrapper.__closure__[0].cell_contents\n+ if not isinstance(instance, Resource): # pragma: no cover\n+ return None\n+\n+ method_name = wrapper.__closure__[1].cell_contents\n+ if not isinstance(method_name, string_types): # pragma: no cover\n+ return None\n+\n+ if method_name.startswith(\"dispatch_\"): # pragma: no cover\n+ method_name = request.method.lower() + method_name.split(\"dispatch\", 1)[1]\n+\n+ return \"Controller/{}.{}.{}\".format(\n+ instance.__module__, instance.__class__.__name__, method_name\n+ )\n+\n+\n def track_request_view_data(request, tracked_request):\n path = request.path\n tracked_request.tag(\n", "issue": "Improve tastypie support\nCurrently some [Django Tastypie](https://django-tastypie.readthedocs.io/en/latest/) views are named simply `tastypie.resources` in their operation name. 
We can probably find better names, and maybe support some Tastypie specific tags!\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport datetime as dt\nimport inspect\nimport sys\nfrom functools import wraps\n\nstring_type = str if sys.version_info[0] >= 3 else basestring # noqa: F821\ntext_type = str if sys.version_info[0] >= 3 else unicode # noqa: F821\n\n# Python 2 (and very early 3.x) didn't have ContextDecorator, so define it for ourselves\nif sys.version_info < (3, 2):\n import functools\n\n class ContextDecorator(object):\n def __call__(self, f):\n @functools.wraps(f)\n def decorated(*args, **kwds):\n with self:\n return f(*args, **kwds)\n\n return decorated\n\n\nelse:\n from contextlib import ContextDecorator\n\ntry:\n # Python 3.x\n import queue\nexcept ImportError:\n # Python 2.x\n import Queue as queue\n\n# datetime_to_timestamp converts a naive UTC datetime to a unix timestamp\nif sys.version_info >= (3, 3):\n\n def datetime_to_timestamp(datetime_obj):\n return datetime_obj.replace(tzinfo=dt.timezone.utc).timestamp()\n\n\nelse:\n _EPOCH = dt.datetime(1970, 1, 1)\n\n def datetime_to_timestamp(datetime_obj):\n return (datetime_obj - _EPOCH).total_seconds()\n\n\ndef text(value, encoding=\"utf-8\", errors=\"strict\"):\n \"\"\"\n Convert a value to str on Python 3 and unicode on Python 2.\n \"\"\"\n if isinstance(value, text_type):\n return value\n elif isinstance(value, bytes):\n return text_type(value, encoding, errors)\n else:\n return text_type(value)\n\n\ntry:\n from urllib.parse import urlencode\nexcept ImportError:\n from urllib import urlencode\n\n\ndef kwargs_only(func):\n \"\"\"\n Make a function only accept keyword arguments.\n This can be dropped in Python 3 in lieu of:\n def foo(*, bar=default):\n Source: https://pypi.org/project/kwargs-only/\n \"\"\"\n if hasattr(inspect, \"signature\"): # pragma: no cover\n # Python 3\n signature = inspect.signature(func)\n first_arg_name = list(signature.parameters.keys())[0]\n else: # pragma: no cover\n # Python 2\n signature = inspect.getargspec(func)\n first_arg_name = signature.args[0]\n\n if first_arg_name in (\"self\", \"cls\"):\n allowable_args = 1\n else:\n allowable_args = 0\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n if len(args) > allowable_args:\n raise TypeError(\n \"{} should only be called with keyword args\".format(func.__name__)\n )\n return func(*args, **kwargs)\n\n return wrapper\n\n\n__all__ = [\n \"ContextDecorator\",\n \"datetime_to_timestamp\",\n \"kwargs_only\",\n \"queue\",\n \"string_type\",\n \"text\",\n \"text_type\",\n \"urlencode\",\n]\n", "path": "src/scout_apm/compat.py"}, {"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport django\nfrom django.conf import settings\n\nfrom scout_apm.core.config import scout_config\nfrom scout_apm.core.tracked_request import TrackedRequest\nfrom scout_apm.core.web_requests import (\n create_filtered_path,\n ignore_path,\n track_amazon_request_queue_time,\n track_request_queue_time,\n)\n\nif django.VERSION >= (2, 0):\n from django.urls import get_urlconf\nelse:\n from django.core.urlresolvers import get_urlconf\n\n\ndef get_operation_name(request):\n view_func = request.resolver_match.func\n view_name = request.resolver_match._func_path\n\n if hasattr(view_func, \"model_admin\"):\n # Seems to comes from Django admin (attribute only set on Django 1.9+)\n admin_class = view_func.model_admin.__class__\n 
view_name = (\n admin_class.__module__\n + \".\"\n + admin_class.__name__\n + \".\"\n + view_func.__name__\n )\n\n return \"Controller/\" + view_name\n\n\ndef track_request_view_data(request, tracked_request):\n path = request.path\n tracked_request.tag(\n \"path\",\n create_filtered_path(\n path, [(k, v) for k, vs in request.GET.lists() for v in vs]\n ),\n )\n if ignore_path(path):\n tracked_request.tag(\"ignore_transaction\", True)\n\n try:\n # Determine a remote IP to associate with the request. The value is\n # spoofable by the requester so this is not suitable to use in any\n # security sensitive context.\n user_ip = (\n request.META.get(\"HTTP_X_FORWARDED_FOR\", \"\").split(\",\")[0]\n or request.META.get(\"HTTP_CLIENT_IP\", \"\").split(\",\")[0]\n or request.META.get(\"REMOTE_ADDR\", None)\n )\n tracked_request.tag(\"user_ip\", user_ip)\n except Exception:\n pass\n\n user = getattr(request, \"user\", None)\n if user is not None:\n try:\n tracked_request.tag(\"username\", user.get_username())\n except Exception:\n pass\n\n tracked_request.tag(\"urlconf\", get_urlconf(settings.ROOT_URLCONF))\n\n\nclass MiddlewareTimingMiddleware(object):\n \"\"\"\n Insert as early into the Middleware stack as possible (outermost layers),\n so that other middlewares called after can be timed.\n \"\"\"\n\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n if not scout_config.value(\"monitor\"):\n return self.get_response(request)\n\n tracked_request = TrackedRequest.instance()\n\n tracked_request.start_span(\n operation=\"Middleware\", should_capture_backtrace=False\n )\n queue_time = request.META.get(\"HTTP_X_QUEUE_START\") or request.META.get(\n \"HTTP_X_REQUEST_START\", \"\"\n )\n queue_time_tracked = track_request_queue_time(queue_time, tracked_request)\n if not queue_time_tracked:\n track_amazon_request_queue_time(\n request.META.get(\"HTTP_X_AMZN_TRACE_ID\", \"\"), tracked_request\n )\n\n try:\n return self.get_response(request)\n finally:\n tracked_request.stop_span()\n\n\nclass ViewTimingMiddleware(object):\n \"\"\"\n Insert as deep into the middleware stack as possible, ideally wrapping no\n other middleware. Designed to time the View itself\n \"\"\"\n\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n \"\"\"\n Wrap a single incoming request with start and stop calls.\n This will start timing, but relies on the process_view callback to\n capture more details about what view was really called, and other\n similar info.\n\n If process_view isn't called, then the request will not\n be recorded. 
This can happen if a middleware further along the stack\n doesn't call onward, and instead returns a response directly.\n \"\"\"\n if not scout_config.value(\"monitor\"):\n return self.get_response(request)\n\n tracked_request = TrackedRequest.instance()\n\n # This operation name won't be recorded unless changed later in\n # process_view\n tracked_request.start_span(operation=\"Unknown\", should_capture_backtrace=False)\n try:\n return self.get_response(request)\n finally:\n tracked_request.stop_span()\n\n def process_view(self, request, view_func, view_args, view_kwargs):\n \"\"\"\n Capture details about the view_func that is about to execute\n \"\"\"\n if not scout_config.value(\"monitor\"):\n return\n tracked_request = TrackedRequest.instance()\n tracked_request.mark_real_request()\n\n track_request_view_data(request, tracked_request)\n\n span = tracked_request.current_span()\n if span is not None:\n span.operation = get_operation_name(request)\n\n def process_exception(self, request, exception):\n \"\"\"\n Mark this request as having errored out\n\n Does not modify or catch or otherwise change the exception thrown\n \"\"\"\n if not scout_config.value(\"monitor\"):\n return\n TrackedRequest.instance().tag(\"error\", \"true\")\n\n\nclass OldStyleMiddlewareTimingMiddleware(object):\n \"\"\"\n Insert as early into the Middleware stack as possible (outermost layers),\n so that other middlewares called after can be timed.\n \"\"\"\n\n def process_request(self, request):\n if not scout_config.value(\"monitor\"):\n return\n tracked_request = TrackedRequest.instance()\n request._scout_tracked_request = tracked_request\n\n queue_time = request.META.get(\"HTTP_X_QUEUE_START\") or request.META.get(\n \"HTTP_X_REQUEST_START\", \"\"\n )\n queue_time_tracked = track_request_queue_time(queue_time, tracked_request)\n if not queue_time_tracked:\n track_amazon_request_queue_time(\n request.META.get(\"HTTP_X_AMZN_TRACE_ID\", \"\"), tracked_request\n )\n\n tracked_request.start_span(\n operation=\"Middleware\", should_capture_backtrace=False\n )\n\n def process_response(self, request, response):\n # Only stop span if there's a request, but presume we are balanced,\n # i.e. that custom instrumentation within the application is not\n # causing errors\n tracked_request = getattr(request, \"_scout_tracked_request\", None)\n if tracked_request is not None:\n tracked_request.stop_span()\n return response\n\n\nclass OldStyleViewMiddleware(object):\n def process_view(self, request, view_func, view_func_args, view_func_kwargs):\n tracked_request = getattr(request, \"_scout_tracked_request\", None)\n if tracked_request is None:\n # Looks like OldStyleMiddlewareTimingMiddleware didn't run, so\n # don't do anything\n return\n\n tracked_request.mark_real_request()\n\n track_request_view_data(request, tracked_request)\n\n span = tracked_request.start_span(\n operation=get_operation_name(request), should_capture_backtrace=False\n )\n # Save the span into the request, so we can check\n # if we're matched up when stopping\n request._scout_view_span = span\n\n def process_response(self, request, response):\n tracked_request = getattr(request, \"_scout_tracked_request\", None)\n if tracked_request is None:\n # Looks like OldStyleMiddlewareTimingMiddleware didn't run, so\n # don't do anything\n return response\n\n # Only stop span if we started, but presume we are balanced, i.e. 
that\n # custom instrumentation within the application is not causing errors\n span = getattr(request, \"_scout_view_span\", None)\n if span is not None:\n tracked_request.stop_span()\n return response\n\n def process_exception(self, request, exception):\n tracked_request = getattr(request, \"_scout_tracked_request\", None)\n if tracked_request is None:\n # Looks like OldStyleMiddlewareTimingMiddleware didn't run, so\n # don't do anything\n return\n\n tracked_request.tag(\"error\", \"true\")\n", "path": "src/scout_apm/django/middleware.py"}], "after_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport datetime as dt\nimport inspect\nimport sys\nfrom functools import wraps\n\nstring_type = str if sys.version_info[0] >= 3 else basestring # noqa: F821\ntext_type = str if sys.version_info[0] >= 3 else unicode # noqa: F821\nstring_types = tuple({string_type, text_type})\n\n# Python 2 (and very early 3.x) didn't have ContextDecorator, so define it for ourselves\nif sys.version_info < (3, 2):\n import functools\n\n class ContextDecorator(object):\n def __call__(self, f):\n @functools.wraps(f)\n def decorated(*args, **kwds):\n with self:\n return f(*args, **kwds)\n\n return decorated\n\n\nelse:\n from contextlib import ContextDecorator\n\ntry:\n # Python 3.x\n import queue\nexcept ImportError:\n # Python 2.x\n import Queue as queue\n\n# datetime_to_timestamp converts a naive UTC datetime to a unix timestamp\nif sys.version_info >= (3, 3):\n\n def datetime_to_timestamp(datetime_obj):\n return datetime_obj.replace(tzinfo=dt.timezone.utc).timestamp()\n\n\nelse:\n _EPOCH = dt.datetime(1970, 1, 1)\n\n def datetime_to_timestamp(datetime_obj):\n return (datetime_obj - _EPOCH).total_seconds()\n\n\ndef text(value, encoding=\"utf-8\", errors=\"strict\"):\n \"\"\"\n Convert a value to str on Python 3 and unicode on Python 2.\n \"\"\"\n if isinstance(value, text_type):\n return value\n elif isinstance(value, bytes):\n return text_type(value, encoding, errors)\n else:\n return text_type(value)\n\n\ntry:\n from urllib.parse import urlencode\nexcept ImportError:\n from urllib import urlencode\n\n\ndef kwargs_only(func):\n \"\"\"\n Make a function only accept keyword arguments.\n This can be dropped in Python 3 in lieu of:\n def foo(*, bar=default):\n Source: https://pypi.org/project/kwargs-only/\n \"\"\"\n if hasattr(inspect, \"signature\"): # pragma: no cover\n # Python 3\n signature = inspect.signature(func)\n first_arg_name = list(signature.parameters.keys())[0]\n else: # pragma: no cover\n # Python 2\n signature = inspect.getargspec(func)\n first_arg_name = signature.args[0]\n\n if first_arg_name in (\"self\", \"cls\"):\n allowable_args = 1\n else:\n allowable_args = 0\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n if len(args) > allowable_args:\n raise TypeError(\n \"{} should only be called with keyword args\".format(func.__name__)\n )\n return func(*args, **kwargs)\n\n return wrapper\n\n\n__all__ = [\n \"ContextDecorator\",\n \"datetime_to_timestamp\",\n \"kwargs_only\",\n \"queue\",\n \"string_type\",\n \"text\",\n \"text_type\",\n \"urlencode\",\n]\n", "path": "src/scout_apm/compat.py"}, {"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport sys\n\nimport django\nfrom django.conf import settings\n\nfrom scout_apm.compat import string_types\nfrom scout_apm.core.config import scout_config\nfrom scout_apm.core.tracked_request import TrackedRequest\nfrom 
scout_apm.core.web_requests import (\n create_filtered_path,\n ignore_path,\n track_amazon_request_queue_time,\n track_request_queue_time,\n)\n\nif django.VERSION >= (2, 0):\n from django.urls import get_urlconf\nelse:\n from django.core.urlresolvers import get_urlconf\n\n\ndef get_operation_name(request):\n view_func = request.resolver_match.func\n view_name = request.resolver_match._func_path\n\n if hasattr(view_func, \"model_admin\"):\n # Seems to comes from Django admin (attribute only set on Django 1.9+)\n admin_class = view_func.model_admin.__class__\n view_name = (\n admin_class.__module__\n + \".\"\n + admin_class.__name__\n + \".\"\n + view_func.__name__\n )\n\n # Seems to be a Tastypie Resource. Need to resort to some stack inspection\n # to find a better name since its decorators don't wrap very well\n if view_name == \"tastypie.resources.wrapper\":\n tastypie_name = _get_tastypie_operation_name(request, view_func)\n if tastypie_name is not None:\n return tastypie_name\n\n return \"Controller/\" + view_name\n\n\ndef _get_tastypie_operation_name(request, view_func):\n try:\n from tastypie.resources import Resource\n except ImportError:\n return None\n\n if sys.version_info[0] == 2: # pragma: no cover\n try:\n wrapper = view_func.__closure__[0].cell_contents\n except (AttributeError, IndexError):\n return None\n elif sys.version_info[0] == 3:\n try:\n wrapper = view_func.__wrapped__\n except AttributeError:\n return None\n\n if not hasattr(wrapper, \"__closure__\") or len(wrapper.__closure__) != 2:\n return None\n\n instance = wrapper.__closure__[0].cell_contents\n if not isinstance(instance, Resource): # pragma: no cover\n return None\n\n method_name = wrapper.__closure__[1].cell_contents\n if not isinstance(method_name, string_types): # pragma: no cover\n return None\n\n if method_name.startswith(\"dispatch_\"): # pragma: no cover\n method_name = request.method.lower() + method_name.split(\"dispatch\", 1)[1]\n\n return \"Controller/{}.{}.{}\".format(\n instance.__module__, instance.__class__.__name__, method_name\n )\n\n\ndef track_request_view_data(request, tracked_request):\n path = request.path\n tracked_request.tag(\n \"path\",\n create_filtered_path(\n path, [(k, v) for k, vs in request.GET.lists() for v in vs]\n ),\n )\n if ignore_path(path):\n tracked_request.tag(\"ignore_transaction\", True)\n\n try:\n # Determine a remote IP to associate with the request. 
The value is\n # spoofable by the requester so this is not suitable to use in any\n # security sensitive context.\n user_ip = (\n request.META.get(\"HTTP_X_FORWARDED_FOR\", \"\").split(\",\")[0]\n or request.META.get(\"HTTP_CLIENT_IP\", \"\").split(\",\")[0]\n or request.META.get(\"REMOTE_ADDR\", None)\n )\n tracked_request.tag(\"user_ip\", user_ip)\n except Exception:\n pass\n\n user = getattr(request, \"user\", None)\n if user is not None:\n try:\n tracked_request.tag(\"username\", user.get_username())\n except Exception:\n pass\n\n tracked_request.tag(\"urlconf\", get_urlconf(settings.ROOT_URLCONF))\n\n\nclass MiddlewareTimingMiddleware(object):\n \"\"\"\n Insert as early into the Middleware stack as possible (outermost layers),\n so that other middlewares called after can be timed.\n \"\"\"\n\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n if not scout_config.value(\"monitor\"):\n return self.get_response(request)\n\n tracked_request = TrackedRequest.instance()\n\n tracked_request.start_span(\n operation=\"Middleware\", should_capture_backtrace=False\n )\n queue_time = request.META.get(\"HTTP_X_QUEUE_START\") or request.META.get(\n \"HTTP_X_REQUEST_START\", \"\"\n )\n queue_time_tracked = track_request_queue_time(queue_time, tracked_request)\n if not queue_time_tracked:\n track_amazon_request_queue_time(\n request.META.get(\"HTTP_X_AMZN_TRACE_ID\", \"\"), tracked_request\n )\n\n try:\n return self.get_response(request)\n finally:\n tracked_request.stop_span()\n\n\nclass ViewTimingMiddleware(object):\n \"\"\"\n Insert as deep into the middleware stack as possible, ideally wrapping no\n other middleware. Designed to time the View itself\n \"\"\"\n\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n \"\"\"\n Wrap a single incoming request with start and stop calls.\n This will start timing, but relies on the process_view callback to\n capture more details about what view was really called, and other\n similar info.\n\n If process_view isn't called, then the request will not\n be recorded. 
This can happen if a middleware further along the stack\n doesn't call onward, and instead returns a response directly.\n \"\"\"\n if not scout_config.value(\"monitor\"):\n return self.get_response(request)\n\n tracked_request = TrackedRequest.instance()\n\n # This operation name won't be recorded unless changed later in\n # process_view\n tracked_request.start_span(operation=\"Unknown\", should_capture_backtrace=False)\n try:\n return self.get_response(request)\n finally:\n tracked_request.stop_span()\n\n def process_view(self, request, view_func, view_args, view_kwargs):\n \"\"\"\n Capture details about the view_func that is about to execute\n \"\"\"\n if not scout_config.value(\"monitor\"):\n return\n tracked_request = TrackedRequest.instance()\n tracked_request.mark_real_request()\n\n track_request_view_data(request, tracked_request)\n\n span = tracked_request.current_span()\n if span is not None:\n span.operation = get_operation_name(request)\n\n def process_exception(self, request, exception):\n \"\"\"\n Mark this request as having errored out\n\n Does not modify or catch or otherwise change the exception thrown\n \"\"\"\n if not scout_config.value(\"monitor\"):\n return\n TrackedRequest.instance().tag(\"error\", \"true\")\n\n\nclass OldStyleMiddlewareTimingMiddleware(object):\n \"\"\"\n Insert as early into the Middleware stack as possible (outermost layers),\n so that other middlewares called after can be timed.\n \"\"\"\n\n def process_request(self, request):\n if not scout_config.value(\"monitor\"):\n return\n tracked_request = TrackedRequest.instance()\n request._scout_tracked_request = tracked_request\n\n queue_time = request.META.get(\"HTTP_X_QUEUE_START\") or request.META.get(\n \"HTTP_X_REQUEST_START\", \"\"\n )\n queue_time_tracked = track_request_queue_time(queue_time, tracked_request)\n if not queue_time_tracked:\n track_amazon_request_queue_time(\n request.META.get(\"HTTP_X_AMZN_TRACE_ID\", \"\"), tracked_request\n )\n\n tracked_request.start_span(\n operation=\"Middleware\", should_capture_backtrace=False\n )\n\n def process_response(self, request, response):\n # Only stop span if there's a request, but presume we are balanced,\n # i.e. that custom instrumentation within the application is not\n # causing errors\n tracked_request = getattr(request, \"_scout_tracked_request\", None)\n if tracked_request is not None:\n tracked_request.stop_span()\n return response\n\n\nclass OldStyleViewMiddleware(object):\n def process_view(self, request, view_func, view_func_args, view_func_kwargs):\n tracked_request = getattr(request, \"_scout_tracked_request\", None)\n if tracked_request is None:\n # Looks like OldStyleMiddlewareTimingMiddleware didn't run, so\n # don't do anything\n return\n\n tracked_request.mark_real_request()\n\n track_request_view_data(request, tracked_request)\n\n span = tracked_request.start_span(\n operation=get_operation_name(request), should_capture_backtrace=False\n )\n # Save the span into the request, so we can check\n # if we're matched up when stopping\n request._scout_view_span = span\n\n def process_response(self, request, response):\n tracked_request = getattr(request, \"_scout_tracked_request\", None)\n if tracked_request is None:\n # Looks like OldStyleMiddlewareTimingMiddleware didn't run, so\n # don't do anything\n return response\n\n # Only stop span if we started, but presume we are balanced, i.e. 
that\n # custom instrumentation within the application is not causing errors\n span = getattr(request, \"_scout_view_span\", None)\n if span is not None:\n tracked_request.stop_span()\n return response\n\n def process_exception(self, request, exception):\n tracked_request = getattr(request, \"_scout_tracked_request\", None)\n if tracked_request is None:\n # Looks like OldStyleMiddlewareTimingMiddleware didn't run, so\n # don't do anything\n return\n\n tracked_request.tag(\"error\", \"true\")\n", "path": "src/scout_apm/django/middleware.py"}]} | 3,589 | 740 |
gh_patches_debug_30147 | rasdani/github-patches | git_diff | electricitymaps__electricitymaps-contrib-5856 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Data for French Guyana (up to today)
Hourly production data for French Guyana: https://opendata-guyane.edf.fr/explore/dataset/production-d-electricite-par-filiere-en-temps-reel/
This one is up-to-date to today!
Also mentioned in #1912, but that was closed due to inactivity after some of the data sources were implemented. Also, the URLs have changed.
Edit: looks like the FR_O parser supports a 'historical dataset', but not this real-time dataset.
--- END ISSUE ---
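For reference, the real-time Guyane dataset linked above appears to be served by the same Opendatasoft v2 export endpoint that the parser already uses for the other territories' live datasets. The snippet below is a hedged illustration, not part of the codebase: the dataset slug is taken from the issue URL, the `date` ordering column mirrors the parser's existing date mapping for GF, and the field names inside each returned record are assumptions until checked against a live response.

```python
# Illustrative sketch: query the real-time Guyane production dataset directly.
# Endpoint pattern and query parameters mirror the parser's existing live datasets;
# response field names are assumptions.
from requests import Session

GF_LIVE_URL = (
    "https://opendata-guyane.edf.fr/api/v2/catalog/datasets/"
    "production-d-electricite-par-filiere-en-temps-reel/exports/json"
)


def fetch_gf_live(session=None):
    ses = session or Session()
    response = ses.get(GF_LIVE_URL, params={"timezone": "UTC", "order_by": "date desc"})
    response.raise_for_status()
    return response.json()  # expected: a list of dicts, one per timestamp, newest first
```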
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsers/FR_O.py`
Content:
```
1 from datetime import datetime
2 from logging import getLogger
3 from typing import Dict, Optional, Tuple, Union
4
5 from requests import Response, Session
6
7 from electricitymap.contrib.lib.models.event_lists import (
8 PriceList,
9 ProductionBreakdownList,
10 )
11 from electricitymap.contrib.lib.models.events import (
12 EventSourceType,
13 ProductionMix,
14 StorageMix,
15 )
16 from electricitymap.contrib.lib.types import ZoneKey
17
18 from .lib.exceptions import ParserException
19
20 DOMAIN_MAPPING = {
21 "FR-COR": "https://opendata-corse.edf.fr",
22 "RE": "https://opendata-reunion.edf.fr",
23 "GF": "https://opendata-guyane.edf.fr",
24 "MQ": "https://opendata-martinique.edf.fr",
25 "GP": "https://opendata-guadeloupe.edf.fr",
26 }
27
28 LIVE_DATASETS = {
29 "FR-COR": "production-delectricite-par-filiere-en-temps-reel",
30 "GP": "mix-temps-reel-guadeloupe",
31 "RE": "prod-electricite-temps-reel",
32 }
33
34 HISTORICAL_DATASETS = {
35 "FR-COR": "production-delectricite-par-filiere",
36 "RE": "courbe-de-charge-de-la-production-delectricite-par-filiere",
37 "GF": "courbe-de-charge-de-la-production-delectricite-par-filiere",
38 "MQ": "courbe-de-charge-de-la-production-delectricite-par-filiere",
39 "GP": "courbe-de-charge-de-la-production-delectricite-par-filiere",
40 }
41
42 API_PARAMETER_GROUPS = {
43 "production": {
44 "biomass": [
45 "biomasse",
46 "biomasse_mw",
47 "biomasse_mwh",
48 "bioenergies",
49 "bioenergies_mw",
50 "bioenergies_mwh",
51 ],
52 "coal": [
53 "charbon",
54 ],
55 "gas": [
56 "thermique_mw",
57 "thermique_mwh",
58 "turbines_a_combustion",
59 ],
60 "geothermal": [
61 "geothermie",
62 "geothermie_mw",
63 ],
64 "hydro": [
65 "hydraulique",
66 "hydraulique_mw",
67 "hydraulique_mwh",
68 "micro_hydro",
69 "micro_hydraulique_mw",
70 ],
71 "oil": ["diesel", "moteur_diesel"],
72 "solar": [
73 "photovoltaique",
74 "photovoltaique0",
75 "photovoltaique_mw",
76 "photovoltaique_mwh",
77 "solaire_mw",
78 ],
79 "wind": [
80 "eolien",
81 "eolien_mw",
82 "eolien_mwh",
83 ],
84 "unknown": ["bagasse_charbon_mwh", "charbon_bagasse_mw"],
85 },
86 "storage": {"battery": ["solde_stockage", "stockage"]},
87 "price": {
88 "price": ["cout_moyen_de_production_eur_mwh"],
89 },
90 }
91
92 PRODUCTION_MAPPING = {
93 API_TYPE: type
94 for key in ["production"]
95 for type, groups in API_PARAMETER_GROUPS[key].items()
96 for API_TYPE in groups
97 }
98
99 STORAGE_MAPPING = {
100 API_TYPE: type
101 for key in ["storage"]
102 for type, groups in API_PARAMETER_GROUPS[key].items()
103 for API_TYPE in groups
104 }
105
106 PRICE_MAPPING = {
107 API_TYPE: type
108 for key in ["price"]
109 for type, groups in API_PARAMETER_GROUPS[key].items()
110 for API_TYPE in groups
111 }
112
113 IGNORED_VALUES = ["jour", "total", "statut", "date", "heure", "liaisons", "tac"]
114
115
116 def generate_url(zone_key, target_datetime):
117 return f"{DOMAIN_MAPPING[zone_key]}/api/v2/catalog/datasets/{HISTORICAL_DATASETS[zone_key] if target_datetime else LIVE_DATASETS[zone_key]}/exports/json"
118
119
120 def generate_source(zone_key: ZoneKey):
121 # Return the domain name of the source without the protocol
122 return DOMAIN_MAPPING[zone_key].split("//")[1]
123
124
125 def fetch_data(
126 zone_key: ZoneKey,
127 session: Optional[Session] = None,
128 target_datetime: Optional[datetime] = None,
129 ) -> Tuple[list, str]:
130 ses = session or Session()
131
132 DATE_STRING_MAPPING = {
133 "FR-COR": "date_heure" if target_datetime else "date",
134 "RE": "date_heure" if target_datetime else "date",
135 "GF": "date",
136 "MQ": "date_heure",
137 "GP": "date",
138 }
139
140 if target_datetime and zone_key not in HISTORICAL_DATASETS.keys():
141 raise ParserException(
142 "FR_O.py",
143 f"Historical data not implemented for {zone_key} in this parser.",
144 zone_key,
145 )
146 elif target_datetime is None and zone_key not in LIVE_DATASETS.keys():
147 raise ParserException(
148 "FR_O.py",
149 f"Live data not implemented for {zone_key} in this parser.",
150 zone_key,
151 )
152
153 URL_QUERIES: Dict[str, Union[str, None]] = {
154 # "refine": "statut:Validé" if target_datetime else None,
155 "timezone": "UTC",
156 "order_by": f"{DATE_STRING_MAPPING[zone_key]} desc",
157 "refine": f"{DATE_STRING_MAPPING[zone_key]}:{target_datetime.strftime('%Y')}"
158 if target_datetime
159 else None,
160 }
161
162 url = generate_url(zone_key, target_datetime)
163 response: Response = ses.get(url, params=URL_QUERIES)
164 data: Union[dict, list, None] = response.json()
165 if data == []:
166 raise ParserException(
167 "FR_O.py",
168 f"No data available for {zone_key} for {target_datetime.strftime('%Y')}"
169 if target_datetime
170 else f"No live data available for {zone_key}.",
171 zone_key,
172 )
173 elif isinstance(data, dict):
174 if data.get("errorcode") == "10002":
175 raise ParserException(
176 "FR_O.py",
177 f"Rate limit exceeded. Please try again later after: {data.get('reset_time')}",
178 )
179 elif data.get("error_code") == "ODSQLError":
180 raise ParserException(
181 "FR_O.py",
182 "Query malformed. Please check the parameters. If this was previously working there has likely been a change in the API.",
183 )
184 if not isinstance(data, list):
185 raise ParserException(
186 "FR_O.py",
187 f"Unexpected data format for {zone_key} for {target_datetime.strftime('%Y')}"
188 if target_datetime
189 else f"Unexpected data format for {zone_key}.",
190 zone_key,
191 )
192 return data, DATE_STRING_MAPPING[zone_key]
193
194
195 def fetch_production(
196 zone_key: ZoneKey,
197 session: Optional[Session] = None,
198 target_datetime: Optional[datetime] = None,
199 logger=getLogger(__name__),
200 ):
201 production_objects, date_string = fetch_data(zone_key, session, target_datetime)
202
203 production_breakdown_list = ProductionBreakdownList(logger=logger)
204 for production_object in production_objects:
205 production = ProductionMix()
206 storage = StorageMix()
207 for mode_key in production_object:
208 if mode_key in PRODUCTION_MAPPING:
209 production.add_value(
210 PRODUCTION_MAPPING[mode_key],
211 production_object[mode_key],
212 correct_negative_with_zero=True,
213 )
214 elif mode_key in STORAGE_MAPPING:
215 storage.add_value(
216 STORAGE_MAPPING[mode_key], -production_object[mode_key]
217 )
218 elif mode_key in IGNORED_VALUES:
219 pass
220 else:
221 logger.warning(
222 f"Unknown mode_key: '{mode_key}' encountered for {zone_key}."
223 )
224
225 production_breakdown_list.append(
226 zoneKey=zone_key,
227 datetime=datetime.fromisoformat(production_object[date_string]),
228 production=production,
229 storage=storage,
230 source=generate_source(zone_key),
231 sourceType=EventSourceType.estimated
232 if production_object["statut"] == "Estimé"
233 else EventSourceType.measured,
234 )
235 return production_breakdown_list.to_list()
236
237
238 def fetch_price(
239 zone_key: ZoneKey,
240 session: Optional[Session] = None,
241 target_datetime: Optional[datetime] = None,
242 logger=getLogger(__name__),
243 ):
244 data_objects, date_string = fetch_data(zone_key, session, target_datetime)
245
246 price_list = PriceList(logger=logger)
247 for data_object in data_objects:
248 price: Union[float, int, None] = None
249 for mode_key in data_object:
250 if mode_key in PRICE_MAPPING:
251 price = data_object[mode_key]
252 break
253 if price is not None:
254 price_list.append(
255 zoneKey=zone_key,
256 currency="EUR",
257 datetime=datetime.fromisoformat(data_object[date_string]),
258 source=generate_source(zone_key),
259 price=price,
260 )
261 return price_list.to_list()
262
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parsers/FR_O.py b/parsers/FR_O.py
--- a/parsers/FR_O.py
+++ b/parsers/FR_O.py
@@ -29,6 +29,8 @@
"FR-COR": "production-delectricite-par-filiere-en-temps-reel",
"GP": "mix-temps-reel-guadeloupe",
"RE": "prod-electricite-temps-reel",
+ "GF": "production-d-electricite-par-filiere-en-temps-reel",
+ "MQ": "production-delectricite-par-filiere-en-temps-reel",
}
HISTORICAL_DATASETS = {
@@ -68,7 +70,7 @@
"micro_hydro",
"micro_hydraulique_mw",
],
- "oil": ["diesel", "moteur_diesel"],
+ "oil": ["diesel", "moteur_diesel", "centrale_au_fioul", "moteurs_diesels"],
"solar": [
"photovoltaique",
"photovoltaique0",
@@ -133,7 +135,7 @@
"FR-COR": "date_heure" if target_datetime else "date",
"RE": "date_heure" if target_datetime else "date",
"GF": "date",
- "MQ": "date_heure",
+ "MQ": "date_heure" if target_datetime else "date",
"GP": "date",
}
@@ -229,7 +231,7 @@
storage=storage,
source=generate_source(zone_key),
sourceType=EventSourceType.estimated
- if production_object["statut"] == "Estimé"
+ if production_object.get("statut") == "Estimé"
else EventSourceType.measured,
)
return production_breakdown_list.to_list()
| {"golden_diff": "diff --git a/parsers/FR_O.py b/parsers/FR_O.py\n--- a/parsers/FR_O.py\n+++ b/parsers/FR_O.py\n@@ -29,6 +29,8 @@\n \"FR-COR\": \"production-delectricite-par-filiere-en-temps-reel\",\n \"GP\": \"mix-temps-reel-guadeloupe\",\n \"RE\": \"prod-electricite-temps-reel\",\n+ \"GF\": \"production-d-electricite-par-filiere-en-temps-reel\",\n+ \"MQ\": \"production-delectricite-par-filiere-en-temps-reel\",\n }\n \n HISTORICAL_DATASETS = {\n@@ -68,7 +70,7 @@\n \"micro_hydro\",\n \"micro_hydraulique_mw\",\n ],\n- \"oil\": [\"diesel\", \"moteur_diesel\"],\n+ \"oil\": [\"diesel\", \"moteur_diesel\", \"centrale_au_fioul\", \"moteurs_diesels\"],\n \"solar\": [\n \"photovoltaique\",\n \"photovoltaique0\",\n@@ -133,7 +135,7 @@\n \"FR-COR\": \"date_heure\" if target_datetime else \"date\",\n \"RE\": \"date_heure\" if target_datetime else \"date\",\n \"GF\": \"date\",\n- \"MQ\": \"date_heure\",\n+ \"MQ\": \"date_heure\" if target_datetime else \"date\",\n \"GP\": \"date\",\n }\n \n@@ -229,7 +231,7 @@\n storage=storage,\n source=generate_source(zone_key),\n sourceType=EventSourceType.estimated\n- if production_object[\"statut\"] == \"Estim\u00e9\"\n+ if production_object.get(\"statut\") == \"Estim\u00e9\"\n else EventSourceType.measured,\n )\n return production_breakdown_list.to_list()\n", "issue": "Data for French Guyana (up to today)\nHourly production data for French Guyana: https://opendata-guyane.edf.fr/explore/dataset/production-d-electricite-par-filiere-en-temps-reel/\r\n\r\nThis one is up-to-date to today!\r\n\r\nAlso mentioned in #1912, but that was closed due to inactivity after some of the data sources were implemented. Also, the URLs have changed.\r\n\r\nEdit: looks like the FR_O parser supports a 'historical dataset', but not this real-time dataset.\n", "before_files": [{"content": "from datetime import datetime\nfrom logging import getLogger\nfrom typing import Dict, Optional, Tuple, Union\n\nfrom requests import Response, Session\n\nfrom electricitymap.contrib.lib.models.event_lists import (\n PriceList,\n ProductionBreakdownList,\n)\nfrom electricitymap.contrib.lib.models.events import (\n EventSourceType,\n ProductionMix,\n StorageMix,\n)\nfrom electricitymap.contrib.lib.types import ZoneKey\n\nfrom .lib.exceptions import ParserException\n\nDOMAIN_MAPPING = {\n \"FR-COR\": \"https://opendata-corse.edf.fr\",\n \"RE\": \"https://opendata-reunion.edf.fr\",\n \"GF\": \"https://opendata-guyane.edf.fr\",\n \"MQ\": \"https://opendata-martinique.edf.fr\",\n \"GP\": \"https://opendata-guadeloupe.edf.fr\",\n}\n\nLIVE_DATASETS = {\n \"FR-COR\": \"production-delectricite-par-filiere-en-temps-reel\",\n \"GP\": \"mix-temps-reel-guadeloupe\",\n \"RE\": \"prod-electricite-temps-reel\",\n}\n\nHISTORICAL_DATASETS = {\n \"FR-COR\": \"production-delectricite-par-filiere\",\n \"RE\": \"courbe-de-charge-de-la-production-delectricite-par-filiere\",\n \"GF\": \"courbe-de-charge-de-la-production-delectricite-par-filiere\",\n \"MQ\": \"courbe-de-charge-de-la-production-delectricite-par-filiere\",\n \"GP\": \"courbe-de-charge-de-la-production-delectricite-par-filiere\",\n}\n\nAPI_PARAMETER_GROUPS = {\n \"production\": {\n \"biomass\": [\n \"biomasse\",\n \"biomasse_mw\",\n \"biomasse_mwh\",\n \"bioenergies\",\n \"bioenergies_mw\",\n \"bioenergies_mwh\",\n ],\n \"coal\": [\n \"charbon\",\n ],\n \"gas\": [\n \"thermique_mw\",\n \"thermique_mwh\",\n \"turbines_a_combustion\",\n ],\n \"geothermal\": [\n \"geothermie\",\n \"geothermie_mw\",\n ],\n \"hydro\": [\n \"hydraulique\",\n \"hydraulique_mw\",\n 
\"hydraulique_mwh\",\n \"micro_hydro\",\n \"micro_hydraulique_mw\",\n ],\n \"oil\": [\"diesel\", \"moteur_diesel\"],\n \"solar\": [\n \"photovoltaique\",\n \"photovoltaique0\",\n \"photovoltaique_mw\",\n \"photovoltaique_mwh\",\n \"solaire_mw\",\n ],\n \"wind\": [\n \"eolien\",\n \"eolien_mw\",\n \"eolien_mwh\",\n ],\n \"unknown\": [\"bagasse_charbon_mwh\", \"charbon_bagasse_mw\"],\n },\n \"storage\": {\"battery\": [\"solde_stockage\", \"stockage\"]},\n \"price\": {\n \"price\": [\"cout_moyen_de_production_eur_mwh\"],\n },\n}\n\nPRODUCTION_MAPPING = {\n API_TYPE: type\n for key in [\"production\"]\n for type, groups in API_PARAMETER_GROUPS[key].items()\n for API_TYPE in groups\n}\n\nSTORAGE_MAPPING = {\n API_TYPE: type\n for key in [\"storage\"]\n for type, groups in API_PARAMETER_GROUPS[key].items()\n for API_TYPE in groups\n}\n\nPRICE_MAPPING = {\n API_TYPE: type\n for key in [\"price\"]\n for type, groups in API_PARAMETER_GROUPS[key].items()\n for API_TYPE in groups\n}\n\nIGNORED_VALUES = [\"jour\", \"total\", \"statut\", \"date\", \"heure\", \"liaisons\", \"tac\"]\n\n\ndef generate_url(zone_key, target_datetime):\n return f\"{DOMAIN_MAPPING[zone_key]}/api/v2/catalog/datasets/{HISTORICAL_DATASETS[zone_key] if target_datetime else LIVE_DATASETS[zone_key]}/exports/json\"\n\n\ndef generate_source(zone_key: ZoneKey):\n # Return the domain name of the source without the protocol\n return DOMAIN_MAPPING[zone_key].split(\"//\")[1]\n\n\ndef fetch_data(\n zone_key: ZoneKey,\n session: Optional[Session] = None,\n target_datetime: Optional[datetime] = None,\n) -> Tuple[list, str]:\n ses = session or Session()\n\n DATE_STRING_MAPPING = {\n \"FR-COR\": \"date_heure\" if target_datetime else \"date\",\n \"RE\": \"date_heure\" if target_datetime else \"date\",\n \"GF\": \"date\",\n \"MQ\": \"date_heure\",\n \"GP\": \"date\",\n }\n\n if target_datetime and zone_key not in HISTORICAL_DATASETS.keys():\n raise ParserException(\n \"FR_O.py\",\n f\"Historical data not implemented for {zone_key} in this parser.\",\n zone_key,\n )\n elif target_datetime is None and zone_key not in LIVE_DATASETS.keys():\n raise ParserException(\n \"FR_O.py\",\n f\"Live data not implemented for {zone_key} in this parser.\",\n zone_key,\n )\n\n URL_QUERIES: Dict[str, Union[str, None]] = {\n # \"refine\": \"statut:Valid\u00e9\" if target_datetime else None,\n \"timezone\": \"UTC\",\n \"order_by\": f\"{DATE_STRING_MAPPING[zone_key]} desc\",\n \"refine\": f\"{DATE_STRING_MAPPING[zone_key]}:{target_datetime.strftime('%Y')}\"\n if target_datetime\n else None,\n }\n\n url = generate_url(zone_key, target_datetime)\n response: Response = ses.get(url, params=URL_QUERIES)\n data: Union[dict, list, None] = response.json()\n if data == []:\n raise ParserException(\n \"FR_O.py\",\n f\"No data available for {zone_key} for {target_datetime.strftime('%Y')}\"\n if target_datetime\n else f\"No live data available for {zone_key}.\",\n zone_key,\n )\n elif isinstance(data, dict):\n if data.get(\"errorcode\") == \"10002\":\n raise ParserException(\n \"FR_O.py\",\n f\"Rate limit exceeded. Please try again later after: {data.get('reset_time')}\",\n )\n elif data.get(\"error_code\") == \"ODSQLError\":\n raise ParserException(\n \"FR_O.py\",\n \"Query malformed. Please check the parameters. 
If this was previously working there has likely been a change in the API.\",\n )\n if not isinstance(data, list):\n raise ParserException(\n \"FR_O.py\",\n f\"Unexpected data format for {zone_key} for {target_datetime.strftime('%Y')}\"\n if target_datetime\n else f\"Unexpected data format for {zone_key}.\",\n zone_key,\n )\n return data, DATE_STRING_MAPPING[zone_key]\n\n\ndef fetch_production(\n zone_key: ZoneKey,\n session: Optional[Session] = None,\n target_datetime: Optional[datetime] = None,\n logger=getLogger(__name__),\n):\n production_objects, date_string = fetch_data(zone_key, session, target_datetime)\n\n production_breakdown_list = ProductionBreakdownList(logger=logger)\n for production_object in production_objects:\n production = ProductionMix()\n storage = StorageMix()\n for mode_key in production_object:\n if mode_key in PRODUCTION_MAPPING:\n production.add_value(\n PRODUCTION_MAPPING[mode_key],\n production_object[mode_key],\n correct_negative_with_zero=True,\n )\n elif mode_key in STORAGE_MAPPING:\n storage.add_value(\n STORAGE_MAPPING[mode_key], -production_object[mode_key]\n )\n elif mode_key in IGNORED_VALUES:\n pass\n else:\n logger.warning(\n f\"Unknown mode_key: '{mode_key}' encountered for {zone_key}.\"\n )\n\n production_breakdown_list.append(\n zoneKey=zone_key,\n datetime=datetime.fromisoformat(production_object[date_string]),\n production=production,\n storage=storage,\n source=generate_source(zone_key),\n sourceType=EventSourceType.estimated\n if production_object[\"statut\"] == \"Estim\u00e9\"\n else EventSourceType.measured,\n )\n return production_breakdown_list.to_list()\n\n\ndef fetch_price(\n zone_key: ZoneKey,\n session: Optional[Session] = None,\n target_datetime: Optional[datetime] = None,\n logger=getLogger(__name__),\n):\n data_objects, date_string = fetch_data(zone_key, session, target_datetime)\n\n price_list = PriceList(logger=logger)\n for data_object in data_objects:\n price: Union[float, int, None] = None\n for mode_key in data_object:\n if mode_key in PRICE_MAPPING:\n price = data_object[mode_key]\n break\n if price is not None:\n price_list.append(\n zoneKey=zone_key,\n currency=\"EUR\",\n datetime=datetime.fromisoformat(data_object[date_string]),\n source=generate_source(zone_key),\n price=price,\n )\n return price_list.to_list()\n", "path": "parsers/FR_O.py"}], "after_files": [{"content": "from datetime import datetime\nfrom logging import getLogger\nfrom typing import Dict, Optional, Tuple, Union\n\nfrom requests import Response, Session\n\nfrom electricitymap.contrib.lib.models.event_lists import (\n PriceList,\n ProductionBreakdownList,\n)\nfrom electricitymap.contrib.lib.models.events import (\n EventSourceType,\n ProductionMix,\n StorageMix,\n)\nfrom electricitymap.contrib.lib.types import ZoneKey\n\nfrom .lib.exceptions import ParserException\n\nDOMAIN_MAPPING = {\n \"FR-COR\": \"https://opendata-corse.edf.fr\",\n \"RE\": \"https://opendata-reunion.edf.fr\",\n \"GF\": \"https://opendata-guyane.edf.fr\",\n \"MQ\": \"https://opendata-martinique.edf.fr\",\n \"GP\": \"https://opendata-guadeloupe.edf.fr\",\n}\n\nLIVE_DATASETS = {\n \"FR-COR\": \"production-delectricite-par-filiere-en-temps-reel\",\n \"GP\": \"mix-temps-reel-guadeloupe\",\n \"RE\": \"prod-electricite-temps-reel\",\n \"GF\": \"production-d-electricite-par-filiere-en-temps-reel\",\n \"MQ\": \"production-delectricite-par-filiere-en-temps-reel\",\n}\n\nHISTORICAL_DATASETS = {\n \"FR-COR\": \"production-delectricite-par-filiere\",\n \"RE\": 
\"courbe-de-charge-de-la-production-delectricite-par-filiere\",\n \"GF\": \"courbe-de-charge-de-la-production-delectricite-par-filiere\",\n \"MQ\": \"courbe-de-charge-de-la-production-delectricite-par-filiere\",\n \"GP\": \"courbe-de-charge-de-la-production-delectricite-par-filiere\",\n}\n\nAPI_PARAMETER_GROUPS = {\n \"production\": {\n \"biomass\": [\n \"biomasse\",\n \"biomasse_mw\",\n \"biomasse_mwh\",\n \"bioenergies\",\n \"bioenergies_mw\",\n \"bioenergies_mwh\",\n ],\n \"coal\": [\n \"charbon\",\n ],\n \"gas\": [\n \"thermique_mw\",\n \"thermique_mwh\",\n \"turbines_a_combustion\",\n ],\n \"geothermal\": [\n \"geothermie\",\n \"geothermie_mw\",\n ],\n \"hydro\": [\n \"hydraulique\",\n \"hydraulique_mw\",\n \"hydraulique_mwh\",\n \"micro_hydro\",\n \"micro_hydraulique_mw\",\n ],\n \"oil\": [\"diesel\", \"moteur_diesel\", \"centrale_au_fioul\", \"moteurs_diesels\"],\n \"solar\": [\n \"photovoltaique\",\n \"photovoltaique0\",\n \"photovoltaique_mw\",\n \"photovoltaique_mwh\",\n \"solaire_mw\",\n ],\n \"wind\": [\n \"eolien\",\n \"eolien_mw\",\n \"eolien_mwh\",\n ],\n \"unknown\": [\"bagasse_charbon_mwh\", \"charbon_bagasse_mw\"],\n },\n \"storage\": {\"battery\": [\"solde_stockage\", \"stockage\"]},\n \"price\": {\n \"price\": [\"cout_moyen_de_production_eur_mwh\"],\n },\n}\n\nPRODUCTION_MAPPING = {\n API_TYPE: type\n for key in [\"production\"]\n for type, groups in API_PARAMETER_GROUPS[key].items()\n for API_TYPE in groups\n}\n\nSTORAGE_MAPPING = {\n API_TYPE: type\n for key in [\"storage\"]\n for type, groups in API_PARAMETER_GROUPS[key].items()\n for API_TYPE in groups\n}\n\nPRICE_MAPPING = {\n API_TYPE: type\n for key in [\"price\"]\n for type, groups in API_PARAMETER_GROUPS[key].items()\n for API_TYPE in groups\n}\n\nIGNORED_VALUES = [\"jour\", \"total\", \"statut\", \"date\", \"heure\", \"liaisons\", \"tac\"]\n\n\ndef generate_url(zone_key, target_datetime):\n return f\"{DOMAIN_MAPPING[zone_key]}/api/v2/catalog/datasets/{HISTORICAL_DATASETS[zone_key] if target_datetime else LIVE_DATASETS[zone_key]}/exports/json\"\n\n\ndef generate_source(zone_key: ZoneKey):\n # Return the domain name of the source without the protocol\n return DOMAIN_MAPPING[zone_key].split(\"//\")[1]\n\n\ndef fetch_data(\n zone_key: ZoneKey,\n session: Optional[Session] = None,\n target_datetime: Optional[datetime] = None,\n) -> Tuple[list, str]:\n ses = session or Session()\n\n DATE_STRING_MAPPING = {\n \"FR-COR\": \"date_heure\" if target_datetime else \"date\",\n \"RE\": \"date_heure\" if target_datetime else \"date\",\n \"GF\": \"date\",\n \"MQ\": \"date_heure\" if target_datetime else \"date\",\n \"GP\": \"date\",\n }\n\n if target_datetime and zone_key not in HISTORICAL_DATASETS.keys():\n raise ParserException(\n \"FR_O.py\",\n f\"Historical data not implemented for {zone_key} in this parser.\",\n zone_key,\n )\n elif target_datetime is None and zone_key not in LIVE_DATASETS.keys():\n raise ParserException(\n \"FR_O.py\",\n f\"Live data not implemented for {zone_key} in this parser.\",\n zone_key,\n )\n\n URL_QUERIES: Dict[str, Union[str, None]] = {\n # \"refine\": \"statut:Valid\u00e9\" if target_datetime else None,\n \"timezone\": \"UTC\",\n \"order_by\": f\"{DATE_STRING_MAPPING[zone_key]} desc\",\n \"refine\": f\"{DATE_STRING_MAPPING[zone_key]}:{target_datetime.strftime('%Y')}\"\n if target_datetime\n else None,\n }\n\n url = generate_url(zone_key, target_datetime)\n response: Response = ses.get(url, params=URL_QUERIES)\n data: Union[dict, list, None] = response.json()\n if data == []:\n raise 
ParserException(\n \"FR_O.py\",\n f\"No data available for {zone_key} for {target_datetime.strftime('%Y')}\"\n if target_datetime\n else f\"No live data available for {zone_key}.\",\n zone_key,\n )\n elif isinstance(data, dict):\n if data.get(\"errorcode\") == \"10002\":\n raise ParserException(\n \"FR_O.py\",\n f\"Rate limit exceeded. Please try again later after: {data.get('reset_time')}\",\n )\n elif data.get(\"error_code\") == \"ODSQLError\":\n raise ParserException(\n \"FR_O.py\",\n \"Query malformed. Please check the parameters. If this was previously working there has likely been a change in the API.\",\n )\n if not isinstance(data, list):\n raise ParserException(\n \"FR_O.py\",\n f\"Unexpected data format for {zone_key} for {target_datetime.strftime('%Y')}\"\n if target_datetime\n else f\"Unexpected data format for {zone_key}.\",\n zone_key,\n )\n return data, DATE_STRING_MAPPING[zone_key]\n\n\ndef fetch_production(\n zone_key: ZoneKey,\n session: Optional[Session] = None,\n target_datetime: Optional[datetime] = None,\n logger=getLogger(__name__),\n):\n production_objects, date_string = fetch_data(zone_key, session, target_datetime)\n\n production_breakdown_list = ProductionBreakdownList(logger=logger)\n for production_object in production_objects:\n production = ProductionMix()\n storage = StorageMix()\n for mode_key in production_object:\n if mode_key in PRODUCTION_MAPPING:\n production.add_value(\n PRODUCTION_MAPPING[mode_key],\n production_object[mode_key],\n correct_negative_with_zero=True,\n )\n elif mode_key in STORAGE_MAPPING:\n storage.add_value(\n STORAGE_MAPPING[mode_key], -production_object[mode_key]\n )\n elif mode_key in IGNORED_VALUES:\n pass\n else:\n logger.warning(\n f\"Unknown mode_key: '{mode_key}' encountered for {zone_key}.\"\n )\n\n production_breakdown_list.append(\n zoneKey=zone_key,\n datetime=datetime.fromisoformat(production_object[date_string]),\n production=production,\n storage=storage,\n source=generate_source(zone_key),\n sourceType=EventSourceType.estimated\n if production_object.get(\"statut\") == \"Estim\u00e9\"\n else EventSourceType.measured,\n )\n return production_breakdown_list.to_list()\n\n\ndef fetch_price(\n zone_key: ZoneKey,\n session: Optional[Session] = None,\n target_datetime: Optional[datetime] = None,\n logger=getLogger(__name__),\n):\n data_objects, date_string = fetch_data(zone_key, session, target_datetime)\n\n price_list = PriceList(logger=logger)\n for data_object in data_objects:\n price: Union[float, int, None] = None\n for mode_key in data_object:\n if mode_key in PRICE_MAPPING:\n price = data_object[mode_key]\n break\n if price is not None:\n price_list.append(\n zoneKey=zone_key,\n currency=\"EUR\",\n datetime=datetime.fromisoformat(data_object[date_string]),\n source=generate_source(zone_key),\n price=price,\n )\n return price_list.to_list()\n", "path": "parsers/FR_O.py"}]} | 3,050 | 430 |
gh_patches_debug_43345 | rasdani/github-patches | git_diff | docker__docker-py-2135 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Service logs does not appear in online docs
https://github.com/docker/docker-py/blob/e78e4e7491da7055151bfe454282770786a8c270/docker/models/services.py#L85
https://docker-py.readthedocs.io/en/stable/services.html
--- END ISSUE ---
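A note on the likely rendering problem: the API reference is generated from Google-style docstrings (typically via Sphinx's Napoleon extension), and Napoleon only treats `Returns:` as a section header when it sits alone on its own line, so a one-line `Returns (generator): ...` is rendered as plain prose rather than a structured Returns block. The sketch below is illustrative only — the function names are placeholders, not the project's actual code:

```python
# Form that Napoleon does NOT parse into a structured "Returns" section:
def logs_unstructured(**kwargs):
    """Get log stream for the service.

    Returns (generator): Logs for the service.
    """


# Google-style form that Napoleon renders as a proper "Returns" block:
def logs_structured(**kwargs):
    """Get log stream for the service.

    Returns:
        generator: Logs for the service.
    """
```

If a section still fails to appear after reformatting, confirming that `sphinx.ext.napoleon` is enabled in the documentation's `conf.py` is a reasonable next check.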
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docker/models/services.py`
Content:
```
1 import copy
2 from docker.errors import create_unexpected_kwargs_error, InvalidArgument
3 from docker.types import TaskTemplate, ContainerSpec, Placement, ServiceMode
4 from .resource import Model, Collection
5
6
7 class Service(Model):
8 """A service."""
9 id_attribute = 'ID'
10
11 @property
12 def name(self):
13 """The service's name."""
14 return self.attrs['Spec']['Name']
15
16 @property
17 def version(self):
18 """
19 The version number of the service. If this is not the same as the
20 server, the :py:meth:`update` function will not work and you will
21 need to call :py:meth:`reload` before calling it again.
22 """
23 return self.attrs.get('Version').get('Index')
24
25 def remove(self):
26 """
27 Stop and remove the service.
28
29 Raises:
30 :py:class:`docker.errors.APIError`
31 If the server returns an error.
32 """
33 return self.client.api.remove_service(self.id)
34
35 def tasks(self, filters=None):
36 """
37 List the tasks in this service.
38
39 Args:
40 filters (dict): A map of filters to process on the tasks list.
41 Valid filters: ``id``, ``name``, ``node``,
42 ``label``, and ``desired-state``.
43
44 Returns:
45 (:py:class:`list`): List of task dictionaries.
46
47 Raises:
48 :py:class:`docker.errors.APIError`
49 If the server returns an error.
50 """
51 if filters is None:
52 filters = {}
53 filters['service'] = self.id
54 return self.client.api.tasks(filters=filters)
55
56 def update(self, **kwargs):
57 """
58 Update a service's configuration. Similar to the ``docker service
59 update`` command.
60
61 Takes the same parameters as :py:meth:`~ServiceCollection.create`.
62
63 Raises:
64 :py:class:`docker.errors.APIError`
65 If the server returns an error.
66 """
67 # Image is required, so if it hasn't been set, use current image
68 if 'image' not in kwargs:
69 spec = self.attrs['Spec']['TaskTemplate']['ContainerSpec']
70 kwargs['image'] = spec['Image']
71
72 if kwargs.get('force_update') is True:
73 task_template = self.attrs['Spec']['TaskTemplate']
74 current_value = int(task_template.get('ForceUpdate', 0))
75 kwargs['force_update'] = current_value + 1
76
77 create_kwargs = _get_create_service_kwargs('update', kwargs)
78
79 return self.client.api.update_service(
80 self.id,
81 self.version,
82 **create_kwargs
83 )
84
85 def logs(self, **kwargs):
86 """
87 Get log stream for the service.
88 Note: This method works only for services with the ``json-file``
89 or ``journald`` logging drivers.
90
91 Args:
92 details (bool): Show extra details provided to logs.
93 Default: ``False``
94 follow (bool): Keep connection open to read logs as they are
95 sent by the Engine. Default: ``False``
96 stdout (bool): Return logs from ``stdout``. Default: ``False``
97 stderr (bool): Return logs from ``stderr``. Default: ``False``
98 since (int): UNIX timestamp for the logs staring point.
99 Default: 0
100 timestamps (bool): Add timestamps to every log line.
101 tail (string or int): Number of log lines to be returned,
102 counting from the current end of the logs. Specify an
103 integer or ``'all'`` to output all log lines.
104 Default: ``all``
105
106 Returns (generator): Logs for the service.
107 """
108 is_tty = self.attrs['Spec']['TaskTemplate']['ContainerSpec'].get(
109 'TTY', False
110 )
111 return self.client.api.service_logs(self.id, is_tty=is_tty, **kwargs)
112
113 def scale(self, replicas):
114 """
115 Scale service container.
116
117 Args:
118 replicas (int): The number of containers that should be running.
119
120 Returns:
121 ``True``if successful.
122 """
123
124 if 'Global' in self.attrs['Spec']['Mode'].keys():
125 raise InvalidArgument('Cannot scale a global container')
126
127 service_mode = ServiceMode('replicated', replicas)
128 return self.client.api.update_service(self.id, self.version,
129 mode=service_mode,
130 fetch_current_spec=True)
131
132 def force_update(self):
133 """
134 Force update the service even if no changes require it.
135
136 Returns:
137 ``True``if successful.
138 """
139
140 return self.update(force_update=True, fetch_current_spec=True)
141
142
143 class ServiceCollection(Collection):
144 """Services on the Docker server."""
145 model = Service
146
147 def create(self, image, command=None, **kwargs):
148 """
149 Create a service. Similar to the ``docker service create`` command.
150
151 Args:
152 image (str): The image name to use for the containers.
153 command (list of str or str): Command to run.
154 args (list of str): Arguments to the command.
155 constraints (list of str): Placement constraints.
156 preferences (list of str): Placement preferences.
157 platforms (list of tuple): A list of platforms constraints
158 expressed as ``(arch, os)`` tuples
159 container_labels (dict): Labels to apply to the container.
160 endpoint_spec (EndpointSpec): Properties that can be configured to
161 access and load balance a service. Default: ``None``.
162 env (list of str): Environment variables, in the form
163 ``KEY=val``.
164 hostname (string): Hostname to set on the container.
165 isolation (string): Isolation technology used by the service's
166 containers. Only used for Windows containers.
167 labels (dict): Labels to apply to the service.
168 log_driver (str): Log driver to use for containers.
169 log_driver_options (dict): Log driver options.
170 mode (ServiceMode): Scheduling mode for the service.
171 Default:``None``
172 mounts (list of str): Mounts for the containers, in the form
173 ``source:target:options``, where options is either
174 ``ro`` or ``rw``.
175 name (str): Name to give to the service.
176 networks (list of str): List of network names or IDs to attach
177 the service to. Default: ``None``.
178 resources (Resources): Resource limits and reservations.
179 restart_policy (RestartPolicy): Restart policy for containers.
180 secrets (list of :py:class:`docker.types.SecretReference`): List
181 of secrets accessible to containers for this service.
182 stop_grace_period (int): Amount of time to wait for
183 containers to terminate before forcefully killing them.
184 update_config (UpdateConfig): Specification for the update strategy
185 of the service. Default: ``None``
186 rollback_config (RollbackConfig): Specification for the rollback
187 strategy of the service. Default: ``None``
188 user (str): User to run commands as.
189 workdir (str): Working directory for commands to run.
190 tty (boolean): Whether a pseudo-TTY should be allocated.
191 groups (:py:class:`list`): A list of additional groups that the
192 container process will run as.
193 open_stdin (boolean): Open ``stdin``
194 read_only (boolean): Mount the container's root filesystem as read
195 only.
196 stop_signal (string): Set signal to stop the service's containers
197 healthcheck (Healthcheck): Healthcheck
198 configuration for this service.
199 hosts (:py:class:`dict`): A set of host to IP mappings to add to
200 the container's `hosts` file.
201 dns_config (DNSConfig): Specification for DNS
202 related configurations in resolver configuration file.
203 configs (:py:class:`list`): List of :py:class:`ConfigReference`
204 that will be exposed to the service.
205 privileges (Privileges): Security options for the service's
206 containers.
207
208 Returns:
209 (:py:class:`Service`) The created service.
210
211 Raises:
212 :py:class:`docker.errors.APIError`
213 If the server returns an error.
214 """
215 kwargs['image'] = image
216 kwargs['command'] = command
217 create_kwargs = _get_create_service_kwargs('create', kwargs)
218 service_id = self.client.api.create_service(**create_kwargs)
219 return self.get(service_id)
220
221 def get(self, service_id, insert_defaults=None):
222 """
223 Get a service.
224
225 Args:
226 service_id (str): The ID of the service.
227 insert_defaults (boolean): If true, default values will be merged
228 into the output.
229
230 Returns:
231 (:py:class:`Service`): The service.
232
233 Raises:
234 :py:class:`docker.errors.NotFound`
235 If the service does not exist.
236 :py:class:`docker.errors.APIError`
237 If the server returns an error.
238 :py:class:`docker.errors.InvalidVersion`
239 If one of the arguments is not supported with the current
240 API version.
241 """
242 return self.prepare_model(
243 self.client.api.inspect_service(service_id, insert_defaults)
244 )
245
246 def list(self, **kwargs):
247 """
248 List services.
249
250 Args:
251 filters (dict): Filters to process on the nodes list. Valid
252 filters: ``id``, ``name`` , ``label`` and ``mode``.
253 Default: ``None``.
254
255 Returns:
256 (list of :py:class:`Service`): The services.
257
258 Raises:
259 :py:class:`docker.errors.APIError`
260 If the server returns an error.
261 """
262 return [
263 self.prepare_model(s)
264 for s in self.client.api.services(**kwargs)
265 ]
266
267
268 # kwargs to copy straight over to ContainerSpec
269 CONTAINER_SPEC_KWARGS = [
270 'args',
271 'command',
272 'configs',
273 'dns_config',
274 'env',
275 'groups',
276 'healthcheck',
277 'hostname',
278 'hosts',
279 'image',
280 'isolation',
281 'labels',
282 'mounts',
283 'open_stdin',
284 'privileges',
285 'read_only',
286 'secrets',
287 'stop_grace_period',
288 'stop_signal',
289 'tty',
290 'user',
291 'workdir',
292 ]
293
294 # kwargs to copy straight over to TaskTemplate
295 TASK_TEMPLATE_KWARGS = [
296 'networks',
297 'resources',
298 'restart_policy',
299 ]
300
301 # kwargs to copy straight over to create_service
302 CREATE_SERVICE_KWARGS = [
303 'name',
304 'labels',
305 'mode',
306 'update_config',
307 'endpoint_spec',
308 ]
309
310 PLACEMENT_KWARGS = [
311 'constraints',
312 'preferences',
313 'platforms',
314 ]
315
316
317 def _get_create_service_kwargs(func_name, kwargs):
318 # Copy over things which can be copied directly
319 create_kwargs = {}
320 for key in copy.copy(kwargs):
321 if key in CREATE_SERVICE_KWARGS:
322 create_kwargs[key] = kwargs.pop(key)
323 container_spec_kwargs = {}
324 for key in copy.copy(kwargs):
325 if key in CONTAINER_SPEC_KWARGS:
326 container_spec_kwargs[key] = kwargs.pop(key)
327 task_template_kwargs = {}
328 for key in copy.copy(kwargs):
329 if key in TASK_TEMPLATE_KWARGS:
330 task_template_kwargs[key] = kwargs.pop(key)
331
332 if 'container_labels' in kwargs:
333 container_spec_kwargs['labels'] = kwargs.pop('container_labels')
334
335 placement = {}
336 for key in copy.copy(kwargs):
337 if key in PLACEMENT_KWARGS:
338 placement[key] = kwargs.pop(key)
339 placement = Placement(**placement)
340 task_template_kwargs['placement'] = placement
341
342 if 'log_driver' in kwargs:
343 task_template_kwargs['log_driver'] = {
344 'Name': kwargs.pop('log_driver'),
345 'Options': kwargs.pop('log_driver_options', {})
346 }
347
348 if func_name == 'update':
349 if 'force_update' in kwargs:
350 task_template_kwargs['force_update'] = kwargs.pop('force_update')
351
352 # fetch the current spec by default if updating the service
353 # through the model
354 fetch_current_spec = kwargs.pop('fetch_current_spec', True)
355 create_kwargs['fetch_current_spec'] = fetch_current_spec
356
357 # All kwargs should have been consumed by this point, so raise
358 # error if any are left
359 if kwargs:
360 raise create_unexpected_kwargs_error(func_name, kwargs)
361
362 container_spec = ContainerSpec(**container_spec_kwargs)
363 task_template_kwargs['container_spec'] = container_spec
364 create_kwargs['task_template'] = TaskTemplate(**task_template_kwargs)
365 return create_kwargs
366
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docker/models/services.py b/docker/models/services.py
--- a/docker/models/services.py
+++ b/docker/models/services.py
@@ -42,7 +42,7 @@
``label``, and ``desired-state``.
Returns:
- (:py:class:`list`): List of task dictionaries.
+ :py:class:`list`: List of task dictionaries.
Raises:
:py:class:`docker.errors.APIError`
@@ -84,26 +84,27 @@
def logs(self, **kwargs):
"""
- Get log stream for the service.
- Note: This method works only for services with the ``json-file``
- or ``journald`` logging drivers.
-
- Args:
- details (bool): Show extra details provided to logs.
- Default: ``False``
- follow (bool): Keep connection open to read logs as they are
- sent by the Engine. Default: ``False``
- stdout (bool): Return logs from ``stdout``. Default: ``False``
- stderr (bool): Return logs from ``stderr``. Default: ``False``
- since (int): UNIX timestamp for the logs staring point.
- Default: 0
- timestamps (bool): Add timestamps to every log line.
- tail (string or int): Number of log lines to be returned,
- counting from the current end of the logs. Specify an
- integer or ``'all'`` to output all log lines.
- Default: ``all``
-
- Returns (generator): Logs for the service.
+ Get log stream for the service.
+ Note: This method works only for services with the ``json-file``
+ or ``journald`` logging drivers.
+
+ Args:
+ details (bool): Show extra details provided to logs.
+ Default: ``False``
+ follow (bool): Keep connection open to read logs as they are
+ sent by the Engine. Default: ``False``
+ stdout (bool): Return logs from ``stdout``. Default: ``False``
+ stderr (bool): Return logs from ``stderr``. Default: ``False``
+ since (int): UNIX timestamp for the logs staring point.
+ Default: 0
+ timestamps (bool): Add timestamps to every log line.
+ tail (string or int): Number of log lines to be returned,
+ counting from the current end of the logs. Specify an
+ integer or ``'all'`` to output all log lines.
+ Default: ``all``
+
+ Returns:
+ generator: Logs for the service.
"""
is_tty = self.attrs['Spec']['TaskTemplate']['ContainerSpec'].get(
'TTY', False
@@ -118,7 +119,7 @@
replicas (int): The number of containers that should be running.
Returns:
- ``True``if successful.
+ bool: ``True`` if successful.
"""
if 'Global' in self.attrs['Spec']['Mode'].keys():
@@ -134,7 +135,7 @@
Force update the service even if no changes require it.
Returns:
- ``True``if successful.
+ bool: ``True`` if successful.
"""
return self.update(force_update=True, fetch_current_spec=True)
@@ -206,7 +207,7 @@
containers.
Returns:
- (:py:class:`Service`) The created service.
+ :py:class:`Service`: The created service.
Raises:
:py:class:`docker.errors.APIError`
@@ -228,7 +229,7 @@
into the output.
Returns:
- (:py:class:`Service`): The service.
+ :py:class:`Service`: The service.
Raises:
:py:class:`docker.errors.NotFound`
@@ -253,7 +254,7 @@
Default: ``None``.
Returns:
- (list of :py:class:`Service`): The services.
+ list of :py:class:`Service`: The services.
Raises:
:py:class:`docker.errors.APIError`
| {"golden_diff": "diff --git a/docker/models/services.py b/docker/models/services.py\n--- a/docker/models/services.py\n+++ b/docker/models/services.py\n@@ -42,7 +42,7 @@\n ``label``, and ``desired-state``.\n \n Returns:\n- (:py:class:`list`): List of task dictionaries.\n+ :py:class:`list`: List of task dictionaries.\n \n Raises:\n :py:class:`docker.errors.APIError`\n@@ -84,26 +84,27 @@\n \n def logs(self, **kwargs):\n \"\"\"\n- Get log stream for the service.\n- Note: This method works only for services with the ``json-file``\n- or ``journald`` logging drivers.\n-\n- Args:\n- details (bool): Show extra details provided to logs.\n- Default: ``False``\n- follow (bool): Keep connection open to read logs as they are\n- sent by the Engine. Default: ``False``\n- stdout (bool): Return logs from ``stdout``. Default: ``False``\n- stderr (bool): Return logs from ``stderr``. Default: ``False``\n- since (int): UNIX timestamp for the logs staring point.\n- Default: 0\n- timestamps (bool): Add timestamps to every log line.\n- tail (string or int): Number of log lines to be returned,\n- counting from the current end of the logs. Specify an\n- integer or ``'all'`` to output all log lines.\n- Default: ``all``\n-\n- Returns (generator): Logs for the service.\n+ Get log stream for the service.\n+ Note: This method works only for services with the ``json-file``\n+ or ``journald`` logging drivers.\n+\n+ Args:\n+ details (bool): Show extra details provided to logs.\n+ Default: ``False``\n+ follow (bool): Keep connection open to read logs as they are\n+ sent by the Engine. Default: ``False``\n+ stdout (bool): Return logs from ``stdout``. Default: ``False``\n+ stderr (bool): Return logs from ``stderr``. Default: ``False``\n+ since (int): UNIX timestamp for the logs staring point.\n+ Default: 0\n+ timestamps (bool): Add timestamps to every log line.\n+ tail (string or int): Number of log lines to be returned,\n+ counting from the current end of the logs. 
Specify an\n+ integer or ``'all'`` to output all log lines.\n+ Default: ``all``\n+\n+ Returns:\n+ generator: Logs for the service.\n \"\"\"\n is_tty = self.attrs['Spec']['TaskTemplate']['ContainerSpec'].get(\n 'TTY', False\n@@ -118,7 +119,7 @@\n replicas (int): The number of containers that should be running.\n \n Returns:\n- ``True``if successful.\n+ bool: ``True`` if successful.\n \"\"\"\n \n if 'Global' in self.attrs['Spec']['Mode'].keys():\n@@ -134,7 +135,7 @@\n Force update the service even if no changes require it.\n \n Returns:\n- ``True``if successful.\n+ bool: ``True`` if successful.\n \"\"\"\n \n return self.update(force_update=True, fetch_current_spec=True)\n@@ -206,7 +207,7 @@\n containers.\n \n Returns:\n- (:py:class:`Service`) The created service.\n+ :py:class:`Service`: The created service.\n \n Raises:\n :py:class:`docker.errors.APIError`\n@@ -228,7 +229,7 @@\n into the output.\n \n Returns:\n- (:py:class:`Service`): The service.\n+ :py:class:`Service`: The service.\n \n Raises:\n :py:class:`docker.errors.NotFound`\n@@ -253,7 +254,7 @@\n Default: ``None``.\n \n Returns:\n- (list of :py:class:`Service`): The services.\n+ list of :py:class:`Service`: The services.\n \n Raises:\n :py:class:`docker.errors.APIError`\n", "issue": "Service logs does not appear in online docs\nhttps://github.com/docker/docker-py/blob/e78e4e7491da7055151bfe454282770786a8c270/docker/models/services.py#L85\r\n\r\nhttps://docker-py.readthedocs.io/en/stable/services.html\n", "before_files": [{"content": "import copy\nfrom docker.errors import create_unexpected_kwargs_error, InvalidArgument\nfrom docker.types import TaskTemplate, ContainerSpec, Placement, ServiceMode\nfrom .resource import Model, Collection\n\n\nclass Service(Model):\n \"\"\"A service.\"\"\"\n id_attribute = 'ID'\n\n @property\n def name(self):\n \"\"\"The service's name.\"\"\"\n return self.attrs['Spec']['Name']\n\n @property\n def version(self):\n \"\"\"\n The version number of the service. If this is not the same as the\n server, the :py:meth:`update` function will not work and you will\n need to call :py:meth:`reload` before calling it again.\n \"\"\"\n return self.attrs.get('Version').get('Index')\n\n def remove(self):\n \"\"\"\n Stop and remove the service.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n return self.client.api.remove_service(self.id)\n\n def tasks(self, filters=None):\n \"\"\"\n List the tasks in this service.\n\n Args:\n filters (dict): A map of filters to process on the tasks list.\n Valid filters: ``id``, ``name``, ``node``,\n ``label``, and ``desired-state``.\n\n Returns:\n (:py:class:`list`): List of task dictionaries.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n if filters is None:\n filters = {}\n filters['service'] = self.id\n return self.client.api.tasks(filters=filters)\n\n def update(self, **kwargs):\n \"\"\"\n Update a service's configuration. 
Similar to the ``docker service\n update`` command.\n\n Takes the same parameters as :py:meth:`~ServiceCollection.create`.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n # Image is required, so if it hasn't been set, use current image\n if 'image' not in kwargs:\n spec = self.attrs['Spec']['TaskTemplate']['ContainerSpec']\n kwargs['image'] = spec['Image']\n\n if kwargs.get('force_update') is True:\n task_template = self.attrs['Spec']['TaskTemplate']\n current_value = int(task_template.get('ForceUpdate', 0))\n kwargs['force_update'] = current_value + 1\n\n create_kwargs = _get_create_service_kwargs('update', kwargs)\n\n return self.client.api.update_service(\n self.id,\n self.version,\n **create_kwargs\n )\n\n def logs(self, **kwargs):\n \"\"\"\n Get log stream for the service.\n Note: This method works only for services with the ``json-file``\n or ``journald`` logging drivers.\n\n Args:\n details (bool): Show extra details provided to logs.\n Default: ``False``\n follow (bool): Keep connection open to read logs as they are\n sent by the Engine. Default: ``False``\n stdout (bool): Return logs from ``stdout``. Default: ``False``\n stderr (bool): Return logs from ``stderr``. Default: ``False``\n since (int): UNIX timestamp for the logs staring point.\n Default: 0\n timestamps (bool): Add timestamps to every log line.\n tail (string or int): Number of log lines to be returned,\n counting from the current end of the logs. Specify an\n integer or ``'all'`` to output all log lines.\n Default: ``all``\n\n Returns (generator): Logs for the service.\n \"\"\"\n is_tty = self.attrs['Spec']['TaskTemplate']['ContainerSpec'].get(\n 'TTY', False\n )\n return self.client.api.service_logs(self.id, is_tty=is_tty, **kwargs)\n\n def scale(self, replicas):\n \"\"\"\n Scale service container.\n\n Args:\n replicas (int): The number of containers that should be running.\n\n Returns:\n ``True``if successful.\n \"\"\"\n\n if 'Global' in self.attrs['Spec']['Mode'].keys():\n raise InvalidArgument('Cannot scale a global container')\n\n service_mode = ServiceMode('replicated', replicas)\n return self.client.api.update_service(self.id, self.version,\n mode=service_mode,\n fetch_current_spec=True)\n\n def force_update(self):\n \"\"\"\n Force update the service even if no changes require it.\n\n Returns:\n ``True``if successful.\n \"\"\"\n\n return self.update(force_update=True, fetch_current_spec=True)\n\n\nclass ServiceCollection(Collection):\n \"\"\"Services on the Docker server.\"\"\"\n model = Service\n\n def create(self, image, command=None, **kwargs):\n \"\"\"\n Create a service. Similar to the ``docker service create`` command.\n\n Args:\n image (str): The image name to use for the containers.\n command (list of str or str): Command to run.\n args (list of str): Arguments to the command.\n constraints (list of str): Placement constraints.\n preferences (list of str): Placement preferences.\n platforms (list of tuple): A list of platforms constraints\n expressed as ``(arch, os)`` tuples\n container_labels (dict): Labels to apply to the container.\n endpoint_spec (EndpointSpec): Properties that can be configured to\n access and load balance a service. Default: ``None``.\n env (list of str): Environment variables, in the form\n ``KEY=val``.\n hostname (string): Hostname to set on the container.\n isolation (string): Isolation technology used by the service's\n containers. 
Only used for Windows containers.\n labels (dict): Labels to apply to the service.\n log_driver (str): Log driver to use for containers.\n log_driver_options (dict): Log driver options.\n mode (ServiceMode): Scheduling mode for the service.\n Default:``None``\n mounts (list of str): Mounts for the containers, in the form\n ``source:target:options``, where options is either\n ``ro`` or ``rw``.\n name (str): Name to give to the service.\n networks (list of str): List of network names or IDs to attach\n the service to. Default: ``None``.\n resources (Resources): Resource limits and reservations.\n restart_policy (RestartPolicy): Restart policy for containers.\n secrets (list of :py:class:`docker.types.SecretReference`): List\n of secrets accessible to containers for this service.\n stop_grace_period (int): Amount of time to wait for\n containers to terminate before forcefully killing them.\n update_config (UpdateConfig): Specification for the update strategy\n of the service. Default: ``None``\n rollback_config (RollbackConfig): Specification for the rollback\n strategy of the service. Default: ``None``\n user (str): User to run commands as.\n workdir (str): Working directory for commands to run.\n tty (boolean): Whether a pseudo-TTY should be allocated.\n groups (:py:class:`list`): A list of additional groups that the\n container process will run as.\n open_stdin (boolean): Open ``stdin``\n read_only (boolean): Mount the container's root filesystem as read\n only.\n stop_signal (string): Set signal to stop the service's containers\n healthcheck (Healthcheck): Healthcheck\n configuration for this service.\n hosts (:py:class:`dict`): A set of host to IP mappings to add to\n the container's `hosts` file.\n dns_config (DNSConfig): Specification for DNS\n related configurations in resolver configuration file.\n configs (:py:class:`list`): List of :py:class:`ConfigReference`\n that will be exposed to the service.\n privileges (Privileges): Security options for the service's\n containers.\n\n Returns:\n (:py:class:`Service`) The created service.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n kwargs['image'] = image\n kwargs['command'] = command\n create_kwargs = _get_create_service_kwargs('create', kwargs)\n service_id = self.client.api.create_service(**create_kwargs)\n return self.get(service_id)\n\n def get(self, service_id, insert_defaults=None):\n \"\"\"\n Get a service.\n\n Args:\n service_id (str): The ID of the service.\n insert_defaults (boolean): If true, default values will be merged\n into the output.\n\n Returns:\n (:py:class:`Service`): The service.\n\n Raises:\n :py:class:`docker.errors.NotFound`\n If the service does not exist.\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n :py:class:`docker.errors.InvalidVersion`\n If one of the arguments is not supported with the current\n API version.\n \"\"\"\n return self.prepare_model(\n self.client.api.inspect_service(service_id, insert_defaults)\n )\n\n def list(self, **kwargs):\n \"\"\"\n List services.\n\n Args:\n filters (dict): Filters to process on the nodes list. 
Valid\n filters: ``id``, ``name`` , ``label`` and ``mode``.\n Default: ``None``.\n\n Returns:\n (list of :py:class:`Service`): The services.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n return [\n self.prepare_model(s)\n for s in self.client.api.services(**kwargs)\n ]\n\n\n# kwargs to copy straight over to ContainerSpec\nCONTAINER_SPEC_KWARGS = [\n 'args',\n 'command',\n 'configs',\n 'dns_config',\n 'env',\n 'groups',\n 'healthcheck',\n 'hostname',\n 'hosts',\n 'image',\n 'isolation',\n 'labels',\n 'mounts',\n 'open_stdin',\n 'privileges',\n 'read_only',\n 'secrets',\n 'stop_grace_period',\n 'stop_signal',\n 'tty',\n 'user',\n 'workdir',\n]\n\n# kwargs to copy straight over to TaskTemplate\nTASK_TEMPLATE_KWARGS = [\n 'networks',\n 'resources',\n 'restart_policy',\n]\n\n# kwargs to copy straight over to create_service\nCREATE_SERVICE_KWARGS = [\n 'name',\n 'labels',\n 'mode',\n 'update_config',\n 'endpoint_spec',\n]\n\nPLACEMENT_KWARGS = [\n 'constraints',\n 'preferences',\n 'platforms',\n]\n\n\ndef _get_create_service_kwargs(func_name, kwargs):\n # Copy over things which can be copied directly\n create_kwargs = {}\n for key in copy.copy(kwargs):\n if key in CREATE_SERVICE_KWARGS:\n create_kwargs[key] = kwargs.pop(key)\n container_spec_kwargs = {}\n for key in copy.copy(kwargs):\n if key in CONTAINER_SPEC_KWARGS:\n container_spec_kwargs[key] = kwargs.pop(key)\n task_template_kwargs = {}\n for key in copy.copy(kwargs):\n if key in TASK_TEMPLATE_KWARGS:\n task_template_kwargs[key] = kwargs.pop(key)\n\n if 'container_labels' in kwargs:\n container_spec_kwargs['labels'] = kwargs.pop('container_labels')\n\n placement = {}\n for key in copy.copy(kwargs):\n if key in PLACEMENT_KWARGS:\n placement[key] = kwargs.pop(key)\n placement = Placement(**placement)\n task_template_kwargs['placement'] = placement\n\n if 'log_driver' in kwargs:\n task_template_kwargs['log_driver'] = {\n 'Name': kwargs.pop('log_driver'),\n 'Options': kwargs.pop('log_driver_options', {})\n }\n\n if func_name == 'update':\n if 'force_update' in kwargs:\n task_template_kwargs['force_update'] = kwargs.pop('force_update')\n\n # fetch the current spec by default if updating the service\n # through the model\n fetch_current_spec = kwargs.pop('fetch_current_spec', True)\n create_kwargs['fetch_current_spec'] = fetch_current_spec\n\n # All kwargs should have been consumed by this point, so raise\n # error if any are left\n if kwargs:\n raise create_unexpected_kwargs_error(func_name, kwargs)\n\n container_spec = ContainerSpec(**container_spec_kwargs)\n task_template_kwargs['container_spec'] = container_spec\n create_kwargs['task_template'] = TaskTemplate(**task_template_kwargs)\n return create_kwargs\n", "path": "docker/models/services.py"}], "after_files": [{"content": "import copy\nfrom docker.errors import create_unexpected_kwargs_error, InvalidArgument\nfrom docker.types import TaskTemplate, ContainerSpec, Placement, ServiceMode\nfrom .resource import Model, Collection\n\n\nclass Service(Model):\n \"\"\"A service.\"\"\"\n id_attribute = 'ID'\n\n @property\n def name(self):\n \"\"\"The service's name.\"\"\"\n return self.attrs['Spec']['Name']\n\n @property\n def version(self):\n \"\"\"\n The version number of the service. 
If this is not the same as the\n server, the :py:meth:`update` function will not work and you will\n need to call :py:meth:`reload` before calling it again.\n \"\"\"\n return self.attrs.get('Version').get('Index')\n\n def remove(self):\n \"\"\"\n Stop and remove the service.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n return self.client.api.remove_service(self.id)\n\n def tasks(self, filters=None):\n \"\"\"\n List the tasks in this service.\n\n Args:\n filters (dict): A map of filters to process on the tasks list.\n Valid filters: ``id``, ``name``, ``node``,\n ``label``, and ``desired-state``.\n\n Returns:\n :py:class:`list`: List of task dictionaries.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n if filters is None:\n filters = {}\n filters['service'] = self.id\n return self.client.api.tasks(filters=filters)\n\n def update(self, **kwargs):\n \"\"\"\n Update a service's configuration. Similar to the ``docker service\n update`` command.\n\n Takes the same parameters as :py:meth:`~ServiceCollection.create`.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n # Image is required, so if it hasn't been set, use current image\n if 'image' not in kwargs:\n spec = self.attrs['Spec']['TaskTemplate']['ContainerSpec']\n kwargs['image'] = spec['Image']\n\n if kwargs.get('force_update') is True:\n task_template = self.attrs['Spec']['TaskTemplate']\n current_value = int(task_template.get('ForceUpdate', 0))\n kwargs['force_update'] = current_value + 1\n\n create_kwargs = _get_create_service_kwargs('update', kwargs)\n\n return self.client.api.update_service(\n self.id,\n self.version,\n **create_kwargs\n )\n\n def logs(self, **kwargs):\n \"\"\"\n Get log stream for the service.\n Note: This method works only for services with the ``json-file``\n or ``journald`` logging drivers.\n\n Args:\n details (bool): Show extra details provided to logs.\n Default: ``False``\n follow (bool): Keep connection open to read logs as they are\n sent by the Engine. Default: ``False``\n stdout (bool): Return logs from ``stdout``. Default: ``False``\n stderr (bool): Return logs from ``stderr``. Default: ``False``\n since (int): UNIX timestamp for the logs staring point.\n Default: 0\n timestamps (bool): Add timestamps to every log line.\n tail (string or int): Number of log lines to be returned,\n counting from the current end of the logs. 
Specify an\n integer or ``'all'`` to output all log lines.\n Default: ``all``\n\n Returns:\n generator: Logs for the service.\n \"\"\"\n is_tty = self.attrs['Spec']['TaskTemplate']['ContainerSpec'].get(\n 'TTY', False\n )\n return self.client.api.service_logs(self.id, is_tty=is_tty, **kwargs)\n\n def scale(self, replicas):\n \"\"\"\n Scale service container.\n\n Args:\n replicas (int): The number of containers that should be running.\n\n Returns:\n bool: ``True`` if successful.\n \"\"\"\n\n if 'Global' in self.attrs['Spec']['Mode'].keys():\n raise InvalidArgument('Cannot scale a global container')\n\n service_mode = ServiceMode('replicated', replicas)\n return self.client.api.update_service(self.id, self.version,\n mode=service_mode,\n fetch_current_spec=True)\n\n def force_update(self):\n \"\"\"\n Force update the service even if no changes require it.\n\n Returns:\n bool: ``True`` if successful.\n \"\"\"\n\n return self.update(force_update=True, fetch_current_spec=True)\n\n\nclass ServiceCollection(Collection):\n \"\"\"Services on the Docker server.\"\"\"\n model = Service\n\n def create(self, image, command=None, **kwargs):\n \"\"\"\n Create a service. Similar to the ``docker service create`` command.\n\n Args:\n image (str): The image name to use for the containers.\n command (list of str or str): Command to run.\n args (list of str): Arguments to the command.\n constraints (list of str): Placement constraints.\n preferences (list of str): Placement preferences.\n platforms (list of tuple): A list of platforms constraints\n expressed as ``(arch, os)`` tuples\n container_labels (dict): Labels to apply to the container.\n endpoint_spec (EndpointSpec): Properties that can be configured to\n access and load balance a service. Default: ``None``.\n env (list of str): Environment variables, in the form\n ``KEY=val``.\n hostname (string): Hostname to set on the container.\n isolation (string): Isolation technology used by the service's\n containers. Only used for Windows containers.\n labels (dict): Labels to apply to the service.\n log_driver (str): Log driver to use for containers.\n log_driver_options (dict): Log driver options.\n mode (ServiceMode): Scheduling mode for the service.\n Default:``None``\n mounts (list of str): Mounts for the containers, in the form\n ``source:target:options``, where options is either\n ``ro`` or ``rw``.\n name (str): Name to give to the service.\n networks (list of str): List of network names or IDs to attach\n the service to. Default: ``None``.\n resources (Resources): Resource limits and reservations.\n restart_policy (RestartPolicy): Restart policy for containers.\n secrets (list of :py:class:`docker.types.SecretReference`): List\n of secrets accessible to containers for this service.\n stop_grace_period (int): Amount of time to wait for\n containers to terminate before forcefully killing them.\n update_config (UpdateConfig): Specification for the update strategy\n of the service. Default: ``None``\n rollback_config (RollbackConfig): Specification for the rollback\n strategy of the service. 
Default: ``None``\n user (str): User to run commands as.\n workdir (str): Working directory for commands to run.\n tty (boolean): Whether a pseudo-TTY should be allocated.\n groups (:py:class:`list`): A list of additional groups that the\n container process will run as.\n open_stdin (boolean): Open ``stdin``\n read_only (boolean): Mount the container's root filesystem as read\n only.\n stop_signal (string): Set signal to stop the service's containers\n healthcheck (Healthcheck): Healthcheck\n configuration for this service.\n hosts (:py:class:`dict`): A set of host to IP mappings to add to\n the container's `hosts` file.\n dns_config (DNSConfig): Specification for DNS\n related configurations in resolver configuration file.\n configs (:py:class:`list`): List of :py:class:`ConfigReference`\n that will be exposed to the service.\n privileges (Privileges): Security options for the service's\n containers.\n\n Returns:\n :py:class:`Service`: The created service.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n kwargs['image'] = image\n kwargs['command'] = command\n create_kwargs = _get_create_service_kwargs('create', kwargs)\n service_id = self.client.api.create_service(**create_kwargs)\n return self.get(service_id)\n\n def get(self, service_id, insert_defaults=None):\n \"\"\"\n Get a service.\n\n Args:\n service_id (str): The ID of the service.\n insert_defaults (boolean): If true, default values will be merged\n into the output.\n\n Returns:\n :py:class:`Service`: The service.\n\n Raises:\n :py:class:`docker.errors.NotFound`\n If the service does not exist.\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n :py:class:`docker.errors.InvalidVersion`\n If one of the arguments is not supported with the current\n API version.\n \"\"\"\n return self.prepare_model(\n self.client.api.inspect_service(service_id, insert_defaults)\n )\n\n def list(self, **kwargs):\n \"\"\"\n List services.\n\n Args:\n filters (dict): Filters to process on the nodes list. 
Valid\n filters: ``id``, ``name`` , ``label`` and ``mode``.\n Default: ``None``.\n\n Returns:\n list of :py:class:`Service`: The services.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n return [\n self.prepare_model(s)\n for s in self.client.api.services(**kwargs)\n ]\n\n\n# kwargs to copy straight over to ContainerSpec\nCONTAINER_SPEC_KWARGS = [\n 'args',\n 'command',\n 'configs',\n 'dns_config',\n 'env',\n 'groups',\n 'healthcheck',\n 'hostname',\n 'hosts',\n 'image',\n 'isolation',\n 'labels',\n 'mounts',\n 'open_stdin',\n 'privileges',\n 'read_only',\n 'secrets',\n 'stop_grace_period',\n 'stop_signal',\n 'tty',\n 'user',\n 'workdir',\n]\n\n# kwargs to copy straight over to TaskTemplate\nTASK_TEMPLATE_KWARGS = [\n 'networks',\n 'resources',\n 'restart_policy',\n]\n\n# kwargs to copy straight over to create_service\nCREATE_SERVICE_KWARGS = [\n 'name',\n 'labels',\n 'mode',\n 'update_config',\n 'endpoint_spec',\n]\n\nPLACEMENT_KWARGS = [\n 'constraints',\n 'preferences',\n 'platforms',\n]\n\n\ndef _get_create_service_kwargs(func_name, kwargs):\n # Copy over things which can be copied directly\n create_kwargs = {}\n for key in copy.copy(kwargs):\n if key in CREATE_SERVICE_KWARGS:\n create_kwargs[key] = kwargs.pop(key)\n container_spec_kwargs = {}\n for key in copy.copy(kwargs):\n if key in CONTAINER_SPEC_KWARGS:\n container_spec_kwargs[key] = kwargs.pop(key)\n task_template_kwargs = {}\n for key in copy.copy(kwargs):\n if key in TASK_TEMPLATE_KWARGS:\n task_template_kwargs[key] = kwargs.pop(key)\n\n if 'container_labels' in kwargs:\n container_spec_kwargs['labels'] = kwargs.pop('container_labels')\n\n placement = {}\n for key in copy.copy(kwargs):\n if key in PLACEMENT_KWARGS:\n placement[key] = kwargs.pop(key)\n placement = Placement(**placement)\n task_template_kwargs['placement'] = placement\n\n if 'log_driver' in kwargs:\n task_template_kwargs['log_driver'] = {\n 'Name': kwargs.pop('log_driver'),\n 'Options': kwargs.pop('log_driver_options', {})\n }\n\n if func_name == 'update':\n if 'force_update' in kwargs:\n task_template_kwargs['force_update'] = kwargs.pop('force_update')\n\n # fetch the current spec by default if updating the service\n # through the model\n fetch_current_spec = kwargs.pop('fetch_current_spec', True)\n create_kwargs['fetch_current_spec'] = fetch_current_spec\n\n # All kwargs should have been consumed by this point, so raise\n # error if any are left\n if kwargs:\n raise create_unexpected_kwargs_error(func_name, kwargs)\n\n container_spec = ContainerSpec(**container_spec_kwargs)\n task_template_kwargs['container_spec'] = container_spec\n create_kwargs['task_template'] = TaskTemplate(**task_template_kwargs)\n return create_kwargs\n", "path": "docker/models/services.py"}]} | 4,006 | 911 |
gh_patches_debug_26655 | rasdani/github-patches | git_diff | scikit-image__scikit-image-6989 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Existing "inpainting" gallery example could use a better (more specific) title.
Creating this issue so we don't lose track of what's been discussed in the conversation.

_Originally posted by @lagru in https://github.com/scikit-image/scikit-image/pull/6853#discussion_r1149741067_
> @mkcor, just wondering how this relates to [our existing inpainting example ](https://scikit-image.org/docs/dev/auto_examples/filters/plot_inpaint.html#sphx-glr-auto-examples-filters-plot-inpaint-py). I am assuming that the main benefit here is that it's a real world use case?
[...]
> Which prompts the idea that we should update the title of the existing example, so it's less generic than just "inpainting."
--- END ISSUE ---
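For reference, Sphinx-Gallery takes a gallery entry's title from the first reST heading of the example script's module docstring, so the rename only needs to touch that heading; the file name (`plot_inpaint.py`) is unchanged, so the page URL and the `sphx-glr-...` anchor stay the same and existing links keep working. A minimal sketch of the pattern (the placeholder title below is illustrative — the wording actually chosen is in the patch further down):

```python
"""
=======================================
<More specific, descriptive title here>
=======================================

One- or two-sentence description shown under the title in the gallery index.
"""

# ...the rest of the example script is unchanged...
```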
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `doc/examples/filters/plot_inpaint.py`
Content:
```
1 """
2 ===========
3 Inpainting
4 ===========
5 Inpainting [1]_ is the process of reconstructing lost or deteriorated
6 parts of images and videos.
7
8 The reconstruction is supposed to be performed in fully automatic way by
9 exploiting the information presented in non-damaged regions.
10
11 In this example, we show how the masked pixels get inpainted by
12 inpainting algorithm based on 'biharmonic equation'-assumption [2]_ [3]_ [4]_.
13
14 .. [1] Wikipedia. Inpainting
15 https://en.wikipedia.org/wiki/Inpainting
16 .. [2] Wikipedia. Biharmonic equation
17 https://en.wikipedia.org/wiki/Biharmonic_equation
18 .. [3] S.B.Damelin and N.S.Hoang. "On Surface Completion and Image
19 Inpainting by Biharmonic Functions: Numerical Aspects",
20 International Journal of Mathematics and Mathematical Sciences,
21 Vol. 2018, Article ID 3950312
22 :DOI:`10.1155/2018/3950312`
23 .. [4] C. K. Chui and H. N. Mhaskar, MRA Contextual-Recovery Extension of
24 Smooth Functions on Manifolds, Appl. and Comp. Harmonic Anal.,
25 28 (2010), 104-113,
26 :DOI:`10.1016/j.acha.2009.04.004`
27 """
28
29 import numpy as np
30 import matplotlib.pyplot as plt
31
32 from skimage import data
33 from skimage.morphology import disk, binary_dilation
34 from skimage.restoration import inpaint
35
36 image_orig = data.astronaut()
37
38 # Create mask with six block defect regions
39 mask = np.zeros(image_orig.shape[:-1], dtype=bool)
40 mask[20:60, 0:20] = 1
41 mask[160:180, 70:155] = 1
42 mask[30:60, 170:195] = 1
43 mask[-60:-30, 170:195] = 1
44 mask[-180:-160, 70:155] = 1
45 mask[-60:-20, 0:20] = 1
46
47 # add a few long, narrow defects
48 mask[200:205, -200:] = 1
49 mask[150:255, 20:23] = 1
50 mask[365:368, 60:130] = 1
51
52 # add randomly positioned small point-like defects
53 rstate = np.random.default_rng(0)
54 for radius in [0, 2, 4]:
55 # larger defects are less common
56 thresh = 3 + 0.25 * radius # make larger defects less common
57 tmp_mask = rstate.standard_normal(image_orig.shape[:-1]) > thresh
58 if radius > 0:
59 tmp_mask = binary_dilation(tmp_mask, disk(radius, dtype=bool))
60 mask[tmp_mask] = 1
61
62 # Apply defect mask to the image over the same region in each color channel
63 image_defect = image_orig * ~mask[..., np.newaxis]
64
65 image_result = inpaint.inpaint_biharmonic(image_defect, mask, channel_axis=-1)
66
67 fig, axes = plt.subplots(ncols=2, nrows=2)
68 ax = axes.ravel()
69
70 ax[0].set_title('Original image')
71 ax[0].imshow(image_orig)
72
73 ax[1].set_title('Mask')
74 ax[1].imshow(mask, cmap=plt.cm.gray)
75
76 ax[2].set_title('Defected image')
77 ax[2].imshow(image_defect)
78
79 ax[3].set_title('Inpainted image')
80 ax[3].imshow(image_result)
81
82 for a in ax:
83 a.axis('off')
84
85 fig.tight_layout()
86 plt.show()
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/doc/examples/filters/plot_inpaint.py b/doc/examples/filters/plot_inpaint.py
--- a/doc/examples/filters/plot_inpaint.py
+++ b/doc/examples/filters/plot_inpaint.py
@@ -1,15 +1,16 @@
"""
-===========
-Inpainting
-===========
+===============================
+Fill in defects with inpainting
+===============================
+
Inpainting [1]_ is the process of reconstructing lost or deteriorated
parts of images and videos.
-The reconstruction is supposed to be performed in fully automatic way by
-exploiting the information presented in non-damaged regions.
+The reconstruction (restoration) is performed in an automatic way by
+exploiting the information present in non-damaged regions.
-In this example, we show how the masked pixels get inpainted by
-inpainting algorithm based on 'biharmonic equation'-assumption [2]_ [3]_ [4]_.
+In this example, we show how the masked pixels get inpainted using an
+inpainting algorithm based on the biharmonic equation [2]_ [3]_ [4]_.
.. [1] Wikipedia. Inpainting
https://en.wikipedia.org/wiki/Inpainting
@@ -44,12 +45,12 @@
mask[-180:-160, 70:155] = 1
mask[-60:-20, 0:20] = 1
-# add a few long, narrow defects
+# Add a few long, narrow defects
mask[200:205, -200:] = 1
mask[150:255, 20:23] = 1
mask[365:368, 60:130] = 1
-# add randomly positioned small point-like defects
+# Add randomly positioned small point-like defects
rstate = np.random.default_rng(0)
for radius in [0, 2, 4]:
# larger defects are less common
| {"golden_diff": "diff --git a/doc/examples/filters/plot_inpaint.py b/doc/examples/filters/plot_inpaint.py\n--- a/doc/examples/filters/plot_inpaint.py\n+++ b/doc/examples/filters/plot_inpaint.py\n@@ -1,15 +1,16 @@\n \"\"\"\n-===========\n-Inpainting\n-===========\n+===============================\n+Fill in defects with inpainting\n+===============================\n+\n Inpainting [1]_ is the process of reconstructing lost or deteriorated\n parts of images and videos.\n \n-The reconstruction is supposed to be performed in fully automatic way by\n-exploiting the information presented in non-damaged regions.\n+The reconstruction (restoration) is performed in an automatic way by\n+exploiting the information present in non-damaged regions.\n \n-In this example, we show how the masked pixels get inpainted by\n-inpainting algorithm based on 'biharmonic equation'-assumption [2]_ [3]_ [4]_.\n+In this example, we show how the masked pixels get inpainted using an\n+inpainting algorithm based on the biharmonic equation [2]_ [3]_ [4]_.\n \n .. [1] Wikipedia. Inpainting\n https://en.wikipedia.org/wiki/Inpainting\n@@ -44,12 +45,12 @@\n mask[-180:-160, 70:155] = 1\n mask[-60:-20, 0:20] = 1\n \n-# add a few long, narrow defects\n+# Add a few long, narrow defects\n mask[200:205, -200:] = 1\n mask[150:255, 20:23] = 1\n mask[365:368, 60:130] = 1\n \n-# add randomly positioned small point-like defects\n+# Add randomly positioned small point-like defects\n rstate = np.random.default_rng(0)\n for radius in [0, 2, 4]:\n # larger defects are less common\n", "issue": "Existing \"inpainting\" gallery example could use a better (more specific) title.\nCreating this issue so we don't lose track of what's been discussed in the conversation _Originally posted by @lagru in https://github.com/scikit-image/scikit-image/pull/6853#discussion_r1149741067_\r\n\r\n> @mkcor, just wondering how this relates to [our existing inpainting example ](https://scikit-image.org/docs/dev/auto_examples/filters/plot_inpaint.html#sphx-glr-auto-examples-filters-plot-inpaint-py). I am assuming that the main benefit here is that it's a real world use case?\r\n\r\n[...]\r\n\r\n> Which prompts the idea that we should update the title of the existing example, so it's less generic than just \"inpainting.\"\n", "before_files": [{"content": "\"\"\"\n===========\nInpainting\n===========\nInpainting [1]_ is the process of reconstructing lost or deteriorated\nparts of images and videos.\n\nThe reconstruction is supposed to be performed in fully automatic way by\nexploiting the information presented in non-damaged regions.\n\nIn this example, we show how the masked pixels get inpainted by\ninpainting algorithm based on 'biharmonic equation'-assumption [2]_ [3]_ [4]_.\n\n.. [1] Wikipedia. Inpainting\n https://en.wikipedia.org/wiki/Inpainting\n.. [2] Wikipedia. Biharmonic equation\n https://en.wikipedia.org/wiki/Biharmonic_equation\n.. [3] S.B.Damelin and N.S.Hoang. \"On Surface Completion and Image\n Inpainting by Biharmonic Functions: Numerical Aspects\",\n International Journal of Mathematics and Mathematical Sciences,\n Vol. 2018, Article ID 3950312\n :DOI:`10.1155/2018/3950312`\n.. [4] C. K. Chui and H. N. Mhaskar, MRA Contextual-Recovery Extension of\n Smooth Functions on Manifolds, Appl. and Comp. 
Harmonic Anal.,\n 28 (2010), 104-113,\n :DOI:`10.1016/j.acha.2009.04.004`\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom skimage import data\nfrom skimage.morphology import disk, binary_dilation\nfrom skimage.restoration import inpaint\n\nimage_orig = data.astronaut()\n\n# Create mask with six block defect regions\nmask = np.zeros(image_orig.shape[:-1], dtype=bool)\nmask[20:60, 0:20] = 1\nmask[160:180, 70:155] = 1\nmask[30:60, 170:195] = 1\nmask[-60:-30, 170:195] = 1\nmask[-180:-160, 70:155] = 1\nmask[-60:-20, 0:20] = 1\n\n# add a few long, narrow defects\nmask[200:205, -200:] = 1\nmask[150:255, 20:23] = 1\nmask[365:368, 60:130] = 1\n\n# add randomly positioned small point-like defects\nrstate = np.random.default_rng(0)\nfor radius in [0, 2, 4]:\n # larger defects are less common\n thresh = 3 + 0.25 * radius # make larger defects less common\n tmp_mask = rstate.standard_normal(image_orig.shape[:-1]) > thresh\n if radius > 0:\n tmp_mask = binary_dilation(tmp_mask, disk(radius, dtype=bool))\n mask[tmp_mask] = 1\n\n# Apply defect mask to the image over the same region in each color channel\nimage_defect = image_orig * ~mask[..., np.newaxis]\n\nimage_result = inpaint.inpaint_biharmonic(image_defect, mask, channel_axis=-1)\n\nfig, axes = plt.subplots(ncols=2, nrows=2)\nax = axes.ravel()\n\nax[0].set_title('Original image')\nax[0].imshow(image_orig)\n\nax[1].set_title('Mask')\nax[1].imshow(mask, cmap=plt.cm.gray)\n\nax[2].set_title('Defected image')\nax[2].imshow(image_defect)\n\nax[3].set_title('Inpainted image')\nax[3].imshow(image_result)\n\nfor a in ax:\n a.axis('off')\n\nfig.tight_layout()\nplt.show()\n", "path": "doc/examples/filters/plot_inpaint.py"}], "after_files": [{"content": "\"\"\"\n===============================\nFill in defects with inpainting\n===============================\n\nInpainting [1]_ is the process of reconstructing lost or deteriorated\nparts of images and videos.\n\nThe reconstruction (restoration) is performed in an automatic way by\nexploiting the information present in non-damaged regions.\n\nIn this example, we show how the masked pixels get inpainted using an\ninpainting algorithm based on the biharmonic equation [2]_ [3]_ [4]_.\n\n.. [1] Wikipedia. Inpainting\n https://en.wikipedia.org/wiki/Inpainting\n.. [2] Wikipedia. Biharmonic equation\n https://en.wikipedia.org/wiki/Biharmonic_equation\n.. [3] S.B.Damelin and N.S.Hoang. \"On Surface Completion and Image\n Inpainting by Biharmonic Functions: Numerical Aspects\",\n International Journal of Mathematics and Mathematical Sciences,\n Vol. 2018, Article ID 3950312\n :DOI:`10.1155/2018/3950312`\n.. [4] C. K. Chui and H. N. Mhaskar, MRA Contextual-Recovery Extension of\n Smooth Functions on Manifolds, Appl. and Comp. 
Harmonic Anal.,\n 28 (2010), 104-113,\n :DOI:`10.1016/j.acha.2009.04.004`\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom skimage import data\nfrom skimage.morphology import disk, binary_dilation\nfrom skimage.restoration import inpaint\n\nimage_orig = data.astronaut()\n\n# Create mask with six block defect regions\nmask = np.zeros(image_orig.shape[:-1], dtype=bool)\nmask[20:60, 0:20] = 1\nmask[160:180, 70:155] = 1\nmask[30:60, 170:195] = 1\nmask[-60:-30, 170:195] = 1\nmask[-180:-160, 70:155] = 1\nmask[-60:-20, 0:20] = 1\n\n# Add a few long, narrow defects\nmask[200:205, -200:] = 1\nmask[150:255, 20:23] = 1\nmask[365:368, 60:130] = 1\n\n# Add randomly positioned small point-like defects\nrstate = np.random.default_rng(0)\nfor radius in [0, 2, 4]:\n # larger defects are less common\n thresh = 3 + 0.25 * radius # make larger defects less common\n tmp_mask = rstate.standard_normal(image_orig.shape[:-1]) > thresh\n if radius > 0:\n tmp_mask = binary_dilation(tmp_mask, disk(radius, dtype=bool))\n mask[tmp_mask] = 1\n\n# Apply defect mask to the image over the same region in each color channel\nimage_defect = image_orig * ~mask[..., np.newaxis]\n\nimage_result = inpaint.inpaint_biharmonic(image_defect, mask, channel_axis=-1)\n\nfig, axes = plt.subplots(ncols=2, nrows=2)\nax = axes.ravel()\n\nax[0].set_title('Original image')\nax[0].imshow(image_orig)\n\nax[1].set_title('Mask')\nax[1].imshow(mask, cmap=plt.cm.gray)\n\nax[2].set_title('Defected image')\nax[2].imshow(image_defect)\n\nax[3].set_title('Inpainted image')\nax[3].imshow(image_result)\n\nfor a in ax:\n a.axis('off')\n\nfig.tight_layout()\nplt.show()\n", "path": "doc/examples/filters/plot_inpaint.py"}]} | 1,489 | 454 |
gh_patches_debug_21673 | rasdani/github-patches | git_diff | ivy-llc__ivy-13280 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
unwrap
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/numpy/mathematical_functions/other_special_functions.py`
Content:
```
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ivy/functional/frontends/numpy/mathematical_functions/other_special_functions.py b/ivy/functional/frontends/numpy/mathematical_functions/other_special_functions.py
--- a/ivy/functional/frontends/numpy/mathematical_functions/other_special_functions.py
+++ b/ivy/functional/frontends/numpy/mathematical_functions/other_special_functions.py
@@ -0,0 +1,48 @@
+# global
+import ivy
+
+# local
+from ivy.functional.frontends.numpy.func_wrapper import (
+ to_ivy_arrays_and_back,
+ handle_numpy_dtype,
+ from_zero_dim_arrays_to_scalar,
+ handle_numpy_out,
+)
+
+
+
+@handle_numpy_out
+@handle_numpy_dtype
+@to_ivy_arrays_and_back
+@from_zero_dim_arrays_to_scalar
+def unwrap(p, discont=None, axis=-1, *, period=2*pi):
+ p = ivy.Array.asarray(p)
+ nd = p.ndim
+ dd = ivy.diff(p, axis=axis)
+ if discont is None:
+ discont = period/2
+ slice1 = [ivy.slice(None, None)]*nd # full slices
+ slice1[axis] = ivy.slice(1, None)
+ slice1 = ivy.tuple(slice1)
+ dtype = ivy.result_type(dd, period)
+ if ivy.issubdtype(dtype, ivy.integer):
+ interval_high, rem = ivy.divmod(period, 2)
+ boundary_ambiguous = rem == 0
+ else:
+ interval_high = period / 2
+ boundary_ambiguous = True
+ interval_low = -interval_high
+ ddmod = ivy.mod(dd - interval_low, period) + interval_low
+ if boundary_ambiguous:
+ ivy.copyto(ddmod, interval_high,
+ where=(ddmod == interval_low) & (dd > 0))
+ ph_correct = ddmod - dd
+ ivy.copyto(ph_correct, 0, where=ivy.abs(dd) < discont)
+ up = ivy.array(p, copy=True, dtype=dtype)
+ up[slice1] = p[slice1] + ph_correct.cumsum(axis)
+ return up
+
+my_list = [24,8,3,4,34,8]
+ans = unwrap(my_list)
+print("After the np.unwrap()")
+print(ans)
\ No newline at end of file
| {"golden_diff": "diff --git a/ivy/functional/frontends/numpy/mathematical_functions/other_special_functions.py b/ivy/functional/frontends/numpy/mathematical_functions/other_special_functions.py\n--- a/ivy/functional/frontends/numpy/mathematical_functions/other_special_functions.py\n+++ b/ivy/functional/frontends/numpy/mathematical_functions/other_special_functions.py\n@@ -0,0 +1,48 @@\n+# global\n+import ivy\n+\n+# local\n+from ivy.functional.frontends.numpy.func_wrapper import (\n+ to_ivy_arrays_and_back,\n+ handle_numpy_dtype,\n+ from_zero_dim_arrays_to_scalar,\n+ handle_numpy_out,\n+)\n+\n+\n+\n+@handle_numpy_out\n+@handle_numpy_dtype\n+@to_ivy_arrays_and_back\n+@from_zero_dim_arrays_to_scalar\n+def unwrap(p, discont=None, axis=-1, *, period=2*pi):\n+ p = ivy.Array.asarray(p)\n+ nd = p.ndim\n+ dd = ivy.diff(p, axis=axis)\n+ if discont is None:\n+ discont = period/2\n+ slice1 = [ivy.slice(None, None)]*nd # full slices\n+ slice1[axis] = ivy.slice(1, None)\n+ slice1 = ivy.tuple(slice1)\n+ dtype = ivy.result_type(dd, period)\n+ if ivy.issubdtype(dtype, ivy.integer):\n+ interval_high, rem = ivy.divmod(period, 2)\n+ boundary_ambiguous = rem == 0\n+ else:\n+ interval_high = period / 2\n+ boundary_ambiguous = True\n+ interval_low = -interval_high\n+ ddmod = ivy.mod(dd - interval_low, period) + interval_low\n+ if boundary_ambiguous:\n+ ivy.copyto(ddmod, interval_high,\n+ where=(ddmod == interval_low) & (dd > 0))\n+ ph_correct = ddmod - dd\n+ ivy.copyto(ph_correct, 0, where=ivy.abs(dd) < discont)\n+ up = ivy.array(p, copy=True, dtype=dtype)\n+ up[slice1] = p[slice1] + ph_correct.cumsum(axis)\n+ return up\n+\n+my_list = [24,8,3,4,34,8]\n+ans = unwrap(my_list)\n+print(\"After the np.unwrap()\")\n+print(ans)\n\\ No newline at end of file\n", "issue": "unwrap\n\n", "before_files": [{"content": "", "path": "ivy/functional/frontends/numpy/mathematical_functions/other_special_functions.py"}], "after_files": [{"content": "# global\nimport ivy\n\n# local\nfrom ivy.functional.frontends.numpy.func_wrapper import (\n to_ivy_arrays_and_back,\n handle_numpy_dtype,\n from_zero_dim_arrays_to_scalar,\n handle_numpy_out,\n)\n\n\n\n@handle_numpy_out\n@handle_numpy_dtype\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef unwrap(p, discont=None, axis=-1, *, period=2*pi):\n p = ivy.Array.asarray(p)\n nd = p.ndim\n dd = ivy.diff(p, axis=axis)\n if discont is None:\n discont = period/2\n slice1 = [ivy.slice(None, None)]*nd # full slices\n slice1[axis] = ivy.slice(1, None)\n slice1 = ivy.tuple(slice1)\n dtype = ivy.result_type(dd, period)\n if ivy.issubdtype(dtype, ivy.integer):\n interval_high, rem = ivy.divmod(period, 2)\n boundary_ambiguous = rem == 0\n else:\n interval_high = period / 2\n boundary_ambiguous = True\n interval_low = -interval_high\n ddmod = ivy.mod(dd - interval_low, period) + interval_low\n if boundary_ambiguous:\n ivy.copyto(ddmod, interval_high,\n where=(ddmod == interval_low) & (dd > 0))\n ph_correct = ddmod - dd\n ivy.copyto(ph_correct, 0, where=ivy.abs(dd) < discont)\n up = ivy.array(p, copy=True, dtype=dtype)\n up[slice1] = p[slice1] + ph_correct.cumsum(axis)\n return up\n\nmy_list = [24,8,3,4,34,8]\nans = unwrap(my_list)\nprint(\"After the np.unwrap()\")\nprint(ans)", "path": "ivy/functional/frontends/numpy/mathematical_functions/other_special_functions.py"}]} | 271 | 552 |
gh_patches_debug_5853 | rasdani/github-patches | git_diff | vllm-project__vllm-147 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Write README
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import io
2 import os
3 import re
4 import subprocess
5 from typing import List, Set
6
7 from packaging.version import parse, Version
8 import setuptools
9 import torch
10 from torch.utils.cpp_extension import BuildExtension, CUDAExtension, CUDA_HOME
11
12 ROOT_DIR = os.path.dirname(__file__)
13
14 # Compiler flags.
15 CXX_FLAGS = ["-g", "-O2", "-std=c++17"]
16 # TODO(woosuk): Should we use -O3?
17 NVCC_FLAGS = ["-O2", "-std=c++17"]
18
19 ABI = 1 if torch._C._GLIBCXX_USE_CXX11_ABI else 0
20 CXX_FLAGS += [f"-D_GLIBCXX_USE_CXX11_ABI={ABI}"]
21 NVCC_FLAGS += [f"-D_GLIBCXX_USE_CXX11_ABI={ABI}"]
22
23 if not torch.cuda.is_available():
24 raise RuntimeError(
25 f"Cannot find CUDA at CUDA_HOME: {CUDA_HOME}. "
26 "CUDA must be available in order to build the package.")
27
28
29 def get_nvcc_cuda_version(cuda_dir: str) -> Version:
30 """Get the CUDA version from nvcc.
31
32 Adapted from https://github.com/NVIDIA/apex/blob/8b7a1ff183741dd8f9b87e7bafd04cfde99cea28/setup.py
33 """
34 nvcc_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"],
35 universal_newlines=True)
36 output = nvcc_output.split()
37 release_idx = output.index("release") + 1
38 nvcc_cuda_version = parse(output[release_idx].split(",")[0])
39 return nvcc_cuda_version
40
41
42 # Collect the compute capabilities of all available GPUs.
43 device_count = torch.cuda.device_count()
44 compute_capabilities: Set[int] = set()
45 for i in range(device_count):
46 major, minor = torch.cuda.get_device_capability(i)
47 if major < 7:
48 raise RuntimeError(
49 "GPUs with compute capability less than 7.0 are not supported.")
50 compute_capabilities.add(major * 10 + minor)
51 # If no GPU is available, add all supported compute capabilities.
52 if not compute_capabilities:
53 compute_capabilities = {70, 75, 80, 86, 90}
54 # Add target compute capabilities to NVCC flags.
55 for capability in compute_capabilities:
56 NVCC_FLAGS += ["-gencode", f"arch=compute_{capability},code=sm_{capability}"]
57
58 # Validate the NVCC CUDA version.
59 nvcc_cuda_version = get_nvcc_cuda_version(CUDA_HOME)
60 if nvcc_cuda_version < Version("11.0"):
61 raise RuntimeError("CUDA 11.0 or higher is required to build the package.")
62 if 86 in compute_capabilities and nvcc_cuda_version < Version("11.1"):
63 raise RuntimeError(
64 "CUDA 11.1 or higher is required for GPUs with compute capability 8.6.")
65 if 90 in compute_capabilities and nvcc_cuda_version < Version("11.8"):
66 raise RuntimeError(
67 "CUDA 11.8 or higher is required for GPUs with compute capability 9.0.")
68
69 # Use NVCC threads to parallelize the build.
70 if nvcc_cuda_version >= Version("11.2"):
71 num_threads = min(os.cpu_count(), 8)
72 NVCC_FLAGS += ["--threads", str(num_threads)]
73
74 ext_modules = []
75
76 # Cache operations.
77 cache_extension = CUDAExtension(
78 name="vllm.cache_ops",
79 sources=["csrc/cache.cpp", "csrc/cache_kernels.cu"],
80 extra_compile_args={"cxx": CXX_FLAGS, "nvcc": NVCC_FLAGS},
81 )
82 ext_modules.append(cache_extension)
83
84 # Attention kernels.
85 attention_extension = CUDAExtension(
86 name="vllm.attention_ops",
87 sources=["csrc/attention.cpp", "csrc/attention/attention_kernels.cu"],
88 extra_compile_args={"cxx": CXX_FLAGS, "nvcc": NVCC_FLAGS},
89 )
90 ext_modules.append(attention_extension)
91
92 # Positional encoding kernels.
93 positional_encoding_extension = CUDAExtension(
94 name="vllm.pos_encoding_ops",
95 sources=["csrc/pos_encoding.cpp", "csrc/pos_encoding_kernels.cu"],
96 extra_compile_args={"cxx": CXX_FLAGS, "nvcc": NVCC_FLAGS},
97 )
98 ext_modules.append(positional_encoding_extension)
99
100 # Layer normalization kernels.
101 layernorm_extension = CUDAExtension(
102 name="vllm.layernorm_ops",
103 sources=["csrc/layernorm.cpp", "csrc/layernorm_kernels.cu"],
104 extra_compile_args={"cxx": CXX_FLAGS, "nvcc": NVCC_FLAGS},
105 )
106 ext_modules.append(layernorm_extension)
107
108 # Activation kernels.
109 activation_extension = CUDAExtension(
110 name="vllm.activation_ops",
111 sources=["csrc/activation.cpp", "csrc/activation_kernels.cu"],
112 extra_compile_args={"cxx": CXX_FLAGS, "nvcc": NVCC_FLAGS},
113 )
114 ext_modules.append(activation_extension)
115
116
117 def get_path(*filepath) -> str:
118 return os.path.join(ROOT_DIR, *filepath)
119
120
121 def find_version(filepath: str):
122 """Extract version information from the given filepath.
123
124 Adapted from https://github.com/ray-project/ray/blob/0b190ee1160eeca9796bc091e07eaebf4c85b511/python/setup.py
125 """
126 with open(filepath) as fp:
127 version_match = re.search(
128 r"^__version__ = ['\"]([^'\"]*)['\"]", fp.read(), re.M)
129 if version_match:
130 return version_match.group(1)
131 raise RuntimeError("Unable to find version string.")
132
133
134 def read_readme() -> str:
135 """Read the README file."""
136 return io.open(get_path("README.md"), "r", encoding="utf-8").read()
137
138
139 def get_requirements() -> List[str]:
140 """Get Python package dependencies from requirements.txt."""
141 with open(get_path("requirements.txt")) as f:
142 requirements = f.read().strip().split("\n")
143 return requirements
144
145
146 setuptools.setup(
147 name="vllm",
148 version=find_version(get_path("vllm", "__init__.py")),
149 author="vLLM Team",
150 author_email="[email protected]", # FIXME
151 license="Apache 2.0",
152 description="vLLM: Easy, Fast, and Cheap LLM Serving with PagedAttention", # FIXME
153 long_description=read_readme(),
154 long_description_content_type="text/markdown",
155 url="https://github.com/WoosukKwon/vllm",
156 project_urls={
157 "Homepage": "https://github.com/WoosukKwon/vllm",
158 "Documentation": "https://vllm.readthedocs.io/en/latest/", # FIXME
159 },
160 classifiers=[
161 "Programming Language :: Python :: 3.8",
162 "Programming Language :: Python :: 3.9",
163 "Programming Language :: Python :: 3.10",
164 "License :: OSI Approved :: Apache Software License",
165 "Topic :: Scientific/Engineering :: Artificial Intelligence",
166 ],
167 packages=setuptools.find_packages(
168 exclude=("benchmarks", "csrc", "docs", "examples", "tests")),
169 python_requires=">=3.8",
170 install_requires=get_requirements(),
171 ext_modules=ext_modules,
172 cmdclass={"build_ext": BuildExtension},
173 )
174
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -165,7 +165,7 @@
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
packages=setuptools.find_packages(
- exclude=("benchmarks", "csrc", "docs", "examples", "tests")),
+ exclude=("assets", "benchmarks", "csrc", "docs", "examples", "tests")),
python_requires=">=3.8",
install_requires=get_requirements(),
ext_modules=ext_modules,
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -165,7 +165,7 @@\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n packages=setuptools.find_packages(\n- exclude=(\"benchmarks\", \"csrc\", \"docs\", \"examples\", \"tests\")),\n+ exclude=(\"assets\", \"benchmarks\", \"csrc\", \"docs\", \"examples\", \"tests\")),\n python_requires=\">=3.8\",\n install_requires=get_requirements(),\n ext_modules=ext_modules,\n", "issue": "Write README\n\n", "before_files": [{"content": "import io\nimport os\nimport re\nimport subprocess\nfrom typing import List, Set\n\nfrom packaging.version import parse, Version\nimport setuptools\nimport torch\nfrom torch.utils.cpp_extension import BuildExtension, CUDAExtension, CUDA_HOME\n\nROOT_DIR = os.path.dirname(__file__)\n\n# Compiler flags.\nCXX_FLAGS = [\"-g\", \"-O2\", \"-std=c++17\"]\n# TODO(woosuk): Should we use -O3?\nNVCC_FLAGS = [\"-O2\", \"-std=c++17\"]\n\nABI = 1 if torch._C._GLIBCXX_USE_CXX11_ABI else 0\nCXX_FLAGS += [f\"-D_GLIBCXX_USE_CXX11_ABI={ABI}\"]\nNVCC_FLAGS += [f\"-D_GLIBCXX_USE_CXX11_ABI={ABI}\"]\n\nif not torch.cuda.is_available():\n raise RuntimeError(\n f\"Cannot find CUDA at CUDA_HOME: {CUDA_HOME}. \"\n \"CUDA must be available in order to build the package.\")\n\n\ndef get_nvcc_cuda_version(cuda_dir: str) -> Version:\n \"\"\"Get the CUDA version from nvcc.\n\n Adapted from https://github.com/NVIDIA/apex/blob/8b7a1ff183741dd8f9b87e7bafd04cfde99cea28/setup.py\n \"\"\"\n nvcc_output = subprocess.check_output([cuda_dir + \"/bin/nvcc\", \"-V\"],\n universal_newlines=True)\n output = nvcc_output.split()\n release_idx = output.index(\"release\") + 1\n nvcc_cuda_version = parse(output[release_idx].split(\",\")[0])\n return nvcc_cuda_version\n\n\n# Collect the compute capabilities of all available GPUs.\ndevice_count = torch.cuda.device_count()\ncompute_capabilities: Set[int] = set()\nfor i in range(device_count):\n major, minor = torch.cuda.get_device_capability(i)\n if major < 7:\n raise RuntimeError(\n \"GPUs with compute capability less than 7.0 are not supported.\")\n compute_capabilities.add(major * 10 + minor)\n# If no GPU is available, add all supported compute capabilities.\nif not compute_capabilities:\n compute_capabilities = {70, 75, 80, 86, 90}\n# Add target compute capabilities to NVCC flags.\nfor capability in compute_capabilities:\n NVCC_FLAGS += [\"-gencode\", f\"arch=compute_{capability},code=sm_{capability}\"]\n\n# Validate the NVCC CUDA version.\nnvcc_cuda_version = get_nvcc_cuda_version(CUDA_HOME)\nif nvcc_cuda_version < Version(\"11.0\"):\n raise RuntimeError(\"CUDA 11.0 or higher is required to build the package.\")\nif 86 in compute_capabilities and nvcc_cuda_version < Version(\"11.1\"):\n raise RuntimeError(\n \"CUDA 11.1 or higher is required for GPUs with compute capability 8.6.\")\nif 90 in compute_capabilities and nvcc_cuda_version < Version(\"11.8\"):\n raise RuntimeError(\n \"CUDA 11.8 or higher is required for GPUs with compute capability 9.0.\")\n\n# Use NVCC threads to parallelize the build.\nif nvcc_cuda_version >= Version(\"11.2\"):\n num_threads = min(os.cpu_count(), 8)\n NVCC_FLAGS += [\"--threads\", str(num_threads)]\n\next_modules = []\n\n# Cache operations.\ncache_extension = CUDAExtension(\n name=\"vllm.cache_ops\",\n sources=[\"csrc/cache.cpp\", \"csrc/cache_kernels.cu\"],\n extra_compile_args={\"cxx\": CXX_FLAGS, \"nvcc\": NVCC_FLAGS},\n)\next_modules.append(cache_extension)\n\n# Attention kernels.\nattention_extension = CUDAExtension(\n 
name=\"vllm.attention_ops\",\n sources=[\"csrc/attention.cpp\", \"csrc/attention/attention_kernels.cu\"],\n extra_compile_args={\"cxx\": CXX_FLAGS, \"nvcc\": NVCC_FLAGS},\n)\next_modules.append(attention_extension)\n\n# Positional encoding kernels.\npositional_encoding_extension = CUDAExtension(\n name=\"vllm.pos_encoding_ops\",\n sources=[\"csrc/pos_encoding.cpp\", \"csrc/pos_encoding_kernels.cu\"],\n extra_compile_args={\"cxx\": CXX_FLAGS, \"nvcc\": NVCC_FLAGS},\n)\next_modules.append(positional_encoding_extension)\n\n# Layer normalization kernels.\nlayernorm_extension = CUDAExtension(\n name=\"vllm.layernorm_ops\",\n sources=[\"csrc/layernorm.cpp\", \"csrc/layernorm_kernels.cu\"],\n extra_compile_args={\"cxx\": CXX_FLAGS, \"nvcc\": NVCC_FLAGS},\n)\next_modules.append(layernorm_extension)\n\n# Activation kernels.\nactivation_extension = CUDAExtension(\n name=\"vllm.activation_ops\",\n sources=[\"csrc/activation.cpp\", \"csrc/activation_kernels.cu\"],\n extra_compile_args={\"cxx\": CXX_FLAGS, \"nvcc\": NVCC_FLAGS},\n)\next_modules.append(activation_extension)\n\n\ndef get_path(*filepath) -> str:\n return os.path.join(ROOT_DIR, *filepath)\n\n\ndef find_version(filepath: str):\n \"\"\"Extract version information from the given filepath.\n\n Adapted from https://github.com/ray-project/ray/blob/0b190ee1160eeca9796bc091e07eaebf4c85b511/python/setup.py\n \"\"\"\n with open(filepath) as fp:\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", fp.read(), re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\ndef read_readme() -> str:\n \"\"\"Read the README file.\"\"\"\n return io.open(get_path(\"README.md\"), \"r\", encoding=\"utf-8\").read()\n\n\ndef get_requirements() -> List[str]:\n \"\"\"Get Python package dependencies from requirements.txt.\"\"\"\n with open(get_path(\"requirements.txt\")) as f:\n requirements = f.read().strip().split(\"\\n\")\n return requirements\n\n\nsetuptools.setup(\n name=\"vllm\",\n version=find_version(get_path(\"vllm\", \"__init__.py\")),\n author=\"vLLM Team\",\n author_email=\"[email protected]\", # FIXME\n license=\"Apache 2.0\",\n description=\"vLLM: Easy, Fast, and Cheap LLM Serving with PagedAttention\", # FIXME\n long_description=read_readme(),\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/WoosukKwon/vllm\",\n project_urls={\n \"Homepage\": \"https://github.com/WoosukKwon/vllm\",\n \"Documentation\": \"https://vllm.readthedocs.io/en/latest/\", # FIXME\n },\n classifiers=[\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n packages=setuptools.find_packages(\n exclude=(\"benchmarks\", \"csrc\", \"docs\", \"examples\", \"tests\")),\n python_requires=\">=3.8\",\n install_requires=get_requirements(),\n ext_modules=ext_modules,\n cmdclass={\"build_ext\": BuildExtension},\n)\n", "path": "setup.py"}], "after_files": [{"content": "import io\nimport os\nimport re\nimport subprocess\nfrom typing import List, Set\n\nfrom packaging.version import parse, Version\nimport setuptools\nimport torch\nfrom torch.utils.cpp_extension import BuildExtension, CUDAExtension, CUDA_HOME\n\nROOT_DIR = os.path.dirname(__file__)\n\n# Compiler flags.\nCXX_FLAGS = [\"-g\", \"-O2\", \"-std=c++17\"]\n# TODO(woosuk): Should we use -O3?\nNVCC_FLAGS = [\"-O2\", 
\"-std=c++17\"]\n\nABI = 1 if torch._C._GLIBCXX_USE_CXX11_ABI else 0\nCXX_FLAGS += [f\"-D_GLIBCXX_USE_CXX11_ABI={ABI}\"]\nNVCC_FLAGS += [f\"-D_GLIBCXX_USE_CXX11_ABI={ABI}\"]\n\nif not torch.cuda.is_available():\n raise RuntimeError(\n f\"Cannot find CUDA at CUDA_HOME: {CUDA_HOME}. \"\n \"CUDA must be available in order to build the package.\")\n\n\ndef get_nvcc_cuda_version(cuda_dir: str) -> Version:\n \"\"\"Get the CUDA version from nvcc.\n\n Adapted from https://github.com/NVIDIA/apex/blob/8b7a1ff183741dd8f9b87e7bafd04cfde99cea28/setup.py\n \"\"\"\n nvcc_output = subprocess.check_output([cuda_dir + \"/bin/nvcc\", \"-V\"],\n universal_newlines=True)\n output = nvcc_output.split()\n release_idx = output.index(\"release\") + 1\n nvcc_cuda_version = parse(output[release_idx].split(\",\")[0])\n return nvcc_cuda_version\n\n\n# Collect the compute capabilities of all available GPUs.\ndevice_count = torch.cuda.device_count()\ncompute_capabilities: Set[int] = set()\nfor i in range(device_count):\n major, minor = torch.cuda.get_device_capability(i)\n if major < 7:\n raise RuntimeError(\n \"GPUs with compute capability less than 7.0 are not supported.\")\n compute_capabilities.add(major * 10 + minor)\n# If no GPU is available, add all supported compute capabilities.\nif not compute_capabilities:\n compute_capabilities = {70, 75, 80, 86, 90}\n# Add target compute capabilities to NVCC flags.\nfor capability in compute_capabilities:\n NVCC_FLAGS += [\"-gencode\", f\"arch=compute_{capability},code=sm_{capability}\"]\n\n# Validate the NVCC CUDA version.\nnvcc_cuda_version = get_nvcc_cuda_version(CUDA_HOME)\nif nvcc_cuda_version < Version(\"11.0\"):\n raise RuntimeError(\"CUDA 11.0 or higher is required to build the package.\")\nif 86 in compute_capabilities and nvcc_cuda_version < Version(\"11.1\"):\n raise RuntimeError(\n \"CUDA 11.1 or higher is required for GPUs with compute capability 8.6.\")\nif 90 in compute_capabilities and nvcc_cuda_version < Version(\"11.8\"):\n raise RuntimeError(\n \"CUDA 11.8 or higher is required for GPUs with compute capability 9.0.\")\n\n# Use NVCC threads to parallelize the build.\nif nvcc_cuda_version >= Version(\"11.2\"):\n num_threads = min(os.cpu_count(), 8)\n NVCC_FLAGS += [\"--threads\", str(num_threads)]\n\next_modules = []\n\n# Cache operations.\ncache_extension = CUDAExtension(\n name=\"vllm.cache_ops\",\n sources=[\"csrc/cache.cpp\", \"csrc/cache_kernels.cu\"],\n extra_compile_args={\"cxx\": CXX_FLAGS, \"nvcc\": NVCC_FLAGS},\n)\next_modules.append(cache_extension)\n\n# Attention kernels.\nattention_extension = CUDAExtension(\n name=\"vllm.attention_ops\",\n sources=[\"csrc/attention.cpp\", \"csrc/attention/attention_kernels.cu\"],\n extra_compile_args={\"cxx\": CXX_FLAGS, \"nvcc\": NVCC_FLAGS},\n)\next_modules.append(attention_extension)\n\n# Positional encoding kernels.\npositional_encoding_extension = CUDAExtension(\n name=\"vllm.pos_encoding_ops\",\n sources=[\"csrc/pos_encoding.cpp\", \"csrc/pos_encoding_kernels.cu\"],\n extra_compile_args={\"cxx\": CXX_FLAGS, \"nvcc\": NVCC_FLAGS},\n)\next_modules.append(positional_encoding_extension)\n\n# Layer normalization kernels.\nlayernorm_extension = CUDAExtension(\n name=\"vllm.layernorm_ops\",\n sources=[\"csrc/layernorm.cpp\", \"csrc/layernorm_kernels.cu\"],\n extra_compile_args={\"cxx\": CXX_FLAGS, \"nvcc\": NVCC_FLAGS},\n)\next_modules.append(layernorm_extension)\n\n# Activation kernels.\nactivation_extension = CUDAExtension(\n name=\"vllm.activation_ops\",\n sources=[\"csrc/activation.cpp\", 
\"csrc/activation_kernels.cu\"],\n extra_compile_args={\"cxx\": CXX_FLAGS, \"nvcc\": NVCC_FLAGS},\n)\next_modules.append(activation_extension)\n\n\ndef get_path(*filepath) -> str:\n return os.path.join(ROOT_DIR, *filepath)\n\n\ndef find_version(filepath: str):\n \"\"\"Extract version information from the given filepath.\n\n Adapted from https://github.com/ray-project/ray/blob/0b190ee1160eeca9796bc091e07eaebf4c85b511/python/setup.py\n \"\"\"\n with open(filepath) as fp:\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", fp.read(), re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\ndef read_readme() -> str:\n \"\"\"Read the README file.\"\"\"\n return io.open(get_path(\"README.md\"), \"r\", encoding=\"utf-8\").read()\n\n\ndef get_requirements() -> List[str]:\n \"\"\"Get Python package dependencies from requirements.txt.\"\"\"\n with open(get_path(\"requirements.txt\")) as f:\n requirements = f.read().strip().split(\"\\n\")\n return requirements\n\n\nsetuptools.setup(\n name=\"vllm\",\n version=find_version(get_path(\"vllm\", \"__init__.py\")),\n author=\"vLLM Team\",\n author_email=\"[email protected]\", # FIXME\n license=\"Apache 2.0\",\n description=\"vLLM: Easy, Fast, and Cheap LLM Serving with PagedAttention\", # FIXME\n long_description=read_readme(),\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/WoosukKwon/vllm\",\n project_urls={\n \"Homepage\": \"https://github.com/WoosukKwon/vllm\",\n \"Documentation\": \"https://vllm.readthedocs.io/en/latest/\", # FIXME\n },\n classifiers=[\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n packages=setuptools.find_packages(\n exclude=(\"assets\", \"benchmarks\", \"csrc\", \"docs\", \"examples\", \"tests\")),\n python_requires=\">=3.8\",\n install_requires=get_requirements(),\n ext_modules=ext_modules,\n cmdclass={\"build_ext\": BuildExtension},\n)\n", "path": "setup.py"}]} | 2,296 | 119 |
gh_patches_debug_18052 | rasdani/github-patches | git_diff | scikit-image__scikit-image-4064 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
travis has random failures on rank filters
## Description
See for ex: https://travis-ci.org/scikit-image/scikit-image/jobs/563363217
## Way to reproduce
```python
# Place the full code we need to recreate your issue here
# upload all necessary images to github too!
```
## Version information
```python
# Paste the output of the following python commands
from __future__ import print_function
import sys; print(sys.version)
import platform; print(platform.platform())
import skimage; print("scikit-image version: {}".format(skimage.__version__))
import numpy; print("numpy version: {}".format(numpy.__version__))
```
```python
# your output here
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `skimage/_shared/_warnings.py`
Content:
```
1 from contextlib import contextmanager
2 import sys
3 import warnings
4 import re
5 import os
6
7 __all__ = ['all_warnings', 'expected_warnings', 'warn']
8
9
10 def warn(message, category=None, stacklevel=2):
11 """A version of `warnings.warn` with a default stacklevel of 2.
12 """
13 if category is not None:
14 warnings.warn(message, category=category, stacklevel=stacklevel)
15 else:
16 warnings.warn(message, stacklevel=stacklevel)
17
18
19 @contextmanager
20 def all_warnings():
21 """
22 Context for use in testing to ensure that all warnings are raised.
23
24 Examples
25 --------
26 >>> import warnings
27 >>> def foo():
28 ... warnings.warn(RuntimeWarning("bar"))
29
30 We raise the warning once, while the warning filter is set to "once".
31 Hereafter, the warning is invisible, even with custom filters:
32
33 >>> with warnings.catch_warnings():
34 ... warnings.simplefilter('once')
35 ... foo()
36
37 We can now run ``foo()`` without a warning being raised:
38
39 >>> from numpy.testing import assert_warns
40 >>> foo()
41
42 To catch the warning, we call in the help of ``all_warnings``:
43
44 >>> with all_warnings():
45 ... assert_warns(RuntimeWarning, foo)
46 """
47 # _warnings.py is on the critical import path.
48 # Since this is a testing only function, we lazy import inspect.
49 import inspect
50 # Whenever a warning is triggered, Python adds a __warningregistry__
51 # member to the *calling* module. The exercize here is to find
52 # and eradicate all those breadcrumbs that were left lying around.
53 #
54 # We proceed by first searching all parent calling frames and explicitly
55 # clearing their warning registries (necessary for the doctests above to
56 # pass). Then, we search for all submodules of skimage and clear theirs
57 # as well (necessary for the skimage test suite to pass).
58
59 frame = inspect.currentframe()
60 if frame:
61 for f in inspect.getouterframes(frame):
62 f[0].f_locals['__warningregistry__'] = {}
63 del frame
64
65 for mod_name, mod in list(sys.modules.items()):
66 try:
67 mod.__warningregistry__.clear()
68 except AttributeError:
69 pass
70
71 with warnings.catch_warnings(record=True) as w:
72 warnings.simplefilter("always")
73 yield w
74
75
76 @contextmanager
77 def expected_warnings(matching):
78 r"""Context for use in testing to catch known warnings matching regexes
79
80 Parameters
81 ----------
82 matching : list of strings or compiled regexes
83 Regexes for the desired warning to catch
84
85 Examples
86 --------
87 >>> import numpy as np
88 >>> image = np.random.randint(0, 2**16, size=(100, 100), dtype=np.uint16)
89 >>> # rank filters are slow when bit-depth exceeds 10 bits
90 >>> from skimage import filters
91 >>> with expected_warnings(['Bad rank filter performance']):
92 ... median_filtered = filters.rank.median(image)
93
94 Notes
95 -----
96 Uses `all_warnings` to ensure all warnings are raised.
97 Upon exiting, it checks the recorded warnings for the desired matching
98 pattern(s).
99 Raises a ValueError if any match was not found or an unexpected
100 warning was raised.
101 Allows for three types of behaviors: `and`, `or`, and `optional` matches.
102 This is done to accommodate different build environments or loop conditions
103 that may produce different warnings. The behaviors can be combined.
104 If you pass multiple patterns, you get an orderless `and`, where all of the
105 warnings must be raised.
106 If you use the `|` operator in a pattern, you can catch one of several
107 warnings.
108 Finally, you can use `|\A\Z` in a pattern to signify it as optional.
109
110 """
111 if isinstance(matching, str):
112 raise ValueError('``matching`` should be a list of strings and not '
113 'a string itself.')
114
115 strict_warnings = os.environ.get('SKIMAGE_TEST_STRICT_WARNINGS', '1')
116 if strict_warnings.lower() == 'true':
117 strict_warnings = True
118 elif strict_warnings.lower() == 'false':
119 strict_warnings = False
120 else:
121 strict_warnings = bool(int(strict_warnings))
122
123 with all_warnings() as w:
124 # enter context
125 yield w
126 # exited user context, check the recorded warnings
127 # Allow users to provide None
128 while None in matching:
129 matching.remove(None)
130 remaining = [m for m in matching if r'\A\Z' not in m.split('|')]
131 for warn in w:
132 found = False
133 for match in matching:
134 if re.search(match, str(warn.message)) is not None:
135 found = True
136 if match in remaining:
137 remaining.remove(match)
138 if strict_warnings and not found:
139 raise ValueError('Unexpected warning: %s' % str(warn.message))
140 if strict_warnings and (len(remaining) > 0):
141 msg = 'No warning raised matching:\n%s' % '\n'.join(remaining)
142 raise ValueError(msg)
143
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/skimage/_shared/_warnings.py b/skimage/_shared/_warnings.py
--- a/skimage/_shared/_warnings.py
+++ b/skimage/_shared/_warnings.py
@@ -79,8 +79,9 @@
Parameters
----------
- matching : list of strings or compiled regexes
+ matching : None or a list of strings or compiled regexes
Regexes for the desired warning to catch
+ If matching is None, this behaves as a no-op.
Examples
--------
@@ -112,6 +113,11 @@
raise ValueError('``matching`` should be a list of strings and not '
'a string itself.')
+ # Special case for disabling the context manager
+ if matching is None:
+ yield None
+ return
+
strict_warnings = os.environ.get('SKIMAGE_TEST_STRICT_WARNINGS', '1')
if strict_warnings.lower() == 'true':
strict_warnings = True
| {"golden_diff": "diff --git a/skimage/_shared/_warnings.py b/skimage/_shared/_warnings.py\n--- a/skimage/_shared/_warnings.py\n+++ b/skimage/_shared/_warnings.py\n@@ -79,8 +79,9 @@\n \n Parameters\n ----------\n- matching : list of strings or compiled regexes\n+ matching : None or a list of strings or compiled regexes\n Regexes for the desired warning to catch\n+ If matching is None, this behaves as a no-op.\n \n Examples\n --------\n@@ -112,6 +113,11 @@\n raise ValueError('``matching`` should be a list of strings and not '\n 'a string itself.')\n \n+ # Special case for disabling the context manager\n+ if matching is None:\n+ yield None\n+ return\n+\n strict_warnings = os.environ.get('SKIMAGE_TEST_STRICT_WARNINGS', '1')\n if strict_warnings.lower() == 'true':\n strict_warnings = True\n", "issue": "travis has random failures on rank filters\n## Description\r\n\r\nSee for ex: https://travis-ci.org/scikit-image/scikit-image/jobs/563363217\r\n\r\n\r\n## Way to reproduce\r\n```python\r\n# Place the full code we need to recreate your issue here\r\n# upload all necessary images to github too!\r\n```\r\n\r\n\r\n## Version information\r\n```python\r\n# Paste the output of the following python commands\r\nfrom __future__ import print_function\r\nimport sys; print(sys.version)\r\nimport platform; print(platform.platform())\r\nimport skimage; print(\"scikit-image version: {}\".format(skimage.__version__))\r\nimport numpy; print(\"numpy version: {}\".format(numpy.__version__))\r\n```\r\n\r\n```python\r\n# your output here\r\n\r\n```\r\n\r\n\n", "before_files": [{"content": "from contextlib import contextmanager\nimport sys\nimport warnings\nimport re\nimport os\n\n__all__ = ['all_warnings', 'expected_warnings', 'warn']\n\n\ndef warn(message, category=None, stacklevel=2):\n \"\"\"A version of `warnings.warn` with a default stacklevel of 2.\n \"\"\"\n if category is not None:\n warnings.warn(message, category=category, stacklevel=stacklevel)\n else:\n warnings.warn(message, stacklevel=stacklevel)\n\n\n@contextmanager\ndef all_warnings():\n \"\"\"\n Context for use in testing to ensure that all warnings are raised.\n\n Examples\n --------\n >>> import warnings\n >>> def foo():\n ... warnings.warn(RuntimeWarning(\"bar\"))\n\n We raise the warning once, while the warning filter is set to \"once\".\n Hereafter, the warning is invisible, even with custom filters:\n\n >>> with warnings.catch_warnings():\n ... warnings.simplefilter('once')\n ... foo()\n\n We can now run ``foo()`` without a warning being raised:\n\n >>> from numpy.testing import assert_warns\n >>> foo()\n\n To catch the warning, we call in the help of ``all_warnings``:\n\n >>> with all_warnings():\n ... assert_warns(RuntimeWarning, foo)\n \"\"\"\n # _warnings.py is on the critical import path.\n # Since this is a testing only function, we lazy import inspect.\n import inspect\n # Whenever a warning is triggered, Python adds a __warningregistry__\n # member to the *calling* module. The exercize here is to find\n # and eradicate all those breadcrumbs that were left lying around.\n #\n # We proceed by first searching all parent calling frames and explicitly\n # clearing their warning registries (necessary for the doctests above to\n # pass). 
Then, we search for all submodules of skimage and clear theirs\n # as well (necessary for the skimage test suite to pass).\n\n frame = inspect.currentframe()\n if frame:\n for f in inspect.getouterframes(frame):\n f[0].f_locals['__warningregistry__'] = {}\n del frame\n\n for mod_name, mod in list(sys.modules.items()):\n try:\n mod.__warningregistry__.clear()\n except AttributeError:\n pass\n\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n yield w\n\n\n@contextmanager\ndef expected_warnings(matching):\n r\"\"\"Context for use in testing to catch known warnings matching regexes\n\n Parameters\n ----------\n matching : list of strings or compiled regexes\n Regexes for the desired warning to catch\n\n Examples\n --------\n >>> import numpy as np\n >>> image = np.random.randint(0, 2**16, size=(100, 100), dtype=np.uint16)\n >>> # rank filters are slow when bit-depth exceeds 10 bits\n >>> from skimage import filters\n >>> with expected_warnings(['Bad rank filter performance']):\n ... median_filtered = filters.rank.median(image)\n\n Notes\n -----\n Uses `all_warnings` to ensure all warnings are raised.\n Upon exiting, it checks the recorded warnings for the desired matching\n pattern(s).\n Raises a ValueError if any match was not found or an unexpected\n warning was raised.\n Allows for three types of behaviors: `and`, `or`, and `optional` matches.\n This is done to accommodate different build environments or loop conditions\n that may produce different warnings. The behaviors can be combined.\n If you pass multiple patterns, you get an orderless `and`, where all of the\n warnings must be raised.\n If you use the `|` operator in a pattern, you can catch one of several\n warnings.\n Finally, you can use `|\\A\\Z` in a pattern to signify it as optional.\n\n \"\"\"\n if isinstance(matching, str):\n raise ValueError('``matching`` should be a list of strings and not '\n 'a string itself.')\n\n strict_warnings = os.environ.get('SKIMAGE_TEST_STRICT_WARNINGS', '1')\n if strict_warnings.lower() == 'true':\n strict_warnings = True\n elif strict_warnings.lower() == 'false':\n strict_warnings = False\n else:\n strict_warnings = bool(int(strict_warnings))\n\n with all_warnings() as w:\n # enter context\n yield w\n # exited user context, check the recorded warnings\n # Allow users to provide None\n while None in matching:\n matching.remove(None)\n remaining = [m for m in matching if r'\\A\\Z' not in m.split('|')]\n for warn in w:\n found = False\n for match in matching:\n if re.search(match, str(warn.message)) is not None:\n found = True\n if match in remaining:\n remaining.remove(match)\n if strict_warnings and not found:\n raise ValueError('Unexpected warning: %s' % str(warn.message))\n if strict_warnings and (len(remaining) > 0):\n msg = 'No warning raised matching:\\n%s' % '\\n'.join(remaining)\n raise ValueError(msg)\n", "path": "skimage/_shared/_warnings.py"}], "after_files": [{"content": "from contextlib import contextmanager\nimport sys\nimport warnings\nimport re\nimport os\n\n__all__ = ['all_warnings', 'expected_warnings', 'warn']\n\n\ndef warn(message, category=None, stacklevel=2):\n \"\"\"A version of `warnings.warn` with a default stacklevel of 2.\n \"\"\"\n if category is not None:\n warnings.warn(message, category=category, stacklevel=stacklevel)\n else:\n warnings.warn(message, stacklevel=stacklevel)\n\n\n@contextmanager\ndef all_warnings():\n \"\"\"\n Context for use in testing to ensure that all warnings are raised.\n\n Examples\n --------\n >>> import 
warnings\n >>> def foo():\n ... warnings.warn(RuntimeWarning(\"bar\"))\n\n We raise the warning once, while the warning filter is set to \"once\".\n Hereafter, the warning is invisible, even with custom filters:\n\n >>> with warnings.catch_warnings():\n ... warnings.simplefilter('once')\n ... foo()\n\n We can now run ``foo()`` without a warning being raised:\n\n >>> from numpy.testing import assert_warns\n >>> foo()\n\n To catch the warning, we call in the help of ``all_warnings``:\n\n >>> with all_warnings():\n ... assert_warns(RuntimeWarning, foo)\n \"\"\"\n # _warnings.py is on the critical import path.\n # Since this is a testing only function, we lazy import inspect.\n import inspect\n # Whenever a warning is triggered, Python adds a __warningregistry__\n # member to the *calling* module. The exercize here is to find\n # and eradicate all those breadcrumbs that were left lying around.\n #\n # We proceed by first searching all parent calling frames and explicitly\n # clearing their warning registries (necessary for the doctests above to\n # pass). Then, we search for all submodules of skimage and clear theirs\n # as well (necessary for the skimage test suite to pass).\n\n frame = inspect.currentframe()\n if frame:\n for f in inspect.getouterframes(frame):\n f[0].f_locals['__warningregistry__'] = {}\n del frame\n\n for mod_name, mod in list(sys.modules.items()):\n try:\n mod.__warningregistry__.clear()\n except AttributeError:\n pass\n\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n yield w\n\n\n@contextmanager\ndef expected_warnings(matching):\n r\"\"\"Context for use in testing to catch known warnings matching regexes\n\n Parameters\n ----------\n matching : None or a list of strings or compiled regexes\n Regexes for the desired warning to catch\n If matching is None, this behaves as a no-op.\n\n Examples\n --------\n >>> import numpy as np\n >>> image = np.random.randint(0, 2**16, size=(100, 100), dtype=np.uint16)\n >>> # rank filters are slow when bit-depth exceeds 10 bits\n >>> from skimage import filters\n >>> with expected_warnings(['Bad rank filter performance']):\n ... median_filtered = filters.rank.median(image)\n\n Notes\n -----\n Uses `all_warnings` to ensure all warnings are raised.\n Upon exiting, it checks the recorded warnings for the desired matching\n pattern(s).\n Raises a ValueError if any match was not found or an unexpected\n warning was raised.\n Allows for three types of behaviors: `and`, `or`, and `optional` matches.\n This is done to accommodate different build environments or loop conditions\n that may produce different warnings. 
The behaviors can be combined.\n If you pass multiple patterns, you get an orderless `and`, where all of the\n warnings must be raised.\n If you use the `|` operator in a pattern, you can catch one of several\n warnings.\n Finally, you can use `|\\A\\Z` in a pattern to signify it as optional.\n\n \"\"\"\n if isinstance(matching, str):\n raise ValueError('``matching`` should be a list of strings and not '\n 'a string itself.')\n\n # Special case for disabling the context manager\n if matching is None:\n yield None\n return\n\n strict_warnings = os.environ.get('SKIMAGE_TEST_STRICT_WARNINGS', '1')\n if strict_warnings.lower() == 'true':\n strict_warnings = True\n elif strict_warnings.lower() == 'false':\n strict_warnings = False\n else:\n strict_warnings = bool(int(strict_warnings))\n\n with all_warnings() as w:\n # enter context\n yield w\n # exited user context, check the recorded warnings\n # Allow users to provide None\n while None in matching:\n matching.remove(None)\n remaining = [m for m in matching if r'\\A\\Z' not in m.split('|')]\n for warn in w:\n found = False\n for match in matching:\n if re.search(match, str(warn.message)) is not None:\n found = True\n if match in remaining:\n remaining.remove(match)\n if strict_warnings and not found:\n raise ValueError('Unexpected warning: %s' % str(warn.message))\n if strict_warnings and (len(remaining) > 0):\n msg = 'No warning raised matching:\\n%s' % '\\n'.join(remaining)\n raise ValueError(msg)\n", "path": "skimage/_shared/_warnings.py"}]} | 1,855 | 219 |
gh_patches_debug_30379 | rasdani/github-patches | git_diff | GPflow__GPflow-1654 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
A multioutput Kuf function throws an error
<!-- Lines like this are comments and will be invisible -->
# Bug
<!-- A clear and concise description of what the bug is. -->
The multioutput covariance function `Kuf` has several functions corresponding to different combinations of parameters. The function which accepts `FallbackSeparateIndependentInducingVariables` or `FallbackSharedIndependentInducingVariables` along with the `LinearCoregionalization` kernel throws an error.
## To reproduce
**Minimal, reproducible example**
<!-- We need to be able to reproduce the bug by simply copy and pasting your code -->
```python
import numpy as np
import gpflow
import gpflow.inducing_variables.multioutput as mf
import gpflow.kernels.multioutput as mk
from gpflow.covariances.multioutput import kufs as mo_kufs
inducing_variables = mf.FallbackSharedIndependentInducingVariables(
gpflow.inducing_variables.InducingPoints(np.random.rand(1, 1)))
kernel = mk.LinearCoregionalization(make_kernels(Datum.L), Datum.W)
Kuf = mo_kufs.Kuf(inducing_variables, kernel, Datum.Xnew)
```
**Stack trace, or error message**
```
@Kuf.register(
(FallbackSeparateIndependentInducingVariables, FallbackSharedIndependentInducingVariables),
LinearCoregionalization,
object,
)
def _Kuf(
inducing_variable: Union[
SeparateIndependentInducingVariables, SharedIndependentInducingVariables
],
kernel: LinearCoregionalization,
Xnew: tf.Tensor,
):
kuf_impl = Kuf.dispatch(type(inducing_variable), SeparateIndependent, object)
> K = tf.transpose(kuf_impl(inducing_variable, kernel, Xnew), [1, 0, 2]) # [M, L, N]
E TypeError: 'NoneType' object is not callable
../../../gpflow/covariances/multioutput/kufs.py:96: TypeError
```
## Expected behavior
The covariance should be computed.
## System information
* GPflow version: 2.1.4
* GPflow installed from: git commit 405eb97dd30cb43f02501b58b581f2608eb6e43e
* TensorFlow version: 2.3.0
* Python version: 3.7.5
* Operating system: Ubuntu 18.04.5 LTS
## Additional context
<!-- Add any other context about the problem here. -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gpflow/covariances/multioutput/kufs.py`
Content:
```
1 # Copyright 2017-2020 The GPflow Contributors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import Union
16
17 import tensorflow as tf
18
19 from ...inducing_variables import (
20 FallbackSeparateIndependentInducingVariables,
21 FallbackSharedIndependentInducingVariables,
22 InducingPoints,
23 SeparateIndependentInducingVariables,
24 SharedIndependentInducingVariables,
25 )
26 from ...kernels import (
27 LinearCoregionalization,
28 MultioutputKernel,
29 SeparateIndependent,
30 SharedIndependent,
31 )
32 from ..dispatch import Kuf
33
34
35 @Kuf.register(InducingPoints, MultioutputKernel, object)
36 def _Kuf(inducing_variable: InducingPoints, kernel: MultioutputKernel, Xnew: tf.Tensor):
37 return kernel(inducing_variable.Z, Xnew, full_cov=True, full_output_cov=True) # [M, P, N, P]
38
39
40 @Kuf.register(SharedIndependentInducingVariables, SharedIndependent, object)
41 def _Kuf(
42 inducing_variable: SharedIndependentInducingVariables,
43 kernel: SharedIndependent,
44 Xnew: tf.Tensor,
45 ):
46 return Kuf(inducing_variable.inducing_variable, kernel.kernel, Xnew) # [M, N]
47
48
49 @Kuf.register(SeparateIndependentInducingVariables, SharedIndependent, object)
50 def _Kuf(
51 inducing_variable: SeparateIndependentInducingVariables,
52 kernel: SharedIndependent,
53 Xnew: tf.Tensor,
54 ):
55 return tf.stack(
56 [Kuf(f, kernel.kernel, Xnew) for f in inducing_variable.inducing_variable_list], axis=0
57 ) # [L, M, N]
58
59
60 @Kuf.register(SharedIndependentInducingVariables, SeparateIndependent, object)
61 def _Kuf(
62 inducing_variable: SharedIndependentInducingVariables,
63 kernel: SeparateIndependent,
64 Xnew: tf.Tensor,
65 ):
66 return tf.stack(
67 [Kuf(inducing_variable.inducing_variable, k, Xnew) for k in kernel.kernels], axis=0
68 ) # [L, M, N]
69
70
71 @Kuf.register(SeparateIndependentInducingVariables, SeparateIndependent, object)
72 def _Kuf(
73 inducing_variable: SeparateIndependentInducingVariables,
74 kernel: SeparateIndependent,
75 Xnew: tf.Tensor,
76 ):
77 Kufs = [
78 Kuf(f, k, Xnew) for f, k in zip(inducing_variable.inducing_variable_list, kernel.kernels)
79 ]
80 return tf.stack(Kufs, axis=0) # [L, M, N]
81
82
83 @Kuf.register(
84 (FallbackSeparateIndependentInducingVariables, FallbackSharedIndependentInducingVariables),
85 LinearCoregionalization,
86 object,
87 )
88 def _Kuf(
89 inducing_variable: Union[
90 SeparateIndependentInducingVariables, SharedIndependentInducingVariables
91 ],
92 kernel: LinearCoregionalization,
93 Xnew: tf.Tensor,
94 ):
95 kuf_impl = Kuf.dispatch(type(inducing_variable), SeparateIndependent, object)
96 K = tf.transpose(kuf_impl(inducing_variable, kernel, Xnew), [1, 0, 2]) # [M, L, N]
97 return K[:, :, :, None] * tf.transpose(kernel.W)[None, :, None, :] # [M, L, N, P]
98
99
100 @Kuf.register(SharedIndependentInducingVariables, LinearCoregionalization, object)
101 def _Kuf(
102 inducing_variable: SharedIndependentInducingVariables,
103 kernel: SeparateIndependent,
104 Xnew: tf.Tensor,
105 ):
106 return tf.stack(
107 [Kuf(inducing_variable.inducing_variable, k, Xnew) for k in kernel.kernels], axis=0
108 ) # [L, M, N]
109
110
111 @Kuf.register(SeparateIndependentInducingVariables, LinearCoregionalization, object)
112 def _Kuf(inducing_variable, kernel, Xnew):
113 return tf.stack(
114 [Kuf(f, k, Xnew) for f, k in zip(inducing_variable.inducing_variable_list, kernel.kernels)],
115 axis=0,
116 ) # [L, M, N]
117
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gpflow/covariances/multioutput/kufs.py b/gpflow/covariances/multioutput/kufs.py
--- a/gpflow/covariances/multioutput/kufs.py
+++ b/gpflow/covariances/multioutput/kufs.py
@@ -80,23 +80,42 @@
return tf.stack(Kufs, axis=0) # [L, M, N]
[email protected](
- (FallbackSeparateIndependentInducingVariables, FallbackSharedIndependentInducingVariables),
- LinearCoregionalization,
- object,
-)
-def _Kuf(
+def _fallback_Kuf(
+ kuf_impl,
inducing_variable: Union[
SeparateIndependentInducingVariables, SharedIndependentInducingVariables
],
kernel: LinearCoregionalization,
Xnew: tf.Tensor,
):
- kuf_impl = Kuf.dispatch(type(inducing_variable), SeparateIndependent, object)
K = tf.transpose(kuf_impl(inducing_variable, kernel, Xnew), [1, 0, 2]) # [M, L, N]
return K[:, :, :, None] * tf.transpose(kernel.W)[None, :, None, :] # [M, L, N, P]
[email protected](
+ FallbackSeparateIndependentInducingVariables, LinearCoregionalization, object,
+)
+def _Kuf(
+ inducing_variable: FallbackSeparateIndependentInducingVariables,
+ kernel: LinearCoregionalization,
+ Xnew: tf.Tensor,
+):
+ kuf_impl = Kuf.dispatch(SeparateIndependentInducingVariables, SeparateIndependent, object)
+ return _fallback_Kuf(kuf_impl, inducing_variable, kernel, Xnew)
+
+
[email protected](
+ FallbackSharedIndependentInducingVariables, LinearCoregionalization, object,
+)
+def _Kuf(
+ inducing_variable: FallbackSharedIndependentInducingVariables,
+ kernel: LinearCoregionalization,
+ Xnew: tf.Tensor,
+):
+ kuf_impl = Kuf.dispatch(SharedIndependentInducingVariables, SeparateIndependent, object)
+ return _fallback_Kuf(kuf_impl, inducing_variable, kernel, Xnew)
+
+
@Kuf.register(SharedIndependentInducingVariables, LinearCoregionalization, object)
def _Kuf(
inducing_variable: SharedIndependentInducingVariables,
| {"golden_diff": "diff --git a/gpflow/covariances/multioutput/kufs.py b/gpflow/covariances/multioutput/kufs.py\n--- a/gpflow/covariances/multioutput/kufs.py\n+++ b/gpflow/covariances/multioutput/kufs.py\n@@ -80,23 +80,42 @@\n return tf.stack(Kufs, axis=0) # [L, M, N]\n \n \[email protected](\n- (FallbackSeparateIndependentInducingVariables, FallbackSharedIndependentInducingVariables),\n- LinearCoregionalization,\n- object,\n-)\n-def _Kuf(\n+def _fallback_Kuf(\n+ kuf_impl,\n inducing_variable: Union[\n SeparateIndependentInducingVariables, SharedIndependentInducingVariables\n ],\n kernel: LinearCoregionalization,\n Xnew: tf.Tensor,\n ):\n- kuf_impl = Kuf.dispatch(type(inducing_variable), SeparateIndependent, object)\n K = tf.transpose(kuf_impl(inducing_variable, kernel, Xnew), [1, 0, 2]) # [M, L, N]\n return K[:, :, :, None] * tf.transpose(kernel.W)[None, :, None, :] # [M, L, N, P]\n \n \[email protected](\n+ FallbackSeparateIndependentInducingVariables, LinearCoregionalization, object,\n+)\n+def _Kuf(\n+ inducing_variable: FallbackSeparateIndependentInducingVariables,\n+ kernel: LinearCoregionalization,\n+ Xnew: tf.Tensor,\n+):\n+ kuf_impl = Kuf.dispatch(SeparateIndependentInducingVariables, SeparateIndependent, object)\n+ return _fallback_Kuf(kuf_impl, inducing_variable, kernel, Xnew)\n+\n+\[email protected](\n+ FallbackSharedIndependentInducingVariables, LinearCoregionalization, object,\n+)\n+def _Kuf(\n+ inducing_variable: FallbackSharedIndependentInducingVariables,\n+ kernel: LinearCoregionalization,\n+ Xnew: tf.Tensor,\n+):\n+ kuf_impl = Kuf.dispatch(SharedIndependentInducingVariables, SeparateIndependent, object)\n+ return _fallback_Kuf(kuf_impl, inducing_variable, kernel, Xnew)\n+\n+\n @Kuf.register(SharedIndependentInducingVariables, LinearCoregionalization, object)\n def _Kuf(\n inducing_variable: SharedIndependentInducingVariables,\n", "issue": "A multioutput Kuf function throws an error\n<!-- Lines like this are comments and will be invisible -->\r\n\r\n# Bug\r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\nThe multioutput covariance function `Kuf` has several functions corresponding to different combinations of parameters. 
The function which accepts `FallbackSeparateIndependentInducingVariables` or `FallbackSharedIndependentInducingVariables` along with the `LinearCoregionalization` kernel throws an error.\r\n\r\n## To reproduce\r\n\r\n**Minimal, reproducible example**\r\n<!-- We need to be able to reproduce the bug by simply copy and pasting your code -->\r\n```python\r\nimport numpy as np\r\nimport gpflow\r\nimport gpflow.inducing_variables.multioutput as mf\r\nimport gpflow.kernels.multioutput as mk\r\nfrom gpflow.covariances.multioutput import kufs as mo_kufs\r\n\r\ninducing_variables = mf.FallbackSharedIndependentInducingVariables(\r\n gpflow.inducing_variables.InducingPoints(np.random.rand(1, 1)))\r\nkernel = mk.LinearCoregionalization(make_kernels(Datum.L), Datum.W)\r\nKuf = mo_kufs.Kuf(inducing_variables, kernel, Datum.Xnew)\r\n```\r\n\r\n**Stack trace, or error message**\r\n```\r\n @Kuf.register(\r\n (FallbackSeparateIndependentInducingVariables, FallbackSharedIndependentInducingVariables),\r\n LinearCoregionalization,\r\n object,\r\n )\r\n def _Kuf(\r\n inducing_variable: Union[\r\n SeparateIndependentInducingVariables, SharedIndependentInducingVariables\r\n ],\r\n kernel: LinearCoregionalization,\r\n Xnew: tf.Tensor,\r\n ):\r\n kuf_impl = Kuf.dispatch(type(inducing_variable), SeparateIndependent, object)\r\n> K = tf.transpose(kuf_impl(inducing_variable, kernel, Xnew), [1, 0, 2]) # [M, L, N]\r\nE TypeError: 'NoneType' object is not callable\r\n\r\n../../../gpflow/covariances/multioutput/kufs.py:96: TypeError\r\n\r\n```\r\n\r\n## Expected behavior\r\n\r\nThe covariance should be computed.\r\n\r\n## System information\r\n\r\n* GPflow version: 2.1.4\r\n* GPflow installed from: git commit 405eb97dd30cb43f02501b58b581f2608eb6e43e\r\n* TensorFlow version: 2.3.0\r\n* Python version: 3.7.5\r\n* Operating system: Ubuntu 18.04.5 LTS\r\n\r\n## Additional context\r\n\r\n<!-- Add any other context about the problem here. -->\r\n\n", "before_files": [{"content": "# Copyright 2017-2020 The GPflow Contributors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Union\n\nimport tensorflow as tf\n\nfrom ...inducing_variables import (\n FallbackSeparateIndependentInducingVariables,\n FallbackSharedIndependentInducingVariables,\n InducingPoints,\n SeparateIndependentInducingVariables,\n SharedIndependentInducingVariables,\n)\nfrom ...kernels import (\n LinearCoregionalization,\n MultioutputKernel,\n SeparateIndependent,\n SharedIndependent,\n)\nfrom ..dispatch import Kuf\n\n\[email protected](InducingPoints, MultioutputKernel, object)\ndef _Kuf(inducing_variable: InducingPoints, kernel: MultioutputKernel, Xnew: tf.Tensor):\n return kernel(inducing_variable.Z, Xnew, full_cov=True, full_output_cov=True) # [M, P, N, P]\n\n\[email protected](SharedIndependentInducingVariables, SharedIndependent, object)\ndef _Kuf(\n inducing_variable: SharedIndependentInducingVariables,\n kernel: SharedIndependent,\n Xnew: tf.Tensor,\n):\n return Kuf(inducing_variable.inducing_variable, kernel.kernel, Xnew) # [M, N]\n\n\[email protected](SeparateIndependentInducingVariables, SharedIndependent, object)\ndef _Kuf(\n inducing_variable: SeparateIndependentInducingVariables,\n kernel: SharedIndependent,\n Xnew: tf.Tensor,\n):\n return tf.stack(\n [Kuf(f, kernel.kernel, Xnew) for f in inducing_variable.inducing_variable_list], axis=0\n ) # [L, M, N]\n\n\[email protected](SharedIndependentInducingVariables, SeparateIndependent, object)\ndef _Kuf(\n inducing_variable: SharedIndependentInducingVariables,\n kernel: SeparateIndependent,\n Xnew: tf.Tensor,\n):\n return tf.stack(\n [Kuf(inducing_variable.inducing_variable, k, Xnew) for k in kernel.kernels], axis=0\n ) # [L, M, N]\n\n\[email protected](SeparateIndependentInducingVariables, SeparateIndependent, object)\ndef _Kuf(\n inducing_variable: SeparateIndependentInducingVariables,\n kernel: SeparateIndependent,\n Xnew: tf.Tensor,\n):\n Kufs = [\n Kuf(f, k, Xnew) for f, k in zip(inducing_variable.inducing_variable_list, kernel.kernels)\n ]\n return tf.stack(Kufs, axis=0) # [L, M, N]\n\n\[email protected](\n (FallbackSeparateIndependentInducingVariables, FallbackSharedIndependentInducingVariables),\n LinearCoregionalization,\n object,\n)\ndef _Kuf(\n inducing_variable: Union[\n SeparateIndependentInducingVariables, SharedIndependentInducingVariables\n ],\n kernel: LinearCoregionalization,\n Xnew: tf.Tensor,\n):\n kuf_impl = Kuf.dispatch(type(inducing_variable), SeparateIndependent, object)\n K = tf.transpose(kuf_impl(inducing_variable, kernel, Xnew), [1, 0, 2]) # [M, L, N]\n return K[:, :, :, None] * tf.transpose(kernel.W)[None, :, None, :] # [M, L, N, P]\n\n\[email protected](SharedIndependentInducingVariables, LinearCoregionalization, object)\ndef _Kuf(\n inducing_variable: SharedIndependentInducingVariables,\n kernel: SeparateIndependent,\n Xnew: tf.Tensor,\n):\n return tf.stack(\n [Kuf(inducing_variable.inducing_variable, k, Xnew) for k in kernel.kernels], axis=0\n ) # [L, M, N]\n\n\[email protected](SeparateIndependentInducingVariables, 
LinearCoregionalization, object)\ndef _Kuf(inducing_variable, kernel, Xnew):\n return tf.stack(\n [Kuf(f, k, Xnew) for f, k in zip(inducing_variable.inducing_variable_list, kernel.kernels)],\n axis=0,\n ) # [L, M, N]\n", "path": "gpflow/covariances/multioutput/kufs.py"}], "after_files": [{"content": "# Copyright 2017-2020 The GPflow Contributors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Union\n\nimport tensorflow as tf\n\nfrom ...inducing_variables import (\n FallbackSeparateIndependentInducingVariables,\n FallbackSharedIndependentInducingVariables,\n InducingPoints,\n SeparateIndependentInducingVariables,\n SharedIndependentInducingVariables,\n)\nfrom ...kernels import (\n LinearCoregionalization,\n MultioutputKernel,\n SeparateIndependent,\n SharedIndependent,\n)\nfrom ..dispatch import Kuf\n\n\[email protected](InducingPoints, MultioutputKernel, object)\ndef _Kuf(inducing_variable: InducingPoints, kernel: MultioutputKernel, Xnew: tf.Tensor):\n return kernel(inducing_variable.Z, Xnew, full_cov=True, full_output_cov=True) # [M, P, N, P]\n\n\[email protected](SharedIndependentInducingVariables, SharedIndependent, object)\ndef _Kuf(\n inducing_variable: SharedIndependentInducingVariables,\n kernel: SharedIndependent,\n Xnew: tf.Tensor,\n):\n return Kuf(inducing_variable.inducing_variable, kernel.kernel, Xnew) # [M, N]\n\n\[email protected](SeparateIndependentInducingVariables, SharedIndependent, object)\ndef _Kuf(\n inducing_variable: SeparateIndependentInducingVariables,\n kernel: SharedIndependent,\n Xnew: tf.Tensor,\n):\n return tf.stack(\n [Kuf(f, kernel.kernel, Xnew) for f in inducing_variable.inducing_variable_list], axis=0\n ) # [L, M, N]\n\n\[email protected](SharedIndependentInducingVariables, SeparateIndependent, object)\ndef _Kuf(\n inducing_variable: SharedIndependentInducingVariables,\n kernel: SeparateIndependent,\n Xnew: tf.Tensor,\n):\n return tf.stack(\n [Kuf(inducing_variable.inducing_variable, k, Xnew) for k in kernel.kernels], axis=0\n ) # [L, M, N]\n\n\[email protected](SeparateIndependentInducingVariables, SeparateIndependent, object)\ndef _Kuf(\n inducing_variable: SeparateIndependentInducingVariables,\n kernel: SeparateIndependent,\n Xnew: tf.Tensor,\n):\n Kufs = [\n Kuf(f, k, Xnew) for f, k in zip(inducing_variable.inducing_variable_list, kernel.kernels)\n ]\n return tf.stack(Kufs, axis=0) # [L, M, N]\n\n\ndef _fallback_Kuf(\n kuf_impl,\n inducing_variable: Union[\n SeparateIndependentInducingVariables, SharedIndependentInducingVariables\n ],\n kernel: LinearCoregionalization,\n Xnew: tf.Tensor,\n):\n K = tf.transpose(kuf_impl(inducing_variable, kernel, Xnew), [1, 0, 2]) # [M, L, N]\n return K[:, :, :, None] * tf.transpose(kernel.W)[None, :, None, :] # [M, L, N, P]\n\n\[email protected](\n FallbackSeparateIndependentInducingVariables, LinearCoregionalization, object,\n)\ndef _Kuf(\n inducing_variable: FallbackSeparateIndependentInducingVariables,\n kernel: LinearCoregionalization,\n Xnew: tf.Tensor,\n):\n kuf_impl = 
Kuf.dispatch(SeparateIndependentInducingVariables, SeparateIndependent, object)\n return _fallback_Kuf(kuf_impl, inducing_variable, kernel, Xnew)\n\n\[email protected](\n FallbackSharedIndependentInducingVariables, LinearCoregionalization, object,\n)\ndef _Kuf(\n inducing_variable: FallbackSharedIndependentInducingVariables,\n kernel: LinearCoregionalization,\n Xnew: tf.Tensor,\n):\n kuf_impl = Kuf.dispatch(SharedIndependentInducingVariables, SeparateIndependent, object)\n return _fallback_Kuf(kuf_impl, inducing_variable, kernel, Xnew)\n\n\[email protected](SharedIndependentInducingVariables, LinearCoregionalization, object)\ndef _Kuf(\n inducing_variable: SharedIndependentInducingVariables,\n kernel: SeparateIndependent,\n Xnew: tf.Tensor,\n):\n return tf.stack(\n [Kuf(inducing_variable.inducing_variable, k, Xnew) for k in kernel.kernels], axis=0\n ) # [L, M, N]\n\n\[email protected](SeparateIndependentInducingVariables, LinearCoregionalization, object)\ndef _Kuf(inducing_variable, kernel, Xnew):\n return tf.stack(\n [Kuf(f, k, Xnew) for f, k in zip(inducing_variable.inducing_variable_list, kernel.kernels)],\n axis=0,\n ) # [L, M, N]\n", "path": "gpflow/covariances/multioutput/kufs.py"}]} | 2,059 | 531 |
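
The failure captured in the record above comes from `Kuf.dispatch(type(inducing_variable), SeparateIndependent, object)`: the fallback inducing-variable classes were never registered for `SeparateIndependent`, so the exact-type lookup returns `None`, and calling that `None` raises the reported `TypeError`. The accepted patch registers one handler per fallback class and resolves the underlying implementation from the concrete classes that are registered. The snippet below is a self-contained toy illustration of that lookup failure, not GPflow code; the registry, class names, and functions are invented for the example.

```python
# Toy dispatch table illustrating the bug pattern (invented names, not GPflow).
registry = {}

def register(*key):
    def deco(fn):
        registry[key] = fn
        return fn
    return deco

class SharedIV:                      # registered inducing-variable type
    pass

class FallbackSharedIV(SharedIV):    # subclass with no registration of its own
    pass

class SeparateKernel:
    pass

@register(SharedIV, SeparateKernel)
def kuf_shared(iv, kernel):
    return "shared Kuf implementation"

def dispatch(*key):
    # Exact-type lookup, as in the buggy code path: no inheritance walk.
    return registry.get(key)

iv = FallbackSharedIV()
broken = dispatch(type(iv), SeparateKernel)   # None -> calling it would raise TypeError
fixed = dispatch(SharedIV, SeparateKernel)    # look up the class that was registered
print(broken, fixed(iv, SeparateKernel()))    # None shared Kuf implementation
```
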
gh_patches_debug_25191 | rasdani/github-patches | git_diff | scipy__scipy-6119 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DeprecationWarnings in stats on python 3.5
```
/home/br/repos/scipy/build/testenv/lib/python3.5/site-packages/scipy/stats/tests/test_stats.py:101: DeprecationWarning: Please use assertRaisesRegex instead.
```
Apparently, `assertRaisesRegexp` was renamed to `assertRaisesRegex`: https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertRaisesRegexp
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scipy/_lib/_numpy_compat.py`
Content:
```
1 """Functions copypasted from newer versions of numpy.
2
3 """
4 from __future__ import division, print_function, absolute_import
5
6 import warnings
7
8 import numpy as np
9
10 from scipy._lib._version import NumpyVersion
11
12 if NumpyVersion(np.__version__) > '1.7.0.dev':
13 _assert_warns = np.testing.assert_warns
14 else:
15 def _assert_warns(warning_class, func, *args, **kw):
16 r"""
17 Fail unless the given callable throws the specified warning.
18
19 This definition is copypasted from numpy 1.9.0.dev.
20 The version in earlier numpy returns None.
21
22 Parameters
23 ----------
24 warning_class : class
25 The class defining the warning that `func` is expected to throw.
26 func : callable
27 The callable to test.
28 *args : Arguments
29 Arguments passed to `func`.
30 **kwargs : Kwargs
31 Keyword arguments passed to `func`.
32
33 Returns
34 -------
35 The value returned by `func`.
36
37 """
38 with warnings.catch_warnings(record=True) as l:
39 warnings.simplefilter('always')
40 result = func(*args, **kw)
41 if not len(l) > 0:
42 raise AssertionError("No warning raised when calling %s"
43 % func.__name__)
44 if not l[0].category is warning_class:
45 raise AssertionError("First warning for %s is not a "
46 "%s( is %s)" % (func.__name__, warning_class, l[0]))
47 return result
48
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scipy/_lib/_numpy_compat.py b/scipy/_lib/_numpy_compat.py
--- a/scipy/_lib/_numpy_compat.py
+++ b/scipy/_lib/_numpy_compat.py
@@ -4,8 +4,10 @@
from __future__ import division, print_function, absolute_import
import warnings
+import sys
import numpy as np
+from numpy.testing.nosetester import import_nose
from scipy._lib._version import NumpyVersion
@@ -45,3 +47,28 @@
raise AssertionError("First warning for %s is not a "
"%s( is %s)" % (func.__name__, warning_class, l[0]))
return result
+
+
+def assert_raises_regex(exception_class, expected_regexp,
+ callable_obj=None, *args, **kwargs):
+ """
+ Fail unless an exception of class exception_class and with message that
+ matches expected_regexp is thrown by callable when invoked with arguments
+ args and keyword arguments kwargs.
+ Name of this function adheres to Python 3.2+ reference, but should work in
+ all versions down to 2.6.
+ Notes
+ -----
+ .. versionadded:: 1.8.0
+ """
+ __tracebackhide__ = True # Hide traceback for py.test
+ nose = import_nose()
+
+ if sys.version_info.major >= 3:
+ funcname = nose.tools.assert_raises_regex
+ else:
+ # Only present in Python 2.7, missing from unittest in 2.6
+ funcname = nose.tools.assert_raises_regexp
+
+ return funcname(exception_class, expected_regexp, callable_obj,
+ *args, **kwargs)
| {"golden_diff": "diff --git a/scipy/_lib/_numpy_compat.py b/scipy/_lib/_numpy_compat.py\n--- a/scipy/_lib/_numpy_compat.py\n+++ b/scipy/_lib/_numpy_compat.py\n@@ -4,8 +4,10 @@\n from __future__ import division, print_function, absolute_import\n \n import warnings\n+import sys\n \n import numpy as np\n+from numpy.testing.nosetester import import_nose\n \n from scipy._lib._version import NumpyVersion\n \n@@ -45,3 +47,28 @@\n raise AssertionError(\"First warning for %s is not a \"\n \"%s( is %s)\" % (func.__name__, warning_class, l[0]))\n return result\n+\n+\n+def assert_raises_regex(exception_class, expected_regexp,\n+ callable_obj=None, *args, **kwargs):\n+ \"\"\"\n+ Fail unless an exception of class exception_class and with message that\n+ matches expected_regexp is thrown by callable when invoked with arguments\n+ args and keyword arguments kwargs.\n+ Name of this function adheres to Python 3.2+ reference, but should work in\n+ all versions down to 2.6.\n+ Notes\n+ -----\n+ .. versionadded:: 1.8.0\n+ \"\"\"\n+ __tracebackhide__ = True # Hide traceback for py.test\n+ nose = import_nose()\n+\n+ if sys.version_info.major >= 3:\n+ funcname = nose.tools.assert_raises_regex\n+ else:\n+ # Only present in Python 2.7, missing from unittest in 2.6\n+ funcname = nose.tools.assert_raises_regexp\n+\n+ return funcname(exception_class, expected_regexp, callable_obj,\n+ *args, **kwargs)\n", "issue": "DeprecationWarnings in stats on python 3.5\n```\n/home/br/repos/scipy/build/testenv/lib/python3.5/site-packages/scipy/stats/tests/test_stats.py:101: DeprecationWarning: Please use assertRaisesRegex instead.\n```\n\nApparently, `assertRaisesRegexp` was renamed to `assertRaisesRegex`: https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertRaisesRegexp\n\n", "before_files": [{"content": "\"\"\"Functions copypasted from newer versions of numpy.\n\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nimport warnings\n\nimport numpy as np\n\nfrom scipy._lib._version import NumpyVersion\n\nif NumpyVersion(np.__version__) > '1.7.0.dev':\n _assert_warns = np.testing.assert_warns\nelse:\n def _assert_warns(warning_class, func, *args, **kw):\n r\"\"\"\n Fail unless the given callable throws the specified warning.\n\n This definition is copypasted from numpy 1.9.0.dev.\n The version in earlier numpy returns None.\n\n Parameters\n ----------\n warning_class : class\n The class defining the warning that `func` is expected to throw.\n func : callable\n The callable to test.\n *args : Arguments\n Arguments passed to `func`.\n **kwargs : Kwargs\n Keyword arguments passed to `func`.\n\n Returns\n -------\n The value returned by `func`.\n\n \"\"\"\n with warnings.catch_warnings(record=True) as l:\n warnings.simplefilter('always')\n result = func(*args, **kw)\n if not len(l) > 0:\n raise AssertionError(\"No warning raised when calling %s\"\n % func.__name__)\n if not l[0].category is warning_class:\n raise AssertionError(\"First warning for %s is not a \"\n \"%s( is %s)\" % (func.__name__, warning_class, l[0]))\n return result\n", "path": "scipy/_lib/_numpy_compat.py"}], "after_files": [{"content": "\"\"\"Functions copypasted from newer versions of numpy.\n\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nimport warnings\nimport sys\n\nimport numpy as np\nfrom numpy.testing.nosetester import import_nose\n\nfrom scipy._lib._version import NumpyVersion\n\nif NumpyVersion(np.__version__) > '1.7.0.dev':\n _assert_warns = np.testing.assert_warns\nelse:\n def 
_assert_warns(warning_class, func, *args, **kw):\n r\"\"\"\n Fail unless the given callable throws the specified warning.\n\n This definition is copypasted from numpy 1.9.0.dev.\n The version in earlier numpy returns None.\n\n Parameters\n ----------\n warning_class : class\n The class defining the warning that `func` is expected to throw.\n func : callable\n The callable to test.\n *args : Arguments\n Arguments passed to `func`.\n **kwargs : Kwargs\n Keyword arguments passed to `func`.\n\n Returns\n -------\n The value returned by `func`.\n\n \"\"\"\n with warnings.catch_warnings(record=True) as l:\n warnings.simplefilter('always')\n result = func(*args, **kw)\n if not len(l) > 0:\n raise AssertionError(\"No warning raised when calling %s\"\n % func.__name__)\n if not l[0].category is warning_class:\n raise AssertionError(\"First warning for %s is not a \"\n \"%s( is %s)\" % (func.__name__, warning_class, l[0]))\n return result\n\n\ndef assert_raises_regex(exception_class, expected_regexp,\n callable_obj=None, *args, **kwargs):\n \"\"\"\n Fail unless an exception of class exception_class and with message that\n matches expected_regexp is thrown by callable when invoked with arguments\n args and keyword arguments kwargs.\n Name of this function adheres to Python 3.2+ reference, but should work in\n all versions down to 2.6.\n Notes\n -----\n .. versionadded:: 1.8.0\n \"\"\"\n __tracebackhide__ = True # Hide traceback for py.test\n nose = import_nose()\n\n if sys.version_info.major >= 3:\n funcname = nose.tools.assert_raises_regex\n else:\n # Only present in Python 2.7, missing from unittest in 2.6\n funcname = nose.tools.assert_raises_regexp\n\n return funcname(exception_class, expected_regexp, callable_obj,\n *args, **kwargs)\n", "path": "scipy/_lib/_numpy_compat.py"}]} | 769 | 387 |
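
The patch in the record above silences the warning by exposing a single helper, `assert_raises_regex`, that forwards to nose's `assert_raises_regex` on Python 3 and to the older `assert_raises_regexp` on Python 2, so test modules can call one name without tripping the deprecation. The sketch below shows the same version switch using the standard-library `unittest` instead of nose, purely as an illustration; the `_Case` helper is invented here, and the Python 2 branch assumes 2.7, where `assertRaisesRegexp` exists.

```python
# Same version-switch idea, demonstrated with unittest rather than nose.
import sys
import unittest

class _Case(unittest.TestCase):
    def runTest(self):          # dummy test method so the case can be instantiated
        pass

_case = _Case()

if sys.version_info[0] >= 3:
    assert_raises_regex = _case.assertRaisesRegex    # modern spelling, no DeprecationWarning
else:
    assert_raises_regex = _case.assertRaisesRegexp   # Python 2.7 spelling

# Fails unless int("nope") raises ValueError with a message matching the pattern.
assert_raises_regex(ValueError, "invalid literal", int, "nope")
print("assertion passed")
```
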
gh_patches_debug_34963 | rasdani/github-patches | git_diff | adfinis__timed-backend-925 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bug(auth): requests to the api with an invalid token receive a response status 500 instead of 401
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `timed/authentication.py`
Content:
```
1 import base64
2 import functools
3 import hashlib
4
5 import requests
6 from django.conf import settings
7 from django.core.cache import cache
8 from django.core.exceptions import SuspiciousOperation
9 from django.utils.encoding import force_bytes
10 from mozilla_django_oidc.auth import LOGGER, OIDCAuthenticationBackend
11
12
13 class TimedOIDCAuthenticationBackend(OIDCAuthenticationBackend):
14 def get_introspection(self, access_token, id_token, payload):
15 """Return user details dictionary."""
16
17 basic = base64.b64encode(
18 f"{settings.OIDC_RP_INTROSPECT_CLIENT_ID}:{settings.OIDC_RP_INTROSPECT_CLIENT_SECRET}".encode(
19 "utf-8"
20 )
21 ).decode()
22 headers = {
23 "Authorization": f"Basic {basic}",
24 "Content-Type": "application/x-www-form-urlencoded",
25 }
26 response = requests.post(
27 settings.OIDC_OP_INTROSPECT_ENDPOINT,
28 verify=settings.OIDC_VERIFY_SSL,
29 headers=headers,
30 data={"token": access_token},
31 )
32 response.raise_for_status()
33 return response.json()
34
35 def get_userinfo_or_introspection(self, access_token):
36 try:
37 claims = self.cached_request(
38 self.get_userinfo, access_token, "auth.userinfo"
39 )
40 except requests.HTTPError as e:
41 if not (
42 e.response.status_code in [401, 403] and settings.OIDC_CHECK_INTROSPECT
43 ):
44 raise e
45
46 # check introspection if userinfo fails (confidental client)
47 claims = self.cached_request(
48 self.get_introspection, access_token, "auth.introspection"
49 )
50 if "client_id" not in claims:
51 raise SuspiciousOperation("client_id not present in introspection")
52
53 return claims
54
55 def get_or_create_user(self, access_token, id_token, payload):
56 """Verify claims and return user, otherwise raise an Exception."""
57
58 claims = self.get_userinfo_or_introspection(access_token)
59
60 users = self.filter_users_by_claims(claims)
61
62 if len(users) == 1:
63 user = users.get()
64 self.update_user_from_claims(user, claims)
65 return user
66 elif settings.OIDC_CREATE_USER:
67 return self.create_user(claims)
68 else:
69 LOGGER.debug(
70 "Login failed: No user with username %s found, and "
71 "OIDC_CREATE_USER is False",
72 self.get_username(claims),
73 )
74 return None
75
76 def update_user_from_claims(self, user, claims):
77 user.email = claims.get(settings.OIDC_EMAIL_CLAIM, "")
78 user.first_name = claims.get(settings.OIDC_FIRSTNAME_CLAIM, "")
79 user.last_name = claims.get(settings.OIDC_LASTNAME_CLAIM, "")
80 user.save()
81
82 def filter_users_by_claims(self, claims):
83 username = self.get_username(claims)
84 return self.UserModel.objects.filter(username__iexact=username)
85
86 def cached_request(self, method, token, cache_prefix):
87 token_hash = hashlib.sha256(force_bytes(token)).hexdigest()
88
89 func = functools.partial(method, token, None, None)
90
91 return cache.get_or_set(
92 f"{cache_prefix}.{token_hash}",
93 func,
94 timeout=settings.OIDC_BEARER_TOKEN_REVALIDATION_TIME,
95 )
96
97 def create_user(self, claims):
98 """Return object for a newly created user account."""
99
100 username = self.get_username(claims)
101 email = claims.get(settings.OIDC_EMAIL_CLAIM, "")
102 first_name = claims.get(settings.OIDC_FIRSTNAME_CLAIM, "")
103 last_name = claims.get(settings.OIDC_LASTNAME_CLAIM, "")
104
105 return self.UserModel.objects.create(
106 username=username, email=email, first_name=first_name, last_name=last_name
107 )
108
109 def get_username(self, claims):
110 try:
111 return claims[settings.OIDC_USERNAME_CLAIM]
112 except KeyError:
113 raise SuspiciousOperation("Couldn't find username claim")
114
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/timed/authentication.py b/timed/authentication.py
--- a/timed/authentication.py
+++ b/timed/authentication.py
@@ -8,6 +8,7 @@
from django.core.exceptions import SuspiciousOperation
from django.utils.encoding import force_bytes
from mozilla_django_oidc.auth import LOGGER, OIDCAuthenticationBackend
+from rest_framework.exceptions import AuthenticationFailed
class TimedOIDCAuthenticationBackend(OIDCAuthenticationBackend):
@@ -37,20 +38,29 @@
claims = self.cached_request(
self.get_userinfo, access_token, "auth.userinfo"
)
+ return claims
except requests.HTTPError as e:
- if not (
- e.response.status_code in [401, 403] and settings.OIDC_CHECK_INTROSPECT
- ):
+ if e.response.status_code not in [401, 403]:
raise e
-
- # check introspection if userinfo fails (confidental client)
- claims = self.cached_request(
- self.get_introspection, access_token, "auth.introspection"
- )
- if "client_id" not in claims:
- raise SuspiciousOperation("client_id not present in introspection")
-
- return claims
+ if settings.OIDC_CHECK_INTROSPECT:
+ try:
+ # check introspection if userinfo fails (confidential client)
+ claims = self.cached_request(
+ self.get_introspection, access_token, "auth.introspection"
+ )
+ if "client_id" not in claims:
+ raise SuspiciousOperation(
+ "client_id not present in introspection"
+ )
+ return claims
+ except requests.HTTPError as e:
+ # if the authorization fails it's not a valid client or
+ # the token is expired and permission is denied.
+ # Handing on the 401 Client Error would be transformed into
+ # a 500 by Django's exception handling. But that's not what we want.
+ if e.response.status_code not in [401, 403]: # pragma: no cover
+ raise e
+ raise AuthenticationFailed()
def get_or_create_user(self, access_token, id_token, payload):
"""Verify claims and return user, otherwise raise an Exception."""
| {"golden_diff": "diff --git a/timed/authentication.py b/timed/authentication.py\n--- a/timed/authentication.py\n+++ b/timed/authentication.py\n@@ -8,6 +8,7 @@\n from django.core.exceptions import SuspiciousOperation\n from django.utils.encoding import force_bytes\n from mozilla_django_oidc.auth import LOGGER, OIDCAuthenticationBackend\n+from rest_framework.exceptions import AuthenticationFailed\n \n \n class TimedOIDCAuthenticationBackend(OIDCAuthenticationBackend):\n@@ -37,20 +38,29 @@\n claims = self.cached_request(\n self.get_userinfo, access_token, \"auth.userinfo\"\n )\n+ return claims\n except requests.HTTPError as e:\n- if not (\n- e.response.status_code in [401, 403] and settings.OIDC_CHECK_INTROSPECT\n- ):\n+ if e.response.status_code not in [401, 403]:\n raise e\n-\n- # check introspection if userinfo fails (confidental client)\n- claims = self.cached_request(\n- self.get_introspection, access_token, \"auth.introspection\"\n- )\n- if \"client_id\" not in claims:\n- raise SuspiciousOperation(\"client_id not present in introspection\")\n-\n- return claims\n+ if settings.OIDC_CHECK_INTROSPECT:\n+ try:\n+ # check introspection if userinfo fails (confidential client)\n+ claims = self.cached_request(\n+ self.get_introspection, access_token, \"auth.introspection\"\n+ )\n+ if \"client_id\" not in claims:\n+ raise SuspiciousOperation(\n+ \"client_id not present in introspection\"\n+ )\n+ return claims\n+ except requests.HTTPError as e:\n+ # if the authorization fails it's not a valid client or\n+ # the token is expired and permission is denied.\n+ # Handing on the 401 Client Error would be transformed into\n+ # a 500 by Django's exception handling. But that's not what we want.\n+ if e.response.status_code not in [401, 403]: # pragma: no cover\n+ raise e\n+ raise AuthenticationFailed()\n \n def get_or_create_user(self, access_token, id_token, payload):\n \"\"\"Verify claims and return user, otherwise raise an Exception.\"\"\"\n", "issue": "bug(auth): requests to the api with an invalid token receive a response status 500 instead of 401\n\n", "before_files": [{"content": "import base64\nimport functools\nimport hashlib\n\nimport requests\nfrom django.conf import settings\nfrom django.core.cache import cache\nfrom django.core.exceptions import SuspiciousOperation\nfrom django.utils.encoding import force_bytes\nfrom mozilla_django_oidc.auth import LOGGER, OIDCAuthenticationBackend\n\n\nclass TimedOIDCAuthenticationBackend(OIDCAuthenticationBackend):\n def get_introspection(self, access_token, id_token, payload):\n \"\"\"Return user details dictionary.\"\"\"\n\n basic = base64.b64encode(\n f\"{settings.OIDC_RP_INTROSPECT_CLIENT_ID}:{settings.OIDC_RP_INTROSPECT_CLIENT_SECRET}\".encode(\n \"utf-8\"\n )\n ).decode()\n headers = {\n \"Authorization\": f\"Basic {basic}\",\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n }\n response = requests.post(\n settings.OIDC_OP_INTROSPECT_ENDPOINT,\n verify=settings.OIDC_VERIFY_SSL,\n headers=headers,\n data={\"token\": access_token},\n )\n response.raise_for_status()\n return response.json()\n\n def get_userinfo_or_introspection(self, access_token):\n try:\n claims = self.cached_request(\n self.get_userinfo, access_token, \"auth.userinfo\"\n )\n except requests.HTTPError as e:\n if not (\n e.response.status_code in [401, 403] and settings.OIDC_CHECK_INTROSPECT\n ):\n raise e\n\n # check introspection if userinfo fails (confidental client)\n claims = self.cached_request(\n self.get_introspection, access_token, \"auth.introspection\"\n )\n if 
\"client_id\" not in claims:\n raise SuspiciousOperation(\"client_id not present in introspection\")\n\n return claims\n\n def get_or_create_user(self, access_token, id_token, payload):\n \"\"\"Verify claims and return user, otherwise raise an Exception.\"\"\"\n\n claims = self.get_userinfo_or_introspection(access_token)\n\n users = self.filter_users_by_claims(claims)\n\n if len(users) == 1:\n user = users.get()\n self.update_user_from_claims(user, claims)\n return user\n elif settings.OIDC_CREATE_USER:\n return self.create_user(claims)\n else:\n LOGGER.debug(\n \"Login failed: No user with username %s found, and \"\n \"OIDC_CREATE_USER is False\",\n self.get_username(claims),\n )\n return None\n\n def update_user_from_claims(self, user, claims):\n user.email = claims.get(settings.OIDC_EMAIL_CLAIM, \"\")\n user.first_name = claims.get(settings.OIDC_FIRSTNAME_CLAIM, \"\")\n user.last_name = claims.get(settings.OIDC_LASTNAME_CLAIM, \"\")\n user.save()\n\n def filter_users_by_claims(self, claims):\n username = self.get_username(claims)\n return self.UserModel.objects.filter(username__iexact=username)\n\n def cached_request(self, method, token, cache_prefix):\n token_hash = hashlib.sha256(force_bytes(token)).hexdigest()\n\n func = functools.partial(method, token, None, None)\n\n return cache.get_or_set(\n f\"{cache_prefix}.{token_hash}\",\n func,\n timeout=settings.OIDC_BEARER_TOKEN_REVALIDATION_TIME,\n )\n\n def create_user(self, claims):\n \"\"\"Return object for a newly created user account.\"\"\"\n\n username = self.get_username(claims)\n email = claims.get(settings.OIDC_EMAIL_CLAIM, \"\")\n first_name = claims.get(settings.OIDC_FIRSTNAME_CLAIM, \"\")\n last_name = claims.get(settings.OIDC_LASTNAME_CLAIM, \"\")\n\n return self.UserModel.objects.create(\n username=username, email=email, first_name=first_name, last_name=last_name\n )\n\n def get_username(self, claims):\n try:\n return claims[settings.OIDC_USERNAME_CLAIM]\n except KeyError:\n raise SuspiciousOperation(\"Couldn't find username claim\")\n", "path": "timed/authentication.py"}], "after_files": [{"content": "import base64\nimport functools\nimport hashlib\n\nimport requests\nfrom django.conf import settings\nfrom django.core.cache import cache\nfrom django.core.exceptions import SuspiciousOperation\nfrom django.utils.encoding import force_bytes\nfrom mozilla_django_oidc.auth import LOGGER, OIDCAuthenticationBackend\nfrom rest_framework.exceptions import AuthenticationFailed\n\n\nclass TimedOIDCAuthenticationBackend(OIDCAuthenticationBackend):\n def get_introspection(self, access_token, id_token, payload):\n \"\"\"Return user details dictionary.\"\"\"\n\n basic = base64.b64encode(\n f\"{settings.OIDC_RP_INTROSPECT_CLIENT_ID}:{settings.OIDC_RP_INTROSPECT_CLIENT_SECRET}\".encode(\n \"utf-8\"\n )\n ).decode()\n headers = {\n \"Authorization\": f\"Basic {basic}\",\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n }\n response = requests.post(\n settings.OIDC_OP_INTROSPECT_ENDPOINT,\n verify=settings.OIDC_VERIFY_SSL,\n headers=headers,\n data={\"token\": access_token},\n )\n response.raise_for_status()\n return response.json()\n\n def get_userinfo_or_introspection(self, access_token):\n try:\n claims = self.cached_request(\n self.get_userinfo, access_token, \"auth.userinfo\"\n )\n return claims\n except requests.HTTPError as e:\n if e.response.status_code not in [401, 403]:\n raise e\n if settings.OIDC_CHECK_INTROSPECT:\n try:\n # check introspection if userinfo fails (confidential client)\n claims = self.cached_request(\n 
self.get_introspection, access_token, \"auth.introspection\"\n )\n if \"client_id\" not in claims:\n raise SuspiciousOperation(\n \"client_id not present in introspection\"\n )\n return claims\n except requests.HTTPError as e:\n # if the authorization fails it's not a valid client or\n # the token is expired and permission is denied.\n # Handing on the 401 Client Error would be transformed into\n # a 500 by Django's exception handling. But that's not what we want.\n if e.response.status_code not in [401, 403]: # pragma: no cover\n raise e\n raise AuthenticationFailed()\n\n def get_or_create_user(self, access_token, id_token, payload):\n \"\"\"Verify claims and return user, otherwise raise an Exception.\"\"\"\n\n claims = self.get_userinfo_or_introspection(access_token)\n\n users = self.filter_users_by_claims(claims)\n\n if len(users) == 1:\n user = users.get()\n self.update_user_from_claims(user, claims)\n return user\n elif settings.OIDC_CREATE_USER:\n return self.create_user(claims)\n else:\n LOGGER.debug(\n \"Login failed: No user with username %s found, and \"\n \"OIDC_CREATE_USER is False\",\n self.get_username(claims),\n )\n return None\n\n def update_user_from_claims(self, user, claims):\n user.email = claims.get(settings.OIDC_EMAIL_CLAIM, \"\")\n user.first_name = claims.get(settings.OIDC_FIRSTNAME_CLAIM, \"\")\n user.last_name = claims.get(settings.OIDC_LASTNAME_CLAIM, \"\")\n user.save()\n\n def filter_users_by_claims(self, claims):\n username = self.get_username(claims)\n return self.UserModel.objects.filter(username__iexact=username)\n\n def cached_request(self, method, token, cache_prefix):\n token_hash = hashlib.sha256(force_bytes(token)).hexdigest()\n\n func = functools.partial(method, token, None, None)\n\n return cache.get_or_set(\n f\"{cache_prefix}.{token_hash}\",\n func,\n timeout=settings.OIDC_BEARER_TOKEN_REVALIDATION_TIME,\n )\n\n def create_user(self, claims):\n \"\"\"Return object for a newly created user account.\"\"\"\n\n username = self.get_username(claims)\n email = claims.get(settings.OIDC_EMAIL_CLAIM, \"\")\n first_name = claims.get(settings.OIDC_FIRSTNAME_CLAIM, \"\")\n last_name = claims.get(settings.OIDC_LASTNAME_CLAIM, \"\")\n\n return self.UserModel.objects.create(\n username=username, email=email, first_name=first_name, last_name=last_name\n )\n\n def get_username(self, claims):\n try:\n return claims[settings.OIDC_USERNAME_CLAIM]\n except KeyError:\n raise SuspiciousOperation(\"Couldn't find username claim\")\n", "path": "timed/authentication.py"}]} | 1,383 | 520 |
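
The 500 in the record above comes from letting `requests.HTTPError` escape the authentication backend: when both the userinfo and the introspection calls answer 401 or 403 for an invalid or expired token, the unhandled exception is treated as a server error. The patch converts that expected case into `rest_framework.exceptions.AuthenticationFailed`, which Django REST framework renders as HTTP 401. The stub below only sketches that control flow; it uses stand-in exception classes rather than the real `requests` and DRF ones, and it omits the `client_id` check and the `OIDC_CHECK_INTROSPECT` toggle present in the actual patch.

```python
# Control-flow sketch with stand-in classes (not the project's real code).
class HTTPError(Exception):                  # stand-in for requests.HTTPError
    def __init__(self, status):
        self.status = status

class AuthenticationFailed(Exception):       # stand-in for DRF's AuthenticationFailed
    status_code = 401                        # DRF maps the real one to an HTTP 401 response

def fetch_claims(call_userinfo, call_introspection):
    try:
        return call_userinfo()
    except HTTPError as exc:
        if exc.status not in (401, 403):
            raise                            # unexpected failure keeps propagating (becomes a 500)
        try:
            return call_introspection()      # confidential clients may still succeed here
        except HTTPError as exc:
            if exc.status not in (401, 403):
                raise
            raise AuthenticationFailed()     # invalid/expired token: clean 401 instead of 500

def userinfo():
    raise HTTPError(401)

def introspection():
    raise HTTPError(401)

try:
    fetch_claims(userinfo, introspection)
except AuthenticationFailed as exc:
    print("API responds with", exc.status_code)
```
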
gh_patches_debug_20812 | rasdani/github-patches | git_diff | ipython__ipython-5202 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
node != nodejs within Debian packages
As part of resolving https://github.com/ipython/nbviewer/issues/196, (and https://github.com/ipython/nbviewer/pull/194), @ahmadia and I ended up finding out that Debian based Linux Distributions build the `node` binary as `nodejs`.
IPython nbconvert defaults to using `node`, which is actually `ax25-node` on Debian based systems. [See relevant posting on the Debian mailing list for more](https://lists.debian.org/debian-devel-announce/2012/07/msg00002.html).
This won't affect users of nvm (who provide `node`) or those who build from source. This will affect certain strains of Ubuntu (Saucy Salamander was what I used to test).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `IPython/nbconvert/filters/markdown.py`
Content:
```
1 """Markdown filters
2 This file contains a collection of utility filters for dealing with
3 markdown within Jinja templates.
4 """
5 #-----------------------------------------------------------------------------
6 # Copyright (c) 2013, the IPython Development Team.
7 #
8 # Distributed under the terms of the Modified BSD License.
9 #
10 # The full license is in the file COPYING.txt, distributed with this software.
11 #-----------------------------------------------------------------------------
12
13 #-----------------------------------------------------------------------------
14 # Imports
15 #-----------------------------------------------------------------------------
16 from __future__ import print_function
17
18 # Stdlib imports
19 import os
20 import subprocess
21 from io import TextIOWrapper, BytesIO
22
23 # IPython imports
24 from IPython.nbconvert.utils.pandoc import pandoc
25 from IPython.nbconvert.utils.exceptions import ConversionException
26 from IPython.utils.process import find_cmd, FindCmdError
27 from IPython.utils.py3compat import cast_bytes
28
29 #-----------------------------------------------------------------------------
30 # Functions
31 #-----------------------------------------------------------------------------
32 marked = os.path.join(os.path.dirname(__file__), "marked.js")
33
34 __all__ = [
35 'markdown2html',
36 'markdown2html_pandoc',
37 'markdown2html_marked',
38 'markdown2latex',
39 'markdown2rst',
40 ]
41
42 class NodeJSMissing(ConversionException):
43 """Exception raised when node.js is missing."""
44 pass
45
46 def markdown2latex(source):
47 """Convert a markdown string to LaTeX via pandoc.
48
49 This function will raise an error if pandoc is not installed.
50 Any error messages generated by pandoc are printed to stderr.
51
52 Parameters
53 ----------
54 source : string
55 Input string, assumed to be valid markdown.
56
57 Returns
58 -------
59 out : string
60 Output as returned by pandoc.
61 """
62 return pandoc(source, 'markdown', 'latex')
63
64 def markdown2html_pandoc(source):
65 """Convert a markdown string to HTML via pandoc"""
66 return pandoc(source, 'markdown', 'html', extra_args=['--mathjax'])
67
68 def markdown2html_marked(source, encoding='utf-8'):
69 """Convert a markdown string to HTML via marked"""
70 command = ['node', marked]
71 try:
72 p = subprocess.Popen(command,
73 stdin=subprocess.PIPE, stdout=subprocess.PIPE
74 )
75 except OSError as e:
76 raise NodeJSMissing(
77 "The command '%s' returned an error: %s.\n" % (" ".join(command), e) +
78 "Please check that Node.js is installed."
79 )
80 out, _ = p.communicate(cast_bytes(source, encoding))
81 out = TextIOWrapper(BytesIO(out), encoding, 'replace').read()
82 return out.rstrip('\n')
83
84 def markdown2rst(source):
85 """Convert a markdown string to LaTeX via pandoc.
86
87 This function will raise an error if pandoc is not installed.
88 Any error messages generated by pandoc are printed to stderr.
89
90 Parameters
91 ----------
92 source : string
93 Input string, assumed to be valid markdown.
94
95 Returns
96 -------
97 out : string
98 Output as returned by pandoc.
99 """
100 return pandoc(source, 'markdown', 'rst')
101
102 try:
103 find_cmd('node')
104 except FindCmdError:
105 markdown2html = markdown2html_pandoc
106 else:
107 markdown2html = markdown2html_marked
108
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/IPython/nbconvert/filters/markdown.py b/IPython/nbconvert/filters/markdown.py
--- a/IPython/nbconvert/filters/markdown.py
+++ b/IPython/nbconvert/filters/markdown.py
@@ -67,7 +67,7 @@
def markdown2html_marked(source, encoding='utf-8'):
"""Convert a markdown string to HTML via marked"""
- command = ['node', marked]
+ command = [node_cmd, marked]
try:
p = subprocess.Popen(command,
stdin=subprocess.PIPE, stdout=subprocess.PIPE
@@ -99,9 +99,18 @@
"""
return pandoc(source, 'markdown', 'rst')
+# prefer md2html via marked if node.js is available
+# node is called nodejs on debian, so try that first
+node_cmd = 'nodejs'
try:
- find_cmd('node')
+ find_cmd(node_cmd)
except FindCmdError:
- markdown2html = markdown2html_pandoc
+ node_cmd = 'node'
+ try:
+ find_cmd(node_cmd)
+ except FindCmdError:
+ markdown2html = markdown2html_pandoc
+ else:
+ markdown2html = markdown2html_marked
else:
markdown2html = markdown2html_marked
| {"golden_diff": "diff --git a/IPython/nbconvert/filters/markdown.py b/IPython/nbconvert/filters/markdown.py\n--- a/IPython/nbconvert/filters/markdown.py\n+++ b/IPython/nbconvert/filters/markdown.py\n@@ -67,7 +67,7 @@\n \n def markdown2html_marked(source, encoding='utf-8'):\n \"\"\"Convert a markdown string to HTML via marked\"\"\"\n- command = ['node', marked]\n+ command = [node_cmd, marked]\n try:\n p = subprocess.Popen(command,\n stdin=subprocess.PIPE, stdout=subprocess.PIPE\n@@ -99,9 +99,18 @@\n \"\"\"\n return pandoc(source, 'markdown', 'rst')\n \n+# prefer md2html via marked if node.js is available\n+# node is called nodejs on debian, so try that first\n+node_cmd = 'nodejs'\n try:\n- find_cmd('node')\n+ find_cmd(node_cmd)\n except FindCmdError:\n- markdown2html = markdown2html_pandoc\n+ node_cmd = 'node'\n+ try:\n+ find_cmd(node_cmd)\n+ except FindCmdError:\n+ markdown2html = markdown2html_pandoc\n+ else:\n+ markdown2html = markdown2html_marked\n else:\n markdown2html = markdown2html_marked\n", "issue": "node != nodejs within Debian packages\nAs part of resolving https://github.com/ipython/nbviewer/issues/196, (and https://github.com/ipython/nbviewer/pull/194), @ahmadia and I ended up finding out that Debian based Linux Distributions build the `node` binary as `nodejs`.\n\nIPython nbconvert defaults to using `node`, which is actually `ax25-node` on Debian based systems. [See relevant posting on the Debian mailing list for more](https://lists.debian.org/debian-devel-announce/2012/07/msg00002.html).\n\nThis won't affect users of nvm (who provide `node`) or those who build from source. This will affect certain strains of Ubuntu (Saucy Salamander was what I used to test).\n\n", "before_files": [{"content": "\"\"\"Markdown filters\nThis file contains a collection of utility filters for dealing with \nmarkdown within Jinja templates.\n\"\"\"\n#-----------------------------------------------------------------------------\n# Copyright (c) 2013, the IPython Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\nfrom __future__ import print_function\n\n# Stdlib imports\nimport os\nimport subprocess\nfrom io import TextIOWrapper, BytesIO\n\n# IPython imports\nfrom IPython.nbconvert.utils.pandoc import pandoc\nfrom IPython.nbconvert.utils.exceptions import ConversionException\nfrom IPython.utils.process import find_cmd, FindCmdError\nfrom IPython.utils.py3compat import cast_bytes\n\n#-----------------------------------------------------------------------------\n# Functions\n#-----------------------------------------------------------------------------\nmarked = os.path.join(os.path.dirname(__file__), \"marked.js\")\n\n__all__ = [\n 'markdown2html',\n 'markdown2html_pandoc',\n 'markdown2html_marked',\n 'markdown2latex',\n 'markdown2rst',\n]\n\nclass NodeJSMissing(ConversionException):\n \"\"\"Exception raised when node.js is missing.\"\"\"\n pass\n\ndef markdown2latex(source):\n \"\"\"Convert a markdown string to LaTeX via pandoc.\n\n This function will raise an error if pandoc is not installed.\n Any error messages generated by pandoc are printed to stderr.\n\n Parameters\n ----------\n source : string\n Input string, assumed to be valid 
markdown.\n\n Returns\n -------\n out : string\n Output as returned by pandoc.\n \"\"\"\n return pandoc(source, 'markdown', 'latex')\n\ndef markdown2html_pandoc(source):\n \"\"\"Convert a markdown string to HTML via pandoc\"\"\"\n return pandoc(source, 'markdown', 'html', extra_args=['--mathjax'])\n\ndef markdown2html_marked(source, encoding='utf-8'):\n \"\"\"Convert a markdown string to HTML via marked\"\"\"\n command = ['node', marked]\n try:\n p = subprocess.Popen(command,\n stdin=subprocess.PIPE, stdout=subprocess.PIPE\n )\n except OSError as e:\n raise NodeJSMissing(\n \"The command '%s' returned an error: %s.\\n\" % (\" \".join(command), e) +\n \"Please check that Node.js is installed.\"\n )\n out, _ = p.communicate(cast_bytes(source, encoding))\n out = TextIOWrapper(BytesIO(out), encoding, 'replace').read()\n return out.rstrip('\\n')\n\ndef markdown2rst(source):\n \"\"\"Convert a markdown string to LaTeX via pandoc.\n\n This function will raise an error if pandoc is not installed.\n Any error messages generated by pandoc are printed to stderr.\n\n Parameters\n ----------\n source : string\n Input string, assumed to be valid markdown.\n\n Returns\n -------\n out : string\n Output as returned by pandoc.\n \"\"\"\n return pandoc(source, 'markdown', 'rst')\n\ntry:\n find_cmd('node')\nexcept FindCmdError:\n markdown2html = markdown2html_pandoc\nelse:\n markdown2html = markdown2html_marked\n", "path": "IPython/nbconvert/filters/markdown.py"}], "after_files": [{"content": "\"\"\"Markdown filters\nThis file contains a collection of utility filters for dealing with \nmarkdown within Jinja templates.\n\"\"\"\n#-----------------------------------------------------------------------------\n# Copyright (c) 2013, the IPython Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\nfrom __future__ import print_function\n\n# Stdlib imports\nimport os\nimport subprocess\nfrom io import TextIOWrapper, BytesIO\n\n# IPython imports\nfrom IPython.nbconvert.utils.pandoc import pandoc\nfrom IPython.nbconvert.utils.exceptions import ConversionException\nfrom IPython.utils.process import find_cmd, FindCmdError\nfrom IPython.utils.py3compat import cast_bytes\n\n#-----------------------------------------------------------------------------\n# Functions\n#-----------------------------------------------------------------------------\nmarked = os.path.join(os.path.dirname(__file__), \"marked.js\")\n\n__all__ = [\n 'markdown2html',\n 'markdown2html_pandoc',\n 'markdown2html_marked',\n 'markdown2latex',\n 'markdown2rst',\n]\n\nclass NodeJSMissing(ConversionException):\n \"\"\"Exception raised when node.js is missing.\"\"\"\n pass\n\ndef markdown2latex(source):\n \"\"\"Convert a markdown string to LaTeX via pandoc.\n\n This function will raise an error if pandoc is not installed.\n Any error messages generated by pandoc are printed to stderr.\n\n Parameters\n ----------\n source : string\n Input string, assumed to be valid markdown.\n\n Returns\n -------\n out : string\n Output as returned by pandoc.\n \"\"\"\n return pandoc(source, 'markdown', 'latex')\n\ndef markdown2html_pandoc(source):\n \"\"\"Convert a markdown string to HTML via pandoc\"\"\"\n return 
pandoc(source, 'markdown', 'html', extra_args=['--mathjax'])\n\ndef markdown2html_marked(source, encoding='utf-8'):\n \"\"\"Convert a markdown string to HTML via marked\"\"\"\n command = [node_cmd, marked]\n try:\n p = subprocess.Popen(command,\n stdin=subprocess.PIPE, stdout=subprocess.PIPE\n )\n except OSError as e:\n raise NodeJSMissing(\n \"The command '%s' returned an error: %s.\\n\" % (\" \".join(command), e) +\n \"Please check that Node.js is installed.\"\n )\n out, _ = p.communicate(cast_bytes(source, encoding))\n out = TextIOWrapper(BytesIO(out), encoding, 'replace').read()\n return out.rstrip('\\n')\n\ndef markdown2rst(source):\n \"\"\"Convert a markdown string to LaTeX via pandoc.\n\n This function will raise an error if pandoc is not installed.\n Any error messages generated by pandoc are printed to stderr.\n\n Parameters\n ----------\n source : string\n Input string, assumed to be valid markdown.\n\n Returns\n -------\n out : string\n Output as returned by pandoc.\n \"\"\"\n return pandoc(source, 'markdown', 'rst')\n\n# prefer md2html via marked if node.js is available\n# node is called nodejs on debian, so try that first\nnode_cmd = 'nodejs'\ntry:\n find_cmd(node_cmd)\nexcept FindCmdError:\n node_cmd = 'node'\n try:\n find_cmd(node_cmd)\n except FindCmdError:\n markdown2html = markdown2html_pandoc\n else:\n markdown2html = markdown2html_marked\nelse:\n markdown2html = markdown2html_marked\n", "path": "IPython/nbconvert/filters/markdown.py"}]} | 1,345 | 293 |
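
Debian's packaging explains the record above: the Node.js interpreter is installed as `nodejs`, while `node` may be missing or belong to the unrelated `ax25-node` package, so probing only for `node` either misses Node.js or picks the wrong binary. The patch probes `nodejs` first, then `node`, and otherwise falls back to pandoc. The snippet below reproduces that probing order with `shutil.which` from the standard library; it is an illustration only, since the real code uses `IPython.utils.process.find_cmd` and `FindCmdError`.

```python
# Probe order illustrated with shutil.which (the real patch uses find_cmd/FindCmdError).
import shutil

def pick_node_command(candidates=("nodejs", "node")):
    """Return the first Node.js executable name that resolves, else None."""
    for name in candidates:
        if shutil.which(name):
            return name
    return None

node_cmd = pick_node_command()
if node_cmd is None:
    print("no Node.js found: markdown2html falls back to pandoc")
else:
    print("markdown2html uses marked via", node_cmd)
```
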
gh_patches_debug_1512 | rasdani/github-patches | git_diff | Mailu__Mailu-2034 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Timezone in containers
Hello,
I have seen that timezones are not set for containers. This causes logs to have wrong timestamps and email "received" headers have wrong timezones.
It should be possible to set the timezone for all containers. The setting should be exposed and described in the .env file.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/admin/mailu/configuration.py`
Content:
```
1 import os
2
3 from datetime import timedelta
4 from socrate import system
5 import ipaddress
6
7 DEFAULT_CONFIG = {
8 # Specific to the admin UI
9 'DOCKER_SOCKET': 'unix:///var/run/docker.sock',
10 'BABEL_DEFAULT_LOCALE': 'en',
11 'BABEL_DEFAULT_TIMEZONE': 'UTC',
12 'BOOTSTRAP_SERVE_LOCAL': True,
13 'RATELIMIT_STORAGE_URL': '',
14 'QUOTA_STORAGE_URL': '',
15 'DEBUG': False,
16 'DOMAIN_REGISTRATION': False,
17 'TEMPLATES_AUTO_RELOAD': True,
18 'MEMORY_SESSIONS': False,
19 # Database settings
20 'DB_FLAVOR': None,
21 'DB_USER': 'mailu',
22 'DB_PW': None,
23 'DB_HOST': 'database',
24 'DB_NAME': 'mailu',
25 'SQLITE_DATABASE_FILE':'data/main.db',
26 'SQLALCHEMY_DATABASE_URI': 'sqlite:////data/main.db',
27 'SQLALCHEMY_TRACK_MODIFICATIONS': False,
28 # Statistics management
29 'INSTANCE_ID_PATH': '/data/instance',
30 'STATS_ENDPOINT': '18.{}.stats.mailu.io',
31 # Common configuration variables
32 'SECRET_KEY': 'changeMe',
33 'DOMAIN': 'mailu.io',
34 'HOSTNAMES': 'mail.mailu.io,alternative.mailu.io,yetanother.mailu.io',
35 'POSTMASTER': 'postmaster',
36 'WILDCARD_SENDERS': '',
37 'TLS_FLAVOR': 'cert',
38 'INBOUND_TLS_ENFORCE': False,
39 'DEFER_ON_TLS_ERROR': True,
40 'AUTH_RATELIMIT_IP': '60/hour',
41 'AUTH_RATELIMIT_IP_V4_MASK': 24,
42 'AUTH_RATELIMIT_IP_V6_MASK': 56,
43 'AUTH_RATELIMIT_USER': '100/day',
44 'AUTH_RATELIMIT_EXEMPTION': '',
45 'AUTH_RATELIMIT_EXEMPTION_LENGTH': 86400,
46 'DISABLE_STATISTICS': False,
47 # Mail settings
48 'DMARC_RUA': None,
49 'DMARC_RUF': None,
50 'WELCOME': False,
51 'WELCOME_SUBJECT': 'Dummy welcome topic',
52 'WELCOME_BODY': 'Dummy welcome body',
53 'DKIM_SELECTOR': 'dkim',
54 'DKIM_PATH': '/dkim/{domain}.{selector}.key',
55 'DEFAULT_QUOTA': 1000000000,
56 'MESSAGE_RATELIMIT': '200/day',
57 'RECIPIENT_DELIMITER': '',
58 # Web settings
59 'SITENAME': 'Mailu',
60 'WEBSITE': 'https://mailu.io',
61 'ADMIN' : 'none',
62 'WEB_ADMIN': '/admin',
63 'WEB_WEBMAIL': '/webmail',
64 'WEBMAIL': 'none',
65 'RECAPTCHA_PUBLIC_KEY': '',
66 'RECAPTCHA_PRIVATE_KEY': '',
67 'LOGO_URL': None,
68 'LOGO_BACKGROUND': None,
69 # Advanced settings
70 'LOG_LEVEL': 'WARNING',
71 'SESSION_KEY_BITS': 128,
72 'SESSION_LIFETIME': 24,
73 'SESSION_COOKIE_SECURE': True,
74 'CREDENTIAL_ROUNDS': 12,
75 # Host settings
76 'HOST_IMAP': 'imap',
77 'HOST_LMTP': 'imap:2525',
78 'HOST_POP3': 'imap',
79 'HOST_SMTP': 'smtp',
80 'HOST_AUTHSMTP': 'smtp',
81 'HOST_ADMIN': 'admin',
82 'HOST_WEBMAIL': 'webmail',
83 'HOST_WEBDAV': 'webdav:5232',
84 'HOST_REDIS': 'redis',
85 'HOST_FRONT': 'front',
86 'SUBNET': '192.168.203.0/24',
87 'SUBNET6': None,
88 'POD_ADDRESS_RANGE': None
89 }
90
91 class ConfigManager(dict):
92 """ Naive configuration manager that uses environment only
93 """
94
95 DB_TEMPLATES = {
96 'sqlite': 'sqlite:////{SQLITE_DATABASE_FILE}',
97 'postgresql': 'postgresql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',
98 'mysql': 'mysql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}'
99 }
100
101 def __init__(self):
102 self.config = dict()
103
104 def get_host_address(self, name):
105 # if MYSERVICE_ADDRESS is defined, use this
106 if '{}_ADDRESS'.format(name) in os.environ:
107 return os.environ.get('{}_ADDRESS'.format(name))
108 # otherwise use the host name and resolve it
109 return system.resolve_address(self.config['HOST_{}'.format(name)])
110
111 def resolve_hosts(self):
112 self.config["IMAP_ADDRESS"] = self.get_host_address("IMAP")
113 self.config["POP3_ADDRESS"] = self.get_host_address("POP3")
114 self.config["AUTHSMTP_ADDRESS"] = self.get_host_address("AUTHSMTP")
115 self.config["SMTP_ADDRESS"] = self.get_host_address("SMTP")
116 self.config["REDIS_ADDRESS"] = self.get_host_address("REDIS")
117 if self.config["WEBMAIL"] != "none":
118 self.config["WEBMAIL_ADDRESS"] = self.get_host_address("WEBMAIL")
119
120 def __get_env(self, key, value):
121 key_file = key + "_FILE"
122 if key_file in os.environ:
123 with open(os.environ.get(key_file)) as file:
124 value_from_file = file.read()
125 return value_from_file.strip()
126 else:
127 return os.environ.get(key, value)
128
129 def __coerce_value(self, value):
130 if isinstance(value, str) and value.lower() in ('true','yes'):
131 return True
132 elif isinstance(value, str) and value.lower() in ('false', 'no'):
133 return False
134 return value
135
136 def init_app(self, app):
137 self.config.update(app.config)
138 # get environment variables
139 self.config.update({
140 key: self.__coerce_value(self.__get_env(key, value))
141 for key, value in DEFAULT_CONFIG.items()
142 })
143 self.resolve_hosts()
144
145 # automatically set the sqlalchemy string
146 if self.config['DB_FLAVOR']:
147 template = self.DB_TEMPLATES[self.config['DB_FLAVOR']]
148 self.config['SQLALCHEMY_DATABASE_URI'] = template.format(**self.config)
149
150 self.config['RATELIMIT_STORAGE_URL'] = 'redis://{0}/2'.format(self.config['REDIS_ADDRESS'])
151 self.config['QUOTA_STORAGE_URL'] = 'redis://{0}/1'.format(self.config['REDIS_ADDRESS'])
152 self.config['SESSION_STORAGE_URL'] = 'redis://{0}/3'.format(self.config['REDIS_ADDRESS'])
153 self.config['SESSION_COOKIE_SAMESITE'] = 'Strict'
154 self.config['SESSION_COOKIE_HTTPONLY'] = True
155 self.config['PERMANENT_SESSION_LIFETIME'] = timedelta(hours=int(self.config['SESSION_LIFETIME']))
156 hostnames = [host.strip() for host in self.config['HOSTNAMES'].split(',')]
157 self.config['AUTH_RATELIMIT_EXEMPTION'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['AUTH_RATELIMIT_EXEMPTION'].split(',')) if cidr)
158 self.config['HOSTNAMES'] = ','.join(hostnames)
159 self.config['HOSTNAME'] = hostnames[0]
160 # update the app config itself
161 app.config = self
162
163 def setdefault(self, key, value):
164 if key not in self.config:
165 self.config[key] = value
166 return self.config[key]
167
168 def get(self, *args):
169 return self.config.get(*args)
170
171 def keys(self):
172 return self.config.keys()
173
174 def __getitem__(self, key):
175 return self.config.get(key)
176
177 def __setitem__(self, key, value):
178 self.config[key] = value
179
180 def __contains__(self, key):
181 return key in self.config
182
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/core/admin/mailu/configuration.py b/core/admin/mailu/configuration.py
--- a/core/admin/mailu/configuration.py
+++ b/core/admin/mailu/configuration.py
@@ -72,6 +72,7 @@
'SESSION_LIFETIME': 24,
'SESSION_COOKIE_SECURE': True,
'CREDENTIAL_ROUNDS': 12,
+ 'TZ': 'Etc/UTC',
# Host settings
'HOST_IMAP': 'imap',
'HOST_LMTP': 'imap:2525',
| {"golden_diff": "diff --git a/core/admin/mailu/configuration.py b/core/admin/mailu/configuration.py\n--- a/core/admin/mailu/configuration.py\n+++ b/core/admin/mailu/configuration.py\n@@ -72,6 +72,7 @@\n 'SESSION_LIFETIME': 24,\n 'SESSION_COOKIE_SECURE': True,\n 'CREDENTIAL_ROUNDS': 12,\n+ 'TZ': 'Etc/UTC',\n # Host settings\n 'HOST_IMAP': 'imap',\n 'HOST_LMTP': 'imap:2525',\n", "issue": "Timezone in containers\nHello,\r\n\r\nI have seen that timezones are not set for containers. This causes logs to have wrong timestamps and email \"received\" headers have wrong timezones.\r\n\r\nIt should be possible to set the timezone for all containers. The setting should be exposed and described in the .env file.\n", "before_files": [{"content": "import os\n\nfrom datetime import timedelta\nfrom socrate import system\nimport ipaddress\n\nDEFAULT_CONFIG = {\n # Specific to the admin UI\n 'DOCKER_SOCKET': 'unix:///var/run/docker.sock',\n 'BABEL_DEFAULT_LOCALE': 'en',\n 'BABEL_DEFAULT_TIMEZONE': 'UTC',\n 'BOOTSTRAP_SERVE_LOCAL': True,\n 'RATELIMIT_STORAGE_URL': '',\n 'QUOTA_STORAGE_URL': '',\n 'DEBUG': False,\n 'DOMAIN_REGISTRATION': False,\n 'TEMPLATES_AUTO_RELOAD': True,\n 'MEMORY_SESSIONS': False,\n # Database settings\n 'DB_FLAVOR': None,\n 'DB_USER': 'mailu',\n 'DB_PW': None,\n 'DB_HOST': 'database',\n 'DB_NAME': 'mailu',\n 'SQLITE_DATABASE_FILE':'data/main.db',\n 'SQLALCHEMY_DATABASE_URI': 'sqlite:////data/main.db',\n 'SQLALCHEMY_TRACK_MODIFICATIONS': False,\n # Statistics management\n 'INSTANCE_ID_PATH': '/data/instance',\n 'STATS_ENDPOINT': '18.{}.stats.mailu.io',\n # Common configuration variables\n 'SECRET_KEY': 'changeMe',\n 'DOMAIN': 'mailu.io',\n 'HOSTNAMES': 'mail.mailu.io,alternative.mailu.io,yetanother.mailu.io',\n 'POSTMASTER': 'postmaster',\n 'WILDCARD_SENDERS': '',\n 'TLS_FLAVOR': 'cert',\n 'INBOUND_TLS_ENFORCE': False,\n 'DEFER_ON_TLS_ERROR': True,\n 'AUTH_RATELIMIT_IP': '60/hour',\n 'AUTH_RATELIMIT_IP_V4_MASK': 24,\n 'AUTH_RATELIMIT_IP_V6_MASK': 56,\n 'AUTH_RATELIMIT_USER': '100/day',\n 'AUTH_RATELIMIT_EXEMPTION': '',\n 'AUTH_RATELIMIT_EXEMPTION_LENGTH': 86400,\n 'DISABLE_STATISTICS': False,\n # Mail settings\n 'DMARC_RUA': None,\n 'DMARC_RUF': None,\n 'WELCOME': False,\n 'WELCOME_SUBJECT': 'Dummy welcome topic',\n 'WELCOME_BODY': 'Dummy welcome body',\n 'DKIM_SELECTOR': 'dkim',\n 'DKIM_PATH': '/dkim/{domain}.{selector}.key',\n 'DEFAULT_QUOTA': 1000000000,\n 'MESSAGE_RATELIMIT': '200/day',\n 'RECIPIENT_DELIMITER': '',\n # Web settings\n 'SITENAME': 'Mailu',\n 'WEBSITE': 'https://mailu.io',\n 'ADMIN' : 'none',\n 'WEB_ADMIN': '/admin',\n 'WEB_WEBMAIL': '/webmail',\n 'WEBMAIL': 'none',\n 'RECAPTCHA_PUBLIC_KEY': '',\n 'RECAPTCHA_PRIVATE_KEY': '',\n 'LOGO_URL': None,\n 'LOGO_BACKGROUND': None,\n # Advanced settings\n 'LOG_LEVEL': 'WARNING',\n 'SESSION_KEY_BITS': 128,\n 'SESSION_LIFETIME': 24,\n 'SESSION_COOKIE_SECURE': True,\n 'CREDENTIAL_ROUNDS': 12,\n # Host settings\n 'HOST_IMAP': 'imap',\n 'HOST_LMTP': 'imap:2525',\n 'HOST_POP3': 'imap',\n 'HOST_SMTP': 'smtp',\n 'HOST_AUTHSMTP': 'smtp',\n 'HOST_ADMIN': 'admin',\n 'HOST_WEBMAIL': 'webmail',\n 'HOST_WEBDAV': 'webdav:5232',\n 'HOST_REDIS': 'redis',\n 'HOST_FRONT': 'front',\n 'SUBNET': '192.168.203.0/24',\n 'SUBNET6': None,\n 'POD_ADDRESS_RANGE': None\n}\n\nclass ConfigManager(dict):\n \"\"\" Naive configuration manager that uses environment only\n \"\"\"\n\n DB_TEMPLATES = {\n 'sqlite': 'sqlite:////{SQLITE_DATABASE_FILE}',\n 'postgresql': 'postgresql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',\n 'mysql': 
'mysql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}'\n }\n\n def __init__(self):\n self.config = dict()\n\n def get_host_address(self, name):\n # if MYSERVICE_ADDRESS is defined, use this\n if '{}_ADDRESS'.format(name) in os.environ:\n return os.environ.get('{}_ADDRESS'.format(name))\n # otherwise use the host name and resolve it\n return system.resolve_address(self.config['HOST_{}'.format(name)])\n\n def resolve_hosts(self):\n self.config[\"IMAP_ADDRESS\"] = self.get_host_address(\"IMAP\")\n self.config[\"POP3_ADDRESS\"] = self.get_host_address(\"POP3\")\n self.config[\"AUTHSMTP_ADDRESS\"] = self.get_host_address(\"AUTHSMTP\")\n self.config[\"SMTP_ADDRESS\"] = self.get_host_address(\"SMTP\")\n self.config[\"REDIS_ADDRESS\"] = self.get_host_address(\"REDIS\")\n if self.config[\"WEBMAIL\"] != \"none\":\n self.config[\"WEBMAIL_ADDRESS\"] = self.get_host_address(\"WEBMAIL\")\n\n def __get_env(self, key, value):\n key_file = key + \"_FILE\"\n if key_file in os.environ:\n with open(os.environ.get(key_file)) as file:\n value_from_file = file.read()\n return value_from_file.strip()\n else:\n return os.environ.get(key, value)\n\n def __coerce_value(self, value):\n if isinstance(value, str) and value.lower() in ('true','yes'):\n return True\n elif isinstance(value, str) and value.lower() in ('false', 'no'):\n return False\n return value\n\n def init_app(self, app):\n self.config.update(app.config)\n # get environment variables\n self.config.update({\n key: self.__coerce_value(self.__get_env(key, value))\n for key, value in DEFAULT_CONFIG.items()\n })\n self.resolve_hosts()\n\n # automatically set the sqlalchemy string\n if self.config['DB_FLAVOR']:\n template = self.DB_TEMPLATES[self.config['DB_FLAVOR']]\n self.config['SQLALCHEMY_DATABASE_URI'] = template.format(**self.config)\n\n self.config['RATELIMIT_STORAGE_URL'] = 'redis://{0}/2'.format(self.config['REDIS_ADDRESS'])\n self.config['QUOTA_STORAGE_URL'] = 'redis://{0}/1'.format(self.config['REDIS_ADDRESS'])\n self.config['SESSION_STORAGE_URL'] = 'redis://{0}/3'.format(self.config['REDIS_ADDRESS'])\n self.config['SESSION_COOKIE_SAMESITE'] = 'Strict'\n self.config['SESSION_COOKIE_HTTPONLY'] = True\n self.config['PERMANENT_SESSION_LIFETIME'] = timedelta(hours=int(self.config['SESSION_LIFETIME']))\n hostnames = [host.strip() for host in self.config['HOSTNAMES'].split(',')]\n self.config['AUTH_RATELIMIT_EXEMPTION'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['AUTH_RATELIMIT_EXEMPTION'].split(',')) if cidr)\n self.config['HOSTNAMES'] = ','.join(hostnames)\n self.config['HOSTNAME'] = hostnames[0]\n # update the app config itself\n app.config = self\n\n def setdefault(self, key, value):\n if key not in self.config:\n self.config[key] = value\n return self.config[key]\n\n def get(self, *args):\n return self.config.get(*args)\n\n def keys(self):\n return self.config.keys()\n\n def __getitem__(self, key):\n return self.config.get(key)\n\n def __setitem__(self, key, value):\n self.config[key] = value\n\n def __contains__(self, key):\n return key in self.config\n", "path": "core/admin/mailu/configuration.py"}], "after_files": [{"content": "import os\n\nfrom datetime import timedelta\nfrom socrate import system\nimport ipaddress\n\nDEFAULT_CONFIG = {\n # Specific to the admin UI\n 'DOCKER_SOCKET': 'unix:///var/run/docker.sock',\n 'BABEL_DEFAULT_LOCALE': 'en',\n 'BABEL_DEFAULT_TIMEZONE': 'UTC',\n 'BOOTSTRAP_SERVE_LOCAL': True,\n 'RATELIMIT_STORAGE_URL': '',\n 'QUOTA_STORAGE_URL': '',\n 'DEBUG': False,\n 
'DOMAIN_REGISTRATION': False,\n 'TEMPLATES_AUTO_RELOAD': True,\n 'MEMORY_SESSIONS': False,\n # Database settings\n 'DB_FLAVOR': None,\n 'DB_USER': 'mailu',\n 'DB_PW': None,\n 'DB_HOST': 'database',\n 'DB_NAME': 'mailu',\n 'SQLITE_DATABASE_FILE':'data/main.db',\n 'SQLALCHEMY_DATABASE_URI': 'sqlite:////data/main.db',\n 'SQLALCHEMY_TRACK_MODIFICATIONS': False,\n # Statistics management\n 'INSTANCE_ID_PATH': '/data/instance',\n 'STATS_ENDPOINT': '18.{}.stats.mailu.io',\n # Common configuration variables\n 'SECRET_KEY': 'changeMe',\n 'DOMAIN': 'mailu.io',\n 'HOSTNAMES': 'mail.mailu.io,alternative.mailu.io,yetanother.mailu.io',\n 'POSTMASTER': 'postmaster',\n 'WILDCARD_SENDERS': '',\n 'TLS_FLAVOR': 'cert',\n 'INBOUND_TLS_ENFORCE': False,\n 'DEFER_ON_TLS_ERROR': True,\n 'AUTH_RATELIMIT_IP': '60/hour',\n 'AUTH_RATELIMIT_IP_V4_MASK': 24,\n 'AUTH_RATELIMIT_IP_V6_MASK': 56,\n 'AUTH_RATELIMIT_USER': '100/day',\n 'AUTH_RATELIMIT_EXEMPTION': '',\n 'AUTH_RATELIMIT_EXEMPTION_LENGTH': 86400,\n 'DISABLE_STATISTICS': False,\n # Mail settings\n 'DMARC_RUA': None,\n 'DMARC_RUF': None,\n 'WELCOME': False,\n 'WELCOME_SUBJECT': 'Dummy welcome topic',\n 'WELCOME_BODY': 'Dummy welcome body',\n 'DKIM_SELECTOR': 'dkim',\n 'DKIM_PATH': '/dkim/{domain}.{selector}.key',\n 'DEFAULT_QUOTA': 1000000000,\n 'MESSAGE_RATELIMIT': '200/day',\n 'RECIPIENT_DELIMITER': '',\n # Web settings\n 'SITENAME': 'Mailu',\n 'WEBSITE': 'https://mailu.io',\n 'ADMIN' : 'none',\n 'WEB_ADMIN': '/admin',\n 'WEB_WEBMAIL': '/webmail',\n 'WEBMAIL': 'none',\n 'RECAPTCHA_PUBLIC_KEY': '',\n 'RECAPTCHA_PRIVATE_KEY': '',\n 'LOGO_URL': None,\n 'LOGO_BACKGROUND': None,\n # Advanced settings\n 'LOG_LEVEL': 'WARNING',\n 'SESSION_KEY_BITS': 128,\n 'SESSION_LIFETIME': 24,\n 'SESSION_COOKIE_SECURE': True,\n 'CREDENTIAL_ROUNDS': 12,\n 'TZ': 'Etc/UTC',\n # Host settings\n 'HOST_IMAP': 'imap',\n 'HOST_LMTP': 'imap:2525',\n 'HOST_POP3': 'imap',\n 'HOST_SMTP': 'smtp',\n 'HOST_AUTHSMTP': 'smtp',\n 'HOST_ADMIN': 'admin',\n 'HOST_WEBMAIL': 'webmail',\n 'HOST_WEBDAV': 'webdav:5232',\n 'HOST_REDIS': 'redis',\n 'HOST_FRONT': 'front',\n 'SUBNET': '192.168.203.0/24',\n 'SUBNET6': None,\n 'POD_ADDRESS_RANGE': None\n}\n\nclass ConfigManager(dict):\n \"\"\" Naive configuration manager that uses environment only\n \"\"\"\n\n DB_TEMPLATES = {\n 'sqlite': 'sqlite:////{SQLITE_DATABASE_FILE}',\n 'postgresql': 'postgresql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',\n 'mysql': 'mysql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}'\n }\n\n def __init__(self):\n self.config = dict()\n\n def get_host_address(self, name):\n # if MYSERVICE_ADDRESS is defined, use this\n if '{}_ADDRESS'.format(name) in os.environ:\n return os.environ.get('{}_ADDRESS'.format(name))\n # otherwise use the host name and resolve it\n return system.resolve_address(self.config['HOST_{}'.format(name)])\n\n def resolve_hosts(self):\n self.config[\"IMAP_ADDRESS\"] = self.get_host_address(\"IMAP\")\n self.config[\"POP3_ADDRESS\"] = self.get_host_address(\"POP3\")\n self.config[\"AUTHSMTP_ADDRESS\"] = self.get_host_address(\"AUTHSMTP\")\n self.config[\"SMTP_ADDRESS\"] = self.get_host_address(\"SMTP\")\n self.config[\"REDIS_ADDRESS\"] = self.get_host_address(\"REDIS\")\n if self.config[\"WEBMAIL\"] != \"none\":\n self.config[\"WEBMAIL_ADDRESS\"] = self.get_host_address(\"WEBMAIL\")\n\n def __get_env(self, key, value):\n key_file = key + \"_FILE\"\n if key_file in os.environ:\n with open(os.environ.get(key_file)) as file:\n value_from_file = file.read()\n return value_from_file.strip()\n else:\n return os.environ.get(key, 
value)\n\n def __coerce_value(self, value):\n if isinstance(value, str) and value.lower() in ('true','yes'):\n return True\n elif isinstance(value, str) and value.lower() in ('false', 'no'):\n return False\n return value\n\n def init_app(self, app):\n self.config.update(app.config)\n # get environment variables\n self.config.update({\n key: self.__coerce_value(self.__get_env(key, value))\n for key, value in DEFAULT_CONFIG.items()\n })\n self.resolve_hosts()\n\n # automatically set the sqlalchemy string\n if self.config['DB_FLAVOR']:\n template = self.DB_TEMPLATES[self.config['DB_FLAVOR']]\n self.config['SQLALCHEMY_DATABASE_URI'] = template.format(**self.config)\n\n self.config['RATELIMIT_STORAGE_URL'] = 'redis://{0}/2'.format(self.config['REDIS_ADDRESS'])\n self.config['QUOTA_STORAGE_URL'] = 'redis://{0}/1'.format(self.config['REDIS_ADDRESS'])\n self.config['SESSION_STORAGE_URL'] = 'redis://{0}/3'.format(self.config['REDIS_ADDRESS'])\n self.config['SESSION_COOKIE_SAMESITE'] = 'Strict'\n self.config['SESSION_COOKIE_HTTPONLY'] = True\n self.config['PERMANENT_SESSION_LIFETIME'] = timedelta(hours=int(self.config['SESSION_LIFETIME']))\n hostnames = [host.strip() for host in self.config['HOSTNAMES'].split(',')]\n self.config['AUTH_RATELIMIT_EXEMPTION'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['AUTH_RATELIMIT_EXEMPTION'].split(',')) if cidr)\n self.config['HOSTNAMES'] = ','.join(hostnames)\n self.config['HOSTNAME'] = hostnames[0]\n # update the app config itself\n app.config = self\n\n def setdefault(self, key, value):\n if key not in self.config:\n self.config[key] = value\n return self.config[key]\n\n def get(self, *args):\n return self.config.get(*args)\n\n def keys(self):\n return self.config.keys()\n\n def __getitem__(self, key):\n return self.config.get(key)\n\n def __setitem__(self, key, value):\n self.config[key] = value\n\n def __contains__(self, key):\n return key in self.config\n", "path": "core/admin/mailu/configuration.py"}]} | 2,477 | 119 |
gh_patches_debug_21153 | rasdani/github-patches | git_diff | cloud-custodian__cloud-custodian-5100 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
aws.cloudtrail - tag filter
Objective: I want to write a policy to identify all CloudTrail trails with missing tags. Currently the aws.cloudtrail resource does not support tag filters.
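For illustration, the kind of policy this feature would enable might look like the sketch below (the `Owner` tag key is a hypothetical example, not something the current resource accepts):
```yaml
policies:
  - name: cloudtrail-missing-owner-tag
    resource: aws.cloudtrail
    filters:
      # Match trails that have no "Owner" tag at all
      - "tag:Owner": absent
```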
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `c7n/resources/cloudtrail.py`
Content:
```
1 # Copyright 2017-2019 Capital One Services, LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from __future__ import absolute_import, division, print_function, unicode_literals
15
16 import logging
17
18 from c7n.actions import Action, BaseAction
19 from c7n.exceptions import PolicyValidationError
20 from c7n.filters import ValueFilter, Filter
21 from c7n.manager import resources
22 from c7n.query import QueryResourceManager, TypeInfo
23 from c7n.utils import local_session, type_schema
24
25 from .aws import shape_validate, Arn
26
27 log = logging.getLogger('c7n.resources.cloudtrail')
28
29
30 @resources.register('cloudtrail')
31 class CloudTrail(QueryResourceManager):
32
33 class resource_type(TypeInfo):
34 service = 'cloudtrail'
35 enum_spec = ('describe_trails', 'trailList', None)
36 filter_name = 'trailNameList'
37 filter_type = 'list'
38 arn = id = 'TrailARN'
39 name = 'Name'
40 config_type = "AWS::CloudTrail::Trail"
41
42
43 @CloudTrail.filter_registry.register('is-shadow')
44 class IsShadow(Filter):
45 """Identify shadow trails (secondary copies), shadow trails
46 can't be modified directly, the origin trail needs to be modified.
47
48 Shadow trails are created for multi-region trails as well for
49 organizational trails.
50 """
51 schema = type_schema('is-shadow', state={'type': 'boolean'})
52 permissions = ('cloudtrail:DescribeTrails',)
53 embedded = False
54
55 def process(self, resources, event=None):
56 rcount = len(resources)
57 trails = [t for t in resources if (self.is_shadow(t) == self.data.get('state', True))]
58 if len(trails) != rcount and self.embedded:
59 self.log.info("implicitly filtering shadow trails %d -> %d",
60 rcount, len(trails))
61 return trails
62
63 def is_shadow(self, t):
64 if t.get('IsOrganizationTrail') and self.manager.config.account_id not in t['TrailARN']:
65 return True
66 if t.get('IsMultiRegionTrail') and t['HomeRegion'] != self.manager.config.region:
67 return True
68 return False
69
70
71 @CloudTrail.filter_registry.register('status')
72 class Status(ValueFilter):
73 """Filter a cloudtrail by its status.
74
75 :Example:
76
77 .. code-block:: yaml
78
79 policies:
80 - name: cloudtrail-check-status
81 resource: aws.cloudtrail
82 filters:
83 - type: status
84 key: IsLogging
85 value: False
86 """
87
88 schema = type_schema('status', rinherit=ValueFilter.schema)
89 schema_alias = False
90 permissions = ('cloudtrail:GetTrailStatus',)
91 annotation_key = 'c7n:TrailStatus'
92
93 def process(self, resources, event=None):
94 for r in resources:
95 region = self.manager.config.region
96 trail_arn = Arn.parse(r['TrailARN'])
97
98 if (r.get('IsOrganizationTrail') and
99 self.manager.config.account_id != trail_arn.account_id):
100 continue
101 if r.get('HomeRegion') and r['HomeRegion'] != region:
102 region = trail_arn.region
103 if self.annotation_key in r:
104 continue
105 client = local_session(self.manager.session_factory).client(
106 'cloudtrail', region_name=region)
107 status = client.get_trail_status(Name=r['Name'])
108 status.pop('ResponseMetadata')
109 r[self.annotation_key] = status
110
111 return super(Status, self).process(resources)
112
113 def __call__(self, r):
114 return self.match(r['c7n:TrailStatus'])
115
116
117 @CloudTrail.action_registry.register('update-trail')
118 class UpdateTrail(Action):
119 """Update trail attributes.
120
121 :Example:
122
123 .. code-block:: yaml
124
125 policies:
126 - name: cloudtrail-set-log
127 resource: aws.cloudtrail
128 filters:
129 - or:
130 - KmsKeyId: empty
131 - LogFileValidationEnabled: false
132 actions:
133 - type: update-trail
134 attributes:
135 KmsKeyId: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef
136 EnableLogFileValidation: true
137 """
138 schema = type_schema(
139 'update-trail',
140 attributes={'type': 'object'},
141 required=('attributes',))
142 shape = 'UpdateTrailRequest'
143 permissions = ('cloudtrail:UpdateTrail',)
144
145 def validate(self):
146 attrs = dict(self.data['attributes'])
147 if 'Name' in attrs:
148 raise PolicyValidationError(
149 "Can't include Name in update-trail action")
150 attrs['Name'] = 'PolicyValidation'
151 return shape_validate(
152 attrs,
153 self.shape,
154 self.manager.resource_type.service)
155
156 def process(self, resources):
157 client = local_session(self.manager.session_factory).client('cloudtrail')
158 shadow_check = IsShadow({'state': False}, self.manager)
159 shadow_check.embedded = True
160 resources = shadow_check.process(resources)
161
162 for r in resources:
163 client.update_trail(
164 Name=r['Name'],
165 **self.data['attributes'])
166
167
168 @CloudTrail.action_registry.register('set-logging')
169 class SetLogging(Action):
170 """Set the logging state of a trail
171
172 :Example:
173
174 .. code-block:: yaml
175
176 policies:
177 - name: cloudtrail-set-active
178 resource: aws.cloudtrail
179 filters:
180 - type: status
181 key: IsLogging
182 value: False
183 actions:
184 - type: set-logging
185 enabled: True
186 """
187 schema = type_schema(
188 'set-logging', enabled={'type': 'boolean'})
189
190 def get_permissions(self):
191 enable = self.data.get('enabled', True)
192 if enable is True:
193 return ('cloudtrail:StartLogging',)
194 else:
195 return ('cloudtrail:StopLogging',)
196
197 def process(self, resources):
198 client = local_session(self.manager.session_factory).client('cloudtrail')
199 shadow_check = IsShadow({'state': False}, self.manager)
200 shadow_check.embedded = True
201 resources = shadow_check.process(resources)
202 enable = self.data.get('enabled', True)
203
204 for r in resources:
205 if enable:
206 client.start_logging(Name=r['Name'])
207 else:
208 client.stop_logging(Name=r['Name'])
209
210
211 @CloudTrail.action_registry.register('delete')
212 class DeleteTrail(BaseAction):
213 """ Delete a cloud trail
214
215 :example:
216
217 .. code-block:: yaml
218
219 policies:
220 - name: delete-cloudtrail
221 resource: aws.cloudtrail
222 filters:
223 - type: value
224 key: Name
225 value: delete-me
226 op: eq
227 actions:
228 - type: delete
229 """
230
231 schema = type_schema('delete')
232 permissions = ('cloudtrail:DeleteTrail',)
233
234 def process(self, resources):
235 client = local_session(self.manager.session_factory).client('cloudtrail')
236 shadow_check = IsShadow({'state': False}, self.manager)
237 shadow_check.embedded = True
238 resources = shadow_check.process(resources)
239 for r in resources:
240 try:
241 client.delete_trail(Name=r['Name'])
242 except client.exceptions.TrailNotFoundException:
243 continue
244
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/c7n/resources/cloudtrail.py b/c7n/resources/cloudtrail.py
--- a/c7n/resources/cloudtrail.py
+++ b/c7n/resources/cloudtrail.py
@@ -19,7 +19,8 @@
from c7n.exceptions import PolicyValidationError
from c7n.filters import ValueFilter, Filter
from c7n.manager import resources
-from c7n.query import QueryResourceManager, TypeInfo
+from c7n.tags import universal_augment
+from c7n.query import DescribeSource, QueryResourceManager, TypeInfo
from c7n.utils import local_session, type_schema
from .aws import shape_validate, Arn
@@ -38,6 +39,18 @@
arn = id = 'TrailARN'
name = 'Name'
config_type = "AWS::CloudTrail::Trail"
+ universal_taggable = object()
+
+ def get_source(self, source_type):
+ if source_type == 'describe':
+ return DescribeTrail(self)
+ return super(CloudTrail, self).get_source(source_type)
+
+
+class DescribeTrail(DescribeSource):
+
+ def augment(self, resources):
+ return universal_augment(self.manager, resources)
@CloudTrail.filter_registry.register('is-shadow')
| {"golden_diff": "diff --git a/c7n/resources/cloudtrail.py b/c7n/resources/cloudtrail.py\n--- a/c7n/resources/cloudtrail.py\n+++ b/c7n/resources/cloudtrail.py\n@@ -19,7 +19,8 @@\n from c7n.exceptions import PolicyValidationError\n from c7n.filters import ValueFilter, Filter\n from c7n.manager import resources\n-from c7n.query import QueryResourceManager, TypeInfo\n+from c7n.tags import universal_augment\n+from c7n.query import DescribeSource, QueryResourceManager, TypeInfo\n from c7n.utils import local_session, type_schema\n \n from .aws import shape_validate, Arn\n@@ -38,6 +39,18 @@\n arn = id = 'TrailARN'\n name = 'Name'\n config_type = \"AWS::CloudTrail::Trail\"\n+ universal_taggable = object()\n+\n+ def get_source(self, source_type):\n+ if source_type == 'describe':\n+ return DescribeTrail(self)\n+ return super(CloudTrail, self).get_source(source_type)\n+\n+\n+class DescribeTrail(DescribeSource):\n+\n+ def augment(self, resources):\n+ return universal_augment(self.manager, resources)\n \n \n @CloudTrail.filter_registry.register('is-shadow')\n", "issue": "aws.cloudtrail - tag filter\nObjective: I want to write a policy to identify all cloud trails with missing tags. Currently it does not support this.\n", "before_files": [{"content": "# Copyright 2017-2019 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\n\nfrom c7n.actions import Action, BaseAction\nfrom c7n.exceptions import PolicyValidationError\nfrom c7n.filters import ValueFilter, Filter\nfrom c7n.manager import resources\nfrom c7n.query import QueryResourceManager, TypeInfo\nfrom c7n.utils import local_session, type_schema\n\nfrom .aws import shape_validate, Arn\n\nlog = logging.getLogger('c7n.resources.cloudtrail')\n\n\[email protected]('cloudtrail')\nclass CloudTrail(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'cloudtrail'\n enum_spec = ('describe_trails', 'trailList', None)\n filter_name = 'trailNameList'\n filter_type = 'list'\n arn = id = 'TrailARN'\n name = 'Name'\n config_type = \"AWS::CloudTrail::Trail\"\n\n\[email protected]_registry.register('is-shadow')\nclass IsShadow(Filter):\n \"\"\"Identify shadow trails (secondary copies), shadow trails\n can't be modified directly, the origin trail needs to be modified.\n\n Shadow trails are created for multi-region trails as well for\n organizational trails.\n \"\"\"\n schema = type_schema('is-shadow', state={'type': 'boolean'})\n permissions = ('cloudtrail:DescribeTrails',)\n embedded = False\n\n def process(self, resources, event=None):\n rcount = len(resources)\n trails = [t for t in resources if (self.is_shadow(t) == self.data.get('state', True))]\n if len(trails) != rcount and self.embedded:\n self.log.info(\"implicitly filtering shadow trails %d -> %d\",\n rcount, len(trails))\n return trails\n\n def is_shadow(self, t):\n if t.get('IsOrganizationTrail') and self.manager.config.account_id not in t['TrailARN']:\n return 
True\n if t.get('IsMultiRegionTrail') and t['HomeRegion'] != self.manager.config.region:\n return True\n return False\n\n\[email protected]_registry.register('status')\nclass Status(ValueFilter):\n \"\"\"Filter a cloudtrail by its status.\n\n :Example:\n\n .. code-block:: yaml\n\n policies:\n - name: cloudtrail-check-status\n resource: aws.cloudtrail\n filters:\n - type: status\n key: IsLogging\n value: False\n \"\"\"\n\n schema = type_schema('status', rinherit=ValueFilter.schema)\n schema_alias = False\n permissions = ('cloudtrail:GetTrailStatus',)\n annotation_key = 'c7n:TrailStatus'\n\n def process(self, resources, event=None):\n for r in resources:\n region = self.manager.config.region\n trail_arn = Arn.parse(r['TrailARN'])\n\n if (r.get('IsOrganizationTrail') and\n self.manager.config.account_id != trail_arn.account_id):\n continue\n if r.get('HomeRegion') and r['HomeRegion'] != region:\n region = trail_arn.region\n if self.annotation_key in r:\n continue\n client = local_session(self.manager.session_factory).client(\n 'cloudtrail', region_name=region)\n status = client.get_trail_status(Name=r['Name'])\n status.pop('ResponseMetadata')\n r[self.annotation_key] = status\n\n return super(Status, self).process(resources)\n\n def __call__(self, r):\n return self.match(r['c7n:TrailStatus'])\n\n\[email protected]_registry.register('update-trail')\nclass UpdateTrail(Action):\n \"\"\"Update trail attributes.\n\n :Example:\n\n .. code-block:: yaml\n\n policies:\n - name: cloudtrail-set-log\n resource: aws.cloudtrail\n filters:\n - or:\n - KmsKeyId: empty\n - LogFileValidationEnabled: false\n actions:\n - type: update-trail\n attributes:\n KmsKeyId: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef\n EnableLogFileValidation: true\n \"\"\"\n schema = type_schema(\n 'update-trail',\n attributes={'type': 'object'},\n required=('attributes',))\n shape = 'UpdateTrailRequest'\n permissions = ('cloudtrail:UpdateTrail',)\n\n def validate(self):\n attrs = dict(self.data['attributes'])\n if 'Name' in attrs:\n raise PolicyValidationError(\n \"Can't include Name in update-trail action\")\n attrs['Name'] = 'PolicyValidation'\n return shape_validate(\n attrs,\n self.shape,\n self.manager.resource_type.service)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('cloudtrail')\n shadow_check = IsShadow({'state': False}, self.manager)\n shadow_check.embedded = True\n resources = shadow_check.process(resources)\n\n for r in resources:\n client.update_trail(\n Name=r['Name'],\n **self.data['attributes'])\n\n\[email protected]_registry.register('set-logging')\nclass SetLogging(Action):\n \"\"\"Set the logging state of a trail\n\n :Example:\n\n .. 
code-block:: yaml\n\n policies:\n - name: cloudtrail-set-active\n resource: aws.cloudtrail\n filters:\n - type: status\n key: IsLogging\n value: False\n actions:\n - type: set-logging\n enabled: True\n \"\"\"\n schema = type_schema(\n 'set-logging', enabled={'type': 'boolean'})\n\n def get_permissions(self):\n enable = self.data.get('enabled', True)\n if enable is True:\n return ('cloudtrail:StartLogging',)\n else:\n return ('cloudtrail:StopLogging',)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('cloudtrail')\n shadow_check = IsShadow({'state': False}, self.manager)\n shadow_check.embedded = True\n resources = shadow_check.process(resources)\n enable = self.data.get('enabled', True)\n\n for r in resources:\n if enable:\n client.start_logging(Name=r['Name'])\n else:\n client.stop_logging(Name=r['Name'])\n\n\[email protected]_registry.register('delete')\nclass DeleteTrail(BaseAction):\n \"\"\" Delete a cloud trail\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: delete-cloudtrail\n resource: aws.cloudtrail\n filters:\n - type: value\n key: Name\n value: delete-me\n op: eq\n actions:\n - type: delete\n \"\"\"\n\n schema = type_schema('delete')\n permissions = ('cloudtrail:DeleteTrail',)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('cloudtrail')\n shadow_check = IsShadow({'state': False}, self.manager)\n shadow_check.embedded = True\n resources = shadow_check.process(resources)\n for r in resources:\n try:\n client.delete_trail(Name=r['Name'])\n except client.exceptions.TrailNotFoundException:\n continue\n", "path": "c7n/resources/cloudtrail.py"}], "after_files": [{"content": "# Copyright 2017-2019 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\n\nfrom c7n.actions import Action, BaseAction\nfrom c7n.exceptions import PolicyValidationError\nfrom c7n.filters import ValueFilter, Filter\nfrom c7n.manager import resources\nfrom c7n.tags import universal_augment\nfrom c7n.query import DescribeSource, QueryResourceManager, TypeInfo\nfrom c7n.utils import local_session, type_schema\n\nfrom .aws import shape_validate, Arn\n\nlog = logging.getLogger('c7n.resources.cloudtrail')\n\n\[email protected]('cloudtrail')\nclass CloudTrail(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'cloudtrail'\n enum_spec = ('describe_trails', 'trailList', None)\n filter_name = 'trailNameList'\n filter_type = 'list'\n arn = id = 'TrailARN'\n name = 'Name'\n config_type = \"AWS::CloudTrail::Trail\"\n universal_taggable = object()\n\n def get_source(self, source_type):\n if source_type == 'describe':\n return DescribeTrail(self)\n return super(CloudTrail, self).get_source(source_type)\n\n\nclass DescribeTrail(DescribeSource):\n\n def augment(self, resources):\n return universal_augment(self.manager, resources)\n\n\[email 
protected]_registry.register('is-shadow')\nclass IsShadow(Filter):\n \"\"\"Identify shadow trails (secondary copies), shadow trails\n can't be modified directly, the origin trail needs to be modified.\n\n Shadow trails are created for multi-region trails as well for\n organizational trails.\n \"\"\"\n schema = type_schema('is-shadow', state={'type': 'boolean'})\n permissions = ('cloudtrail:DescribeTrails',)\n embedded = False\n\n def process(self, resources, event=None):\n rcount = len(resources)\n trails = [t for t in resources if (self.is_shadow(t) == self.data.get('state', True))]\n if len(trails) != rcount and self.embedded:\n self.log.info(\"implicitly filtering shadow trails %d -> %d\",\n rcount, len(trails))\n return trails\n\n def is_shadow(self, t):\n if t.get('IsOrganizationTrail') and self.manager.config.account_id not in t['TrailARN']:\n return True\n if t.get('IsMultiRegionTrail') and t['HomeRegion'] != self.manager.config.region:\n return True\n return False\n\n\[email protected]_registry.register('status')\nclass Status(ValueFilter):\n \"\"\"Filter a cloudtrail by its status.\n\n :Example:\n\n .. code-block:: yaml\n\n policies:\n - name: cloudtrail-check-status\n resource: aws.cloudtrail\n filters:\n - type: status\n key: IsLogging\n value: False\n \"\"\"\n\n schema = type_schema('status', rinherit=ValueFilter.schema)\n schema_alias = False\n permissions = ('cloudtrail:GetTrailStatus',)\n annotation_key = 'c7n:TrailStatus'\n\n def process(self, resources, event=None):\n for r in resources:\n region = self.manager.config.region\n trail_arn = Arn.parse(r['TrailARN'])\n\n if (r.get('IsOrganizationTrail') and\n self.manager.config.account_id != trail_arn.account_id):\n continue\n if r.get('HomeRegion') and r['HomeRegion'] != region:\n region = trail_arn.region\n if self.annotation_key in r:\n continue\n client = local_session(self.manager.session_factory).client(\n 'cloudtrail', region_name=region)\n status = client.get_trail_status(Name=r['Name'])\n status.pop('ResponseMetadata')\n r[self.annotation_key] = status\n\n return super(Status, self).process(resources)\n\n def __call__(self, r):\n return self.match(r['c7n:TrailStatus'])\n\n\[email protected]_registry.register('update-trail')\nclass UpdateTrail(Action):\n \"\"\"Update trail attributes.\n\n :Example:\n\n .. 
code-block:: yaml\n\n policies:\n - name: cloudtrail-set-log\n resource: aws.cloudtrail\n filters:\n - or:\n - KmsKeyId: empty\n - LogFileValidationEnabled: false\n actions:\n - type: update-trail\n attributes:\n KmsKeyId: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef\n EnableLogFileValidation: true\n \"\"\"\n schema = type_schema(\n 'update-trail',\n attributes={'type': 'object'},\n required=('attributes',))\n shape = 'UpdateTrailRequest'\n permissions = ('cloudtrail:UpdateTrail',)\n\n def validate(self):\n attrs = dict(self.data['attributes'])\n if 'Name' in attrs:\n raise PolicyValidationError(\n \"Can't include Name in update-trail action\")\n attrs['Name'] = 'PolicyValidation'\n return shape_validate(\n attrs,\n self.shape,\n self.manager.resource_type.service)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('cloudtrail')\n shadow_check = IsShadow({'state': False}, self.manager)\n shadow_check.embedded = True\n resources = shadow_check.process(resources)\n\n for r in resources:\n client.update_trail(\n Name=r['Name'],\n **self.data['attributes'])\n\n\[email protected]_registry.register('set-logging')\nclass SetLogging(Action):\n \"\"\"Set the logging state of a trail\n\n :Example:\n\n .. code-block:: yaml\n\n policies:\n - name: cloudtrail-set-active\n resource: aws.cloudtrail\n filters:\n - type: status\n key: IsLogging\n value: False\n actions:\n - type: set-logging\n enabled: True\n \"\"\"\n schema = type_schema(\n 'set-logging', enabled={'type': 'boolean'})\n\n def get_permissions(self):\n enable = self.data.get('enabled', True)\n if enable is True:\n return ('cloudtrail:StartLogging',)\n else:\n return ('cloudtrail:StopLogging',)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('cloudtrail')\n shadow_check = IsShadow({'state': False}, self.manager)\n shadow_check.embedded = True\n resources = shadow_check.process(resources)\n enable = self.data.get('enabled', True)\n\n for r in resources:\n if enable:\n client.start_logging(Name=r['Name'])\n else:\n client.stop_logging(Name=r['Name'])\n\n\[email protected]_registry.register('delete')\nclass DeleteTrail(BaseAction):\n \"\"\" Delete a cloud trail\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: delete-cloudtrail\n resource: aws.cloudtrail\n filters:\n - type: value\n key: Name\n value: delete-me\n op: eq\n actions:\n - type: delete\n \"\"\"\n\n schema = type_schema('delete')\n permissions = ('cloudtrail:DeleteTrail',)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('cloudtrail')\n shadow_check = IsShadow({'state': False}, self.manager)\n shadow_check.embedded = True\n resources = shadow_check.process(resources)\n for r in resources:\n try:\n client.delete_trail(Name=r['Name'])\n except client.exceptions.TrailNotFoundException:\n continue\n", "path": "c7n/resources/cloudtrail.py"}]} | 2,603 | 270 |
gh_patches_debug_8504 | rasdani/github-patches | git_diff | Gallopsled__pwntools-218 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SyntaxWarning in pwnlib.util.web
This line generates a `SyntaxWarning`: https://github.com/Gallopsled/pwntools/blob/master/pwnlib/util/web.py#L27
Either we should use qualified names or only import the names that we need. My vote goes toward the former.
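For reference, a minimal sketch of the qualified-name style (only the import changes; the function body is abbreviated here):
```python
# pwnlib/util/web.py (sketch of the qualified-import approach)
import requests

def wget(url, save=None, timeout=5, **kwargs):
    # Qualified call instead of a star import inside the function body,
    # which is what triggers the SyntaxWarning.
    response = requests.get(url, stream=True, **kwargs)
    ...
```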
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwnlib/util/web.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import os, tempfile, logging
3 from .misc import size
4 log = logging.getLogger(__name__)
5
6 def wget(url, save=None, timeout=5, **kwargs):
7 """wget(url, save=None, timeout=5) -> str
8
9 Downloads a file via HTTP/HTTPS.
10
11 Args:
12 url (str): URL to download
13 save (str or bool): Name to save as. Any truthy value
14 will auto-generate a name based on the URL.
15 timeout (int): Timeout, in seconds
16
17 Example:
18
19 >>> url = 'http://httpbin.org/robots.txt'
20 >>> with context.local(log_level='ERROR'): result = wget(url)
21 >>> result
22 'User-agent: *\nDisallow: /deny\n'
23 >>> with context.local(log_level='ERROR'): wget(url, True)
24 >>> result == file('robots.txt').read()
25 True
26 """
27 from requests import *
28
29 with log.progress("Downloading '%s'" % url) as w:
30 w.status("Making request...")
31
32 response = get(url, stream=True, **kwargs)
33
34 if not response.ok:
35 w.failure("Got code %s" % response.status_code)
36 return
37
38 total_size = int(response.headers.get('content-length',0))
39
40 w.status('0 / %s' % size(total_size))
41
42 # Find out the next largest size we can represent as
43 chunk_size = 1
44 while chunk_size < (total_size/10):
45 chunk_size *= 1000
46
47 # Count chunks as they're received
48 total_data = ''
49
50 # Loop until we have all of the data
51 for chunk in response.iter_content(chunk_size = 2**10):
52 total_data += chunk
53 if total_size:
54 w.status('%s / %s' % (size(total_data), size(total_size)))
55 else:
56 w.status('%s' % size(total_data))
57
58 # Save to the target file if provided
59 if save:
60 if not isinstance(save, (str, unicode)):
61 save = os.path.basename(url)
62 save = save or tempfile.NamedTemporaryFile(dir='.', delete=False).name
63 with file(save,'wb+') as f:
64 f.write(total_data)
65 w.success('Saved %r (%s)' % (f.name, size(total_data)))
66 else:
67 w.success('%s' % size(total_data))
68
69 return total_data
70
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pwnlib/util/web.py b/pwnlib/util/web.py
--- a/pwnlib/util/web.py
+++ b/pwnlib/util/web.py
@@ -24,12 +24,12 @@
>>> result == file('robots.txt').read()
True
"""
- from requests import *
+ import requests
with log.progress("Downloading '%s'" % url) as w:
w.status("Making request...")
- response = get(url, stream=True, **kwargs)
+ response = requests.get(url, stream=True, **kwargs)
if not response.ok:
w.failure("Got code %s" % response.status_code)
| {"golden_diff": "diff --git a/pwnlib/util/web.py b/pwnlib/util/web.py\n--- a/pwnlib/util/web.py\n+++ b/pwnlib/util/web.py\n@@ -24,12 +24,12 @@\n >>> result == file('robots.txt').read()\n True\n \"\"\"\n- from requests import *\n+ import requests\n \n with log.progress(\"Downloading '%s'\" % url) as w:\n w.status(\"Making request...\")\n \n- response = get(url, stream=True, **kwargs)\n+ response = requests.get(url, stream=True, **kwargs)\n \n if not response.ok:\n w.failure(\"Got code %s\" % response.status_code)\n", "issue": "SyntaxWarning in pwnlib.util.web\nThis line generates a `SyntaxWarning`: https://github.com/Gallopsled/pwntools/blob/master/pwnlib/util/web.py#L27\n\nEither we should use qualified names or only import the names that we need. My votes goes toward the former.\n\nSyntaxWarning in pwnlib.util.web\nThis line generates a `SyntaxWarning`: https://github.com/Gallopsled/pwntools/blob/master/pwnlib/util/web.py#L27\n\nEither we should use qualified names or only import the names that we need. My votes goes toward the former.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport os, tempfile, logging\nfrom .misc import size\nlog = logging.getLogger(__name__)\n\ndef wget(url, save=None, timeout=5, **kwargs):\n \"\"\"wget(url, save=None, timeout=5) -> str\n\n Downloads a file via HTTP/HTTPS.\n\n Args:\n url (str): URL to download\n save (str or bool): Name to save as. Any truthy value\n will auto-generate a name based on the URL.\n timeout (int): Timeout, in seconds\n\n Example:\n\n >>> url = 'http://httpbin.org/robots.txt'\n >>> with context.local(log_level='ERROR'): result = wget(url)\n >>> result\n 'User-agent: *\\nDisallow: /deny\\n'\n >>> with context.local(log_level='ERROR'): wget(url, True)\n >>> result == file('robots.txt').read()\n True\n \"\"\"\n from requests import *\n\n with log.progress(\"Downloading '%s'\" % url) as w:\n w.status(\"Making request...\")\n\n response = get(url, stream=True, **kwargs)\n\n if not response.ok:\n w.failure(\"Got code %s\" % response.status_code)\n return\n\n total_size = int(response.headers.get('content-length',0))\n\n w.status('0 / %s' % size(total_size))\n\n # Find out the next largest size we can represent as\n chunk_size = 1\n while chunk_size < (total_size/10):\n chunk_size *= 1000\n\n # Count chunks as they're received\n total_data = ''\n\n # Loop until we have all of the data\n for chunk in response.iter_content(chunk_size = 2**10):\n total_data += chunk\n if total_size:\n w.status('%s / %s' % (size(total_data), size(total_size)))\n else:\n w.status('%s' % size(total_data))\n\n # Save to the target file if provided\n if save:\n if not isinstance(save, (str, unicode)):\n save = os.path.basename(url)\n save = save or tempfile.NamedTemporaryFile(dir='.', delete=False).name\n with file(save,'wb+') as f:\n f.write(total_data)\n w.success('Saved %r (%s)' % (f.name, size(total_data)))\n else:\n w.success('%s' % size(total_data))\n\n return total_data\n\n", "path": "pwnlib/util/web.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport os, tempfile, logging\nfrom .misc import size\nlog = logging.getLogger(__name__)\n\ndef wget(url, save=None, timeout=5, **kwargs):\n \"\"\"wget(url, save=None, timeout=5) -> str\n\n Downloads a file via HTTP/HTTPS.\n\n Args:\n url (str): URL to download\n save (str or bool): Name to save as. 
Any truthy value\n will auto-generate a name based on the URL.\n timeout (int): Timeout, in seconds\n\n Example:\n\n >>> url = 'http://httpbin.org/robots.txt'\n >>> with context.local(log_level='ERROR'): result = wget(url)\n >>> result\n 'User-agent: *\\nDisallow: /deny\\n'\n >>> with context.local(log_level='ERROR'): wget(url, True)\n >>> result == file('robots.txt').read()\n True\n \"\"\"\n import requests\n\n with log.progress(\"Downloading '%s'\" % url) as w:\n w.status(\"Making request...\")\n\n response = requests.get(url, stream=True, **kwargs)\n\n if not response.ok:\n w.failure(\"Got code %s\" % response.status_code)\n return\n\n total_size = int(response.headers.get('content-length',0))\n\n w.status('0 / %s' % size(total_size))\n\n # Find out the next largest size we can represent as\n chunk_size = 1\n while chunk_size < (total_size/10):\n chunk_size *= 1000\n\n # Count chunks as they're received\n total_data = ''\n\n # Loop until we have all of the data\n for chunk in response.iter_content(chunk_size = 2**10):\n total_data += chunk\n if total_size:\n w.status('%s / %s' % (size(total_data), size(total_size)))\n else:\n w.status('%s' % size(total_data))\n\n # Save to the target file if provided\n if save:\n if not isinstance(save, (str, unicode)):\n save = os.path.basename(url)\n save = save or tempfile.NamedTemporaryFile(dir='.', delete=False).name\n with file(save,'wb+') as f:\n f.write(total_data)\n w.success('Saved %r (%s)' % (f.name, size(total_data)))\n else:\n w.success('%s' % size(total_data))\n\n return total_data\n\n", "path": "pwnlib/util/web.py"}]} | 1,061 | 148 |
gh_patches_debug_14960 | rasdani/github-patches | git_diff | flairNLP__flair-422 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Don't pin package dependencies in setup.py
To be removed, once it is done: Please add the appropriate label to this ticket, e.g. feature or enhancement.
**Is your feature/enhancement request related to a problem? Please describe.**
It is not considered good practice to pin package dependencies in setup.py (see additional context).
For instance, installing flair forces me to downgrade certain packages.
**Describe the solution you'd like**
Just list the abstract requirements in setup.py with less restrictive version bounds.
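For example, the pinned requirements could be relaxed to minimum-version bounds along these lines (the exact bounds are a maintainer decision; these values are only illustrative):
```python
install_requires=[
    'torch>=1.0.0',
    'gensim>=3.4.0',
    'tqdm>=4.26.0',
    'segtok>=1.5.7',
    # ... remaining dependencies with >= instead of == pins
],
```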
**Additional context**
See https://packaging.python.org/discussions/install-requires-vs-requirements/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import setup, find_packages
2
3 setup(
4 name='flair',
5 version='0.4.0',
6 description='A very simple framework for state-of-the-art NLP',
7 long_description=open("README.md", encoding='utf-8').read(),
8 long_description_content_type="text/markdown",
9 author='Alan Akbik',
10 author_email='[email protected]',
11 url='https://github.com/zalandoresearch/flair',
12 packages=find_packages(exclude='test'), # same as name
13 license='MIT',
14 install_requires=[
15 'torch==1.0.0',
16 'gensim==3.4.0',
17 'typing==3.6.4',
18 'tqdm==4.26.0',
19 'segtok==1.5.7',
20 'matplotlib==3.0.0',
21 'mpld3==0.3',
22 'sklearn',
23 'sqlitedict==1.6.0',
24 'deprecated==1.2.4',
25 'hyperopt==0.1.1',
26 'pytorch-pretrained-bert==0.3.0'
27 ],
28 include_package_data=True,
29 python_requires='>=3.6',
30 )
31
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -12,18 +12,17 @@
packages=find_packages(exclude='test'), # same as name
license='MIT',
install_requires=[
- 'torch==1.0.0',
- 'gensim==3.4.0',
- 'typing==3.6.4',
- 'tqdm==4.26.0',
- 'segtok==1.5.7',
- 'matplotlib==3.0.0',
- 'mpld3==0.3',
+ 'torch>=1.0.0',
+ 'gensim>=3.4.0',
+ 'tqdm>=4.26.0',
+ 'segtok>=1.5.7',
+ 'matplotlib>=3.0.0',
+ 'mpld3>=0.3',
'sklearn',
- 'sqlitedict==1.6.0',
- 'deprecated==1.2.4',
- 'hyperopt==0.1.1',
- 'pytorch-pretrained-bert==0.3.0'
+ 'sqlitedict>=1.6.0',
+ 'deprecated>=1.2.4',
+ 'hyperopt>=0.1.1',
+ 'pytorch-pretrained-bert>=0.3.0'
],
include_package_data=True,
python_requires='>=3.6',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -12,18 +12,17 @@\n packages=find_packages(exclude='test'), # same as name\n license='MIT',\n install_requires=[\n- 'torch==1.0.0',\n- 'gensim==3.4.0',\n- 'typing==3.6.4',\n- 'tqdm==4.26.0',\n- 'segtok==1.5.7',\n- 'matplotlib==3.0.0',\n- 'mpld3==0.3',\n+ 'torch>=1.0.0',\n+ 'gensim>=3.4.0',\n+ 'tqdm>=4.26.0',\n+ 'segtok>=1.5.7',\n+ 'matplotlib>=3.0.0',\n+ 'mpld3>=0.3',\n 'sklearn',\n- 'sqlitedict==1.6.0',\n- 'deprecated==1.2.4',\n- 'hyperopt==0.1.1',\n- 'pytorch-pretrained-bert==0.3.0'\n+ 'sqlitedict>=1.6.0',\n+ 'deprecated>=1.2.4',\n+ 'hyperopt>=0.1.1',\n+ 'pytorch-pretrained-bert>=0.3.0'\n ],\n include_package_data=True,\n python_requires='>=3.6',\n", "issue": "Don't pin package dependencies in setup.py\nTo be removed, once it is done: Please add the appropriate label to this ticket, e.g. feature or enhancement.\r\n\r\n**Is your feature/enhancement request related to a problem? Please describe.**\r\n\r\nIt is not considered good practice to pin package dependencies in setup.py (see additional context).\r\n\r\nFor instance, I'm forced to downgrade certain packages by installing flair.\r\n\r\n**Describe the solution you'd like**\r\n\r\nJust list the abstract requirements in setup.py with less restrictive version bounds.\r\n\r\n**Additional context**\r\n\r\nSee https://packaging.python.org/discussions/install-requires-vs-requirements/\n", "before_files": [{"content": "from setuptools import setup, find_packages\n\nsetup(\n name='flair',\n version='0.4.0',\n description='A very simple framework for state-of-the-art NLP',\n long_description=open(\"README.md\", encoding='utf-8').read(),\n long_description_content_type=\"text/markdown\",\n author='Alan Akbik',\n author_email='[email protected]',\n url='https://github.com/zalandoresearch/flair',\n packages=find_packages(exclude='test'), # same as name\n license='MIT',\n install_requires=[\n 'torch==1.0.0',\n 'gensim==3.4.0',\n 'typing==3.6.4',\n 'tqdm==4.26.0',\n 'segtok==1.5.7',\n 'matplotlib==3.0.0',\n 'mpld3==0.3',\n 'sklearn',\n 'sqlitedict==1.6.0',\n 'deprecated==1.2.4',\n 'hyperopt==0.1.1',\n 'pytorch-pretrained-bert==0.3.0'\n ],\n include_package_data=True,\n python_requires='>=3.6',\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup, find_packages\n\nsetup(\n name='flair',\n version='0.4.0',\n description='A very simple framework for state-of-the-art NLP',\n long_description=open(\"README.md\", encoding='utf-8').read(),\n long_description_content_type=\"text/markdown\",\n author='Alan Akbik',\n author_email='[email protected]',\n url='https://github.com/zalandoresearch/flair',\n packages=find_packages(exclude='test'), # same as name\n license='MIT',\n install_requires=[\n 'torch>=1.0.0',\n 'gensim>=3.4.0',\n 'tqdm>=4.26.0',\n 'segtok>=1.5.7',\n 'matplotlib>=3.0.0',\n 'mpld3>=0.3',\n 'sklearn',\n 'sqlitedict>=1.6.0',\n 'deprecated>=1.2.4',\n 'hyperopt>=0.1.1',\n 'pytorch-pretrained-bert>=0.3.0'\n ],\n include_package_data=True,\n python_requires='>=3.6',\n)\n", "path": "setup.py"}]} | 712 | 339 |
gh_patches_debug_5563 | rasdani/github-patches | git_diff | mlflow__mlflow-9536 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] basic-auth init on remote database
### Describe the problem
The same issue as #9399 happens when trying to initialize the database, which invokes the function [migrate_if_needed](https://github.com/mlflow/mlflow/blob/master/mlflow/server/auth/db/utils.py#L30).
Suggestion: apply the same fix as #9410 to force SQLAlchemy to render the unobfuscated URL.
### Suggestion
```
alembic_cfg = _get_alembic_config(engine.url.render_as_string(hide_password=False))
```
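In context, the patched function in `mlflow/server/auth/db/utils.py` would look roughly like this sketch (mirroring the fix already applied to `migrate`):
```python
def migrate_if_needed(engine: Engine, revision: str) -> None:
    # Render the URL with the real password; str(engine.url) obfuscates it,
    # so Alembic cannot connect to a remote database that requires one.
    alembic_cfg = _get_alembic_config(engine.url.render_as_string(hide_password=False))
    script_dir = ScriptDirectory.from_config(alembic_cfg)
    with engine.begin() as conn:
        context = MigrationContext.configure(conn)
        if context.get_current_revision() != script_dir.get_current_head():
            upgrade(alembic_cfg, revision)
```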
### What component(s) does this bug affect?
- [ ] `area/artifacts`: Artifact stores and artifact logging
- [ ] `area/build`: Build and test infrastructure for MLflow
- [ ] `area/docs`: MLflow documentation pages
- [ ] `area/examples`: Example code
- [ ] `area/gateway`: AI Gateway service, Gateway client APIs, third-party Gateway integrations
- [ ] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry
- [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors
- [ ] `area/recipes`: Recipes, Recipe APIs, Recipe configs, Recipe Templates
- [ ] `area/projects`: MLproject format, project running backends
- [ ] `area/scoring`: MLflow Model server, model deployment tools, Spark UDFs
- [X] `area/server-infra`: MLflow Tracking server backend
- [ ] `area/tracking`: Tracking Service, tracking client APIs, autologging
### What interface(s) does this bug affect?
- [ ] `area/uiux`: Front-end, user experience, plotting, JavaScript, JavaScript dev server
- [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models
- [X] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry
- [ ] `area/windows`: Windows support
### What language(s) does this bug affect?
- [ ] `language/r`: R APIs and clients
- [ ] `language/java`: Java APIs and clients
- [ ] `language/new`: Proposals for new client languages
### What integration(s) does this bug affect?
- [ ] `integrations/azure`: Azure and Azure ML integrations
- [ ] `integrations/sagemaker`: SageMaker integrations
- [ ] `integrations/databricks`: Databricks integrations
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mlflow/server/auth/db/utils.py`
Content:
```
1 from pathlib import Path
2
3 from alembic.command import upgrade
4 from alembic.config import Config
5 from alembic.migration import MigrationContext
6 from alembic.script import ScriptDirectory
7 from sqlalchemy.engine.base import Engine
8
9
10 def _get_alembic_dir() -> str:
11 return Path(__file__).parent / "migrations"
12
13
14 def _get_alembic_config(url: str) -> Config:
15 alembic_dir = _get_alembic_dir()
16 alembic_ini_path = alembic_dir / "alembic.ini"
17 alembic_cfg = Config(alembic_ini_path)
18 alembic_cfg.set_main_option("script_location", str(alembic_dir))
19 alembic_cfg.set_main_option("sqlalchemy.url", url)
20 return alembic_cfg
21
22
23 def migrate(engine: Engine, revision: str) -> None:
24 alembic_cfg = _get_alembic_config(engine.url.render_as_string(hide_password=False))
25 with engine.begin() as conn:
26 alembic_cfg.attributes["connection"] = conn
27 upgrade(alembic_cfg, revision)
28
29
30 def migrate_if_needed(engine: Engine, revision: str) -> None:
31 alembic_cfg = _get_alembic_config(str(engine.url))
32 script_dir = ScriptDirectory.from_config(alembic_cfg)
33 with engine.begin() as conn:
34 context = MigrationContext.configure(conn)
35 if context.get_current_revision() != script_dir.get_current_head():
36 upgrade(alembic_cfg, revision)
37
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mlflow/server/auth/db/utils.py b/mlflow/server/auth/db/utils.py
--- a/mlflow/server/auth/db/utils.py
+++ b/mlflow/server/auth/db/utils.py
@@ -28,7 +28,7 @@
def migrate_if_needed(engine: Engine, revision: str) -> None:
- alembic_cfg = _get_alembic_config(str(engine.url))
+ alembic_cfg = _get_alembic_config(engine.url.render_as_string(hide_password=False))
script_dir = ScriptDirectory.from_config(alembic_cfg)
with engine.begin() as conn:
context = MigrationContext.configure(conn)
| {"golden_diff": "diff --git a/mlflow/server/auth/db/utils.py b/mlflow/server/auth/db/utils.py\n--- a/mlflow/server/auth/db/utils.py\n+++ b/mlflow/server/auth/db/utils.py\n@@ -28,7 +28,7 @@\n \n \n def migrate_if_needed(engine: Engine, revision: str) -> None:\n- alembic_cfg = _get_alembic_config(str(engine.url))\n+ alembic_cfg = _get_alembic_config(engine.url.render_as_string(hide_password=False))\n script_dir = ScriptDirectory.from_config(alembic_cfg)\n with engine.begin() as conn:\n context = MigrationContext.configure(conn)\n", "issue": "[BUG] basic-auth init on remote database\n### Describe the problem\r\n\r\nSame issue #9399 happened when trying to initialize database which invokes this function [migrate_if_needed](https://github.com/mlflow/mlflow/blob/master/mlflow/server/auth/db/utils.py#L30)\r\n\r\nSuggestion: Apply the same fix #9410 to force SqlAlchemy to render unobfuscated url\r\n\r\n### Suggestion\r\n```\r\nalembic_cfg = _get_alembic_config(engine.url.render_as_string(hide_password=False))\r\n```\r\n\r\n### What component(s) does this bug affect?\r\n\r\n- [ ] `area/artifacts`: Artifact stores and artifact logging\r\n- [ ] `area/build`: Build and test infrastructure for MLflow\r\n- [ ] `area/docs`: MLflow documentation pages\r\n- [ ] `area/examples`: Example code\r\n- [ ] `area/gateway`: AI Gateway service, Gateway client APIs, third-party Gateway integrations\r\n- [ ] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry\r\n- [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors\r\n- [ ] `area/recipes`: Recipes, Recipe APIs, Recipe configs, Recipe Templates\r\n- [ ] `area/projects`: MLproject format, project running backends\r\n- [ ] `area/scoring`: MLflow Model server, model deployment tools, Spark UDFs\r\n- [X] `area/server-infra`: MLflow Tracking server backend\r\n- [ ] `area/tracking`: Tracking Service, tracking client APIs, autologging\r\n\r\n### What interface(s) does this bug affect?\r\n\r\n- [ ] `area/uiux`: Front-end, user experience, plotting, JavaScript, JavaScript dev server\r\n- [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models\r\n- [X] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry\r\n- [ ] `area/windows`: Windows support\r\n\r\n### What language(s) does this bug affect?\r\n\r\n- [ ] `language/r`: R APIs and clients\r\n- [ ] `language/java`: Java APIs and clients\r\n- [ ] `language/new`: Proposals for new client languages\r\n\r\n### What integration(s) does this bug affect?\r\n\r\n- [ ] `integrations/azure`: Azure and Azure ML integrations\r\n- [ ] `integrations/sagemaker`: SageMaker integrations\r\n- [ ] `integrations/databricks`: Databricks integrations\n", "before_files": [{"content": "from pathlib import Path\n\nfrom alembic.command import upgrade\nfrom alembic.config import Config\nfrom alembic.migration import MigrationContext\nfrom alembic.script import ScriptDirectory\nfrom sqlalchemy.engine.base import Engine\n\n\ndef _get_alembic_dir() -> str:\n return Path(__file__).parent / \"migrations\"\n\n\ndef _get_alembic_config(url: str) -> Config:\n alembic_dir = _get_alembic_dir()\n alembic_ini_path = alembic_dir / \"alembic.ini\"\n alembic_cfg = Config(alembic_ini_path)\n alembic_cfg.set_main_option(\"script_location\", str(alembic_dir))\n alembic_cfg.set_main_option(\"sqlalchemy.url\", url)\n return alembic_cfg\n\n\ndef migrate(engine: Engine, revision: str) -> None:\n alembic_cfg = 
_get_alembic_config(engine.url.render_as_string(hide_password=False))\n with engine.begin() as conn:\n alembic_cfg.attributes[\"connection\"] = conn\n upgrade(alembic_cfg, revision)\n\n\ndef migrate_if_needed(engine: Engine, revision: str) -> None:\n alembic_cfg = _get_alembic_config(str(engine.url))\n script_dir = ScriptDirectory.from_config(alembic_cfg)\n with engine.begin() as conn:\n context = MigrationContext.configure(conn)\n if context.get_current_revision() != script_dir.get_current_head():\n upgrade(alembic_cfg, revision)\n", "path": "mlflow/server/auth/db/utils.py"}], "after_files": [{"content": "from pathlib import Path\n\nfrom alembic.command import upgrade\nfrom alembic.config import Config\nfrom alembic.migration import MigrationContext\nfrom alembic.script import ScriptDirectory\nfrom sqlalchemy.engine.base import Engine\n\n\ndef _get_alembic_dir() -> str:\n return Path(__file__).parent / \"migrations\"\n\n\ndef _get_alembic_config(url: str) -> Config:\n alembic_dir = _get_alembic_dir()\n alembic_ini_path = alembic_dir / \"alembic.ini\"\n alembic_cfg = Config(alembic_ini_path)\n alembic_cfg.set_main_option(\"script_location\", str(alembic_dir))\n alembic_cfg.set_main_option(\"sqlalchemy.url\", url)\n return alembic_cfg\n\n\ndef migrate(engine: Engine, revision: str) -> None:\n alembic_cfg = _get_alembic_config(engine.url.render_as_string(hide_password=False))\n with engine.begin() as conn:\n alembic_cfg.attributes[\"connection\"] = conn\n upgrade(alembic_cfg, revision)\n\n\ndef migrate_if_needed(engine: Engine, revision: str) -> None:\n alembic_cfg = _get_alembic_config(engine.url.render_as_string(hide_password=False))\n script_dir = ScriptDirectory.from_config(alembic_cfg)\n with engine.begin() as conn:\n context = MigrationContext.configure(conn)\n if context.get_current_revision() != script_dir.get_current_head():\n upgrade(alembic_cfg, revision)\n", "path": "mlflow/server/auth/db/utils.py"}]} | 1,187 | 140 |
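A small sketch of the SQLAlchemy URL behaviour the record above relies on, assuming SQLAlchemy 1.4 or newer: `str(url)` masks the password, while `render_as_string(hide_password=False)` keeps the real credentials that Alembic needs. The connection string is a made-up example.

```python
# Why str(engine.url) breaks remote migrations: the password is masked as ***.
from sqlalchemy.engine import make_url

url = make_url("postgresql://mlflow_user:s3cret@db.example.com:5432/auth")  # made-up DSN

print(str(url))
# postgresql://mlflow_user:***@db.example.com:5432/auth   -> not a usable DSN

print(url.render_as_string(hide_password=False))
# postgresql://mlflow_user:s3cret@db.example.com:5432/auth -> safe to hand to Alembic
```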
gh_patches_debug_61226 | rasdani/github-patches | git_diff | searxng__searxng-2862 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: bilibili engine is broken
<!-- PLEASE FILL THESE FIELDS, IT REALLY HELPS THE MAINTAINERS OF SearXNG -->
Something has changed, and now some fixes are needed to use the API successfully.
**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**
Repository: https://github.com/searxng/searxng
Branch: master
Version: 2023.9.27+1a66d7467+dirty
<!-- If you are running on master branch using git execute this command
in order to fetch the latest commit ID:
```
git log -1
```
If you are using searxng-docker then look at the bottom of the SearXNG page
and check for the version after "Powered by SearXNG"
Please also stipulate if you are using a forked version of SearXNG and
include a link to the fork source code.
-->
**How did you install SearXNG?**
make run
<!-- Did you install SearXNG using the official wiki or using searxng-docker
or manually by executing the searx/webapp.py file? -->
**What happened?**
<!-- A clear and concise description of what the bug is. -->
**How To Reproduce**
<!-- How can we reproduce this issue? (as minimally and as precisely as possible) -->
**Expected behavior**
<!-- A clear and concise description of what you expected to happen. -->
**Screenshots & Logs**
<!-- If applicable, add screenshots, logs to help explain your problem. -->
**Additional context**
<!-- Add any other context about the problem here. -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `searx/engines/bilibili.py`
Content:
```
1 # SPDX-License-Identifier: AGPL-3.0-or-later
2 # lint: pylint
3 """Bilibili is a Chinese video sharing website.
4
5 .. _Bilibili: https://www.bilibili.com
6 """
7
8 import random
9 import string
10 from urllib.parse import urlencode
11 from datetime import datetime, timedelta
12
13 # Engine metadata
14 about = {
15 "website": "https://www.bilibili.com",
16 "wikidata_id": "Q3077586",
17 "official_api_documentation": None,
18 "use_official_api": False,
19 "require_api_key": False,
20 "results": "JSON",
21 }
22
23 # Engine configuration
24 paging = True
25 results_per_page = 20
26 categories = ["videos"]
27
28 # Search URL
29 base_url = "https://api.bilibili.com/x/web-interface/wbi/search/type"
30
31 cookie = {
32 "innersign": "0",
33 "buvid3": "".join(random.choice(string.hexdigits) for _ in range(16)) + "infoc",
34 "i-wanna-go-back": "-1",
35 "b_ut": "7",
36 "FEED_LIVE_VERSION": "V8",
37 "header_theme_version": "undefined",
38 "home_feed_column": "4",
39 }
40
41
42 def request(query, params):
43 query_params = {
44 "__refresh__": "true",
45 "page": params["pageno"],
46 "page_size": results_per_page,
47 "single_column": "0",
48 "keyword": query,
49 "search_type": "video",
50 }
51
52 params["url"] = f"{base_url}?{urlencode(query_params)}"
53 params["cookies"] = cookie
54
55 return params
56
57
58 # Format the video duration
59 def format_duration(duration):
60 minutes, seconds = map(int, duration.split(":"))
61 total_seconds = minutes * 60 + seconds
62
63 formatted_duration = str(timedelta(seconds=total_seconds))[2:] if 0 <= total_seconds < 3600 else ""
64
65 return formatted_duration
66
67
68 def response(resp):
69 search_res = resp.json()
70
71 results = []
72
73 for item in search_res.get("data", {}).get("result", []):
74 title = item["title"]
75 url = item["arcurl"]
76 thumbnail = item["pic"]
77 description = item["description"]
78 author = item["author"]
79 video_id = item["aid"]
80 unix_date = item["pubdate"]
81
82 formatted_date = datetime.utcfromtimestamp(unix_date)
83 formatted_duration = format_duration(item["duration"])
84 iframe_url = f"https://player.bilibili.com/player.html?aid={video_id}&high_quality=1&autoplay=false&danmaku=0"
85
86 results.append(
87 {
88 "title": title,
89 "url": url,
90 "content": description,
91 "author": author,
92 "publishedDate": formatted_date,
93 "length": formatted_duration,
94 "thumbnail": thumbnail,
95 "iframe_src": iframe_url,
96 "template": "videos.html",
97 }
98 )
99
100 return results
101
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/searx/engines/bilibili.py b/searx/engines/bilibili.py
--- a/searx/engines/bilibili.py
+++ b/searx/engines/bilibili.py
@@ -26,7 +26,7 @@
categories = ["videos"]
# Search URL
-base_url = "https://api.bilibili.com/x/web-interface/wbi/search/type"
+base_url = "https://api.bilibili.com/x/web-interface/search/type"
cookie = {
"innersign": "0",
| {"golden_diff": "diff --git a/searx/engines/bilibili.py b/searx/engines/bilibili.py\n--- a/searx/engines/bilibili.py\n+++ b/searx/engines/bilibili.py\n@@ -26,7 +26,7 @@\n categories = [\"videos\"]\n \n # Search URL\n-base_url = \"https://api.bilibili.com/x/web-interface/wbi/search/type\"\n+base_url = \"https://api.bilibili.com/x/web-interface/search/type\"\n \n cookie = {\n \"innersign\": \"0\",\n", "issue": "Bug: bilibili engine is broken\n<!-- PLEASE FILL THESE FIELDS, IT REALLY HELPS THE MAINTAINERS OF SearXNG -->\r\n\r\nSomething has changed, and now some fixes are needed to use the api successfully.\r\n\r\n**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**\r\nRepository: https://github.com/searxng/searxng\r\nBranch: master\r\nVersion: 2023.9.27+1a66d7467+dirty\r\n<!-- If you are running on master branch using git execute this command\r\nin order to fetch the latest commit ID:\r\n```\r\ngit log -1\r\n``` \r\nIf you are using searxng-docker then look at the bottom of the SearXNG page\r\nand check for the version after \"Powered by SearXNG\"\r\n\r\nPlease also stipulate if you are using a forked version of SearXNG and\r\ninclude a link to the fork source code.\r\n-->\r\n**How did you install SearXNG?**\r\nmake run\r\n<!-- Did you install SearXNG using the official wiki or using searxng-docker\r\nor manually by executing the searx/webapp.py file? -->\r\n**What happened?**\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\n**How To Reproduce**\r\n<!-- How can we reproduce this issue? (as minimally and as precisely as possible) -->\r\n\r\n**Expected behavior**\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\n**Screenshots & Logs**\r\n<!-- If applicable, add screenshots, logs to help explain your problem. -->\r\n\r\n**Additional context**\r\n<!-- Add any other context about the problem here. -->\r\n\n", "before_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n# lint: pylint\n\"\"\"Bilibili is a Chinese video sharing website.\n\n.. 
_Bilibili: https://www.bilibili.com\n\"\"\"\n\nimport random\nimport string\nfrom urllib.parse import urlencode\nfrom datetime import datetime, timedelta\n\n# Engine metadata\nabout = {\n \"website\": \"https://www.bilibili.com\",\n \"wikidata_id\": \"Q3077586\",\n \"official_api_documentation\": None,\n \"use_official_api\": False,\n \"require_api_key\": False,\n \"results\": \"JSON\",\n}\n\n# Engine configuration\npaging = True\nresults_per_page = 20\ncategories = [\"videos\"]\n\n# Search URL\nbase_url = \"https://api.bilibili.com/x/web-interface/wbi/search/type\"\n\ncookie = {\n \"innersign\": \"0\",\n \"buvid3\": \"\".join(random.choice(string.hexdigits) for _ in range(16)) + \"infoc\",\n \"i-wanna-go-back\": \"-1\",\n \"b_ut\": \"7\",\n \"FEED_LIVE_VERSION\": \"V8\",\n \"header_theme_version\": \"undefined\",\n \"home_feed_column\": \"4\",\n}\n\n\ndef request(query, params):\n query_params = {\n \"__refresh__\": \"true\",\n \"page\": params[\"pageno\"],\n \"page_size\": results_per_page,\n \"single_column\": \"0\",\n \"keyword\": query,\n \"search_type\": \"video\",\n }\n\n params[\"url\"] = f\"{base_url}?{urlencode(query_params)}\"\n params[\"cookies\"] = cookie\n\n return params\n\n\n# Format the video duration\ndef format_duration(duration):\n minutes, seconds = map(int, duration.split(\":\"))\n total_seconds = minutes * 60 + seconds\n\n formatted_duration = str(timedelta(seconds=total_seconds))[2:] if 0 <= total_seconds < 3600 else \"\"\n\n return formatted_duration\n\n\ndef response(resp):\n search_res = resp.json()\n\n results = []\n\n for item in search_res.get(\"data\", {}).get(\"result\", []):\n title = item[\"title\"]\n url = item[\"arcurl\"]\n thumbnail = item[\"pic\"]\n description = item[\"description\"]\n author = item[\"author\"]\n video_id = item[\"aid\"]\n unix_date = item[\"pubdate\"]\n\n formatted_date = datetime.utcfromtimestamp(unix_date)\n formatted_duration = format_duration(item[\"duration\"])\n iframe_url = f\"https://player.bilibili.com/player.html?aid={video_id}&high_quality=1&autoplay=false&danmaku=0\"\n\n results.append(\n {\n \"title\": title,\n \"url\": url,\n \"content\": description,\n \"author\": author,\n \"publishedDate\": formatted_date,\n \"length\": formatted_duration,\n \"thumbnail\": thumbnail,\n \"iframe_src\": iframe_url,\n \"template\": \"videos.html\",\n }\n )\n\n return results\n", "path": "searx/engines/bilibili.py"}], "after_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n# lint: pylint\n\"\"\"Bilibili is a Chinese video sharing website.\n\n.. 
_Bilibili: https://www.bilibili.com\n\"\"\"\n\nimport random\nimport string\nfrom urllib.parse import urlencode\nfrom datetime import datetime, timedelta\n\n# Engine metadata\nabout = {\n \"website\": \"https://www.bilibili.com\",\n \"wikidata_id\": \"Q3077586\",\n \"official_api_documentation\": None,\n \"use_official_api\": False,\n \"require_api_key\": False,\n \"results\": \"JSON\",\n}\n\n# Engine configuration\npaging = True\nresults_per_page = 20\ncategories = [\"videos\"]\n\n# Search URL\nbase_url = \"https://api.bilibili.com/x/web-interface/search/type\"\n\ncookie = {\n \"innersign\": \"0\",\n \"buvid3\": \"\".join(random.choice(string.hexdigits) for _ in range(16)) + \"infoc\",\n \"i-wanna-go-back\": \"-1\",\n \"b_ut\": \"7\",\n \"FEED_LIVE_VERSION\": \"V8\",\n \"header_theme_version\": \"undefined\",\n \"home_feed_column\": \"4\",\n}\n\n\ndef request(query, params):\n query_params = {\n \"__refresh__\": \"true\",\n \"page\": params[\"pageno\"],\n \"page_size\": results_per_page,\n \"single_column\": \"0\",\n \"keyword\": query,\n \"search_type\": \"video\",\n }\n\n params[\"url\"] = f\"{base_url}?{urlencode(query_params)}\"\n params[\"cookies\"] = cookie\n\n return params\n\n\n# Format the video duration\ndef format_duration(duration):\n minutes, seconds = map(int, duration.split(\":\"))\n total_seconds = minutes * 60 + seconds\n\n formatted_duration = str(timedelta(seconds=total_seconds))[2:] if 0 <= total_seconds < 3600 else \"\"\n\n return formatted_duration\n\n\ndef response(resp):\n search_res = resp.json()\n\n results = []\n\n for item in search_res.get(\"data\", {}).get(\"result\", []):\n title = item[\"title\"]\n url = item[\"arcurl\"]\n thumbnail = item[\"pic\"]\n description = item[\"description\"]\n author = item[\"author\"]\n video_id = item[\"aid\"]\n unix_date = item[\"pubdate\"]\n\n formatted_date = datetime.utcfromtimestamp(unix_date)\n formatted_duration = format_duration(item[\"duration\"])\n iframe_url = f\"https://player.bilibili.com/player.html?aid={video_id}&high_quality=1&autoplay=false&danmaku=0\"\n\n results.append(\n {\n \"title\": title,\n \"url\": url,\n \"content\": description,\n \"author\": author,\n \"publishedDate\": formatted_date,\n \"length\": formatted_duration,\n \"thumbnail\": thumbnail,\n \"iframe_src\": iframe_url,\n \"template\": \"videos.html\",\n }\n )\n\n return results\n", "path": "searx/engines/bilibili.py"}]} | 1,488 | 124 |
gh_patches_debug_57128 | rasdani/github-patches | git_diff | liqd__adhocracy4-58 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Extend linting to JavaScript and JSX files
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `adhocracy4/reports/emails.py`
Content:
```
1 from django.contrib.auth import get_user_model
2 from django.core import urlresolvers
3
4 from adhocracy4 import emails
5
6 User = get_user_model()
7
8
9 class ReportModeratorEmail(emails.ModeratorNotification):
10 template_name = 'a4reports/emails/report_moderators'
11
12
13 class ReportCreatorEmail(emails.Email):
14 template_name = 'a4reports/emails/report_creator'
15
16 def get_receivers(self):
17 return [self.object.content_object.creator]
18
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/adhocracy4/reports/emails.py b/adhocracy4/reports/emails.py
--- a/adhocracy4/reports/emails.py
+++ b/adhocracy4/reports/emails.py
@@ -1,5 +1,4 @@
from django.contrib.auth import get_user_model
-from django.core import urlresolvers
from adhocracy4 import emails
| {"golden_diff": "diff --git a/adhocracy4/reports/emails.py b/adhocracy4/reports/emails.py\n--- a/adhocracy4/reports/emails.py\n+++ b/adhocracy4/reports/emails.py\n@@ -1,5 +1,4 @@\n from django.contrib.auth import get_user_model\n-from django.core import urlresolvers\n \n from adhocracy4 import emails\n", "issue": "Extend linting to javascript and jsx files\n\n", "before_files": [{"content": "from django.contrib.auth import get_user_model\nfrom django.core import urlresolvers\n\nfrom adhocracy4 import emails\n\nUser = get_user_model()\n\n\nclass ReportModeratorEmail(emails.ModeratorNotification):\n template_name = 'a4reports/emails/report_moderators'\n\n\nclass ReportCreatorEmail(emails.Email):\n template_name = 'a4reports/emails/report_creator'\n\n def get_receivers(self):\n return [self.object.content_object.creator]\n", "path": "adhocracy4/reports/emails.py"}], "after_files": [{"content": "from django.contrib.auth import get_user_model\n\nfrom adhocracy4 import emails\n\nUser = get_user_model()\n\n\nclass ReportModeratorEmail(emails.ModeratorNotification):\n template_name = 'a4reports/emails/report_moderators'\n\n\nclass ReportCreatorEmail(emails.Email):\n template_name = 'a4reports/emails/report_creator'\n\n def get_receivers(self):\n return [self.object.content_object.creator]\n", "path": "adhocracy4/reports/emails.py"}]} | 401 | 83 |
gh_patches_debug_24071 | rasdani/github-patches | git_diff | open-mmlab__mmdetection-5654 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error getting params for DETR / Deformable DETR
Despite my attempts to modify the config, the error also occurs when simply testing with the basic DETR config file. 
Maybe this issue has already been raised?
mmdet==2.13.0
mmcv=1.3.3
```python
python tools/analysis_tools/get_flops.py configs/detr/detr_r50_8x2_150e_coco.py
```
```python
/home/bluav/mmdetection/mmdet/models/backbones/resnet.py:400: UserWarning: DeprecationWarning: pretrained is a deprecated, please use "init_cfg" instead
warnings.warn('DeprecationWarning: pretrained is a deprecated, '
Warning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!
Warning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!
Warning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!
Warning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!
Warning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!
Warning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!
Warning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!
Warning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!
Warning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!
Warning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!
Warning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!
Warning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!
Warning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!
Traceback (most recent call last):
File "tools/analysis_tools/get_flops.py", line 81, in <module>
main()
File "tools/analysis_tools/get_flops.py", line 71, in main
flops, params = get_model_complexity_info(model, input_shape)
File "/home/bluav/.conda/envs/open-mmlab/lib/python3.7/site-packages/mmcv/cnn/utils/flops_counter.py", line 104, in get_model_complexity_info
_ = flops_model(batch)
File "/home/bluav/.conda/envs/open-mmlab/lib/python3.7/site-packages/torch/nn/modules/module.py", line 889, in _call_impl
result = self.forward(*input, **kwargs)
File "/home/bluav/mmdetection/mmdet/models/detectors/single_stage.py", line 48, in forward_dummy
outs = self.bbox_head(x)
File "/home/bluav/.conda/envs/open-mmlab/lib/python3.7/site-packages/torch/nn/modules/module.py", line 889, in _call_impl
result = self.forward(*input, **kwargs)
TypeError: forward() missing 1 required positional argument: 'img_metas'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mmdet/models/detectors/detr.py`
Content:
```
1 import torch
2
3 from ..builder import DETECTORS
4 from .single_stage import SingleStageDetector
5
6
7 @DETECTORS.register_module()
8 class DETR(SingleStageDetector):
9 r"""Implementation of `DETR: End-to-End Object Detection with
10 Transformers <https://arxiv.org/pdf/2005.12872>`_"""
11
12 def __init__(self,
13 backbone,
14 bbox_head,
15 train_cfg=None,
16 test_cfg=None,
17 pretrained=None,
18 init_cfg=None):
19 super(DETR, self).__init__(backbone, None, bbox_head, train_cfg,
20 test_cfg, pretrained, init_cfg)
21
22 # over-write `onnx_export` because:
23 # (1) the forward of bbox_head requires img_metas
24 # (2) the different behavior (e.g. construction of `masks`) between
25 # torch and ONNX model, during the forward of bbox_head
26 def onnx_export(self, img, img_metas):
27 """Test function for exporting to ONNX, without test time augmentation.
28
29 Args:
30 img (torch.Tensor): input images.
31 img_metas (list[dict]): List of image information.
32
33 Returns:
34 tuple[Tensor, Tensor]: dets of shape [N, num_det, 5]
35 and class labels of shape [N, num_det].
36 """
37 x = self.extract_feat(img)
38 # forward of this head requires img_metas
39 outs = self.bbox_head.forward_onnx(x, img_metas)
40 # get shape as tensor
41 img_shape = torch._shape_as_tensor(img)[2:]
42 img_metas[0]['img_shape_for_onnx'] = img_shape
43
44 det_bboxes, det_labels = self.bbox_head.onnx_export(*outs, img_metas)
45
46 return det_bboxes, det_labels
47
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mmdet/models/detectors/detr.py b/mmdet/models/detectors/detr.py
--- a/mmdet/models/detectors/detr.py
+++ b/mmdet/models/detectors/detr.py
@@ -1,3 +1,5 @@
+import warnings
+
import torch
from ..builder import DETECTORS
@@ -19,6 +21,27 @@
super(DETR, self).__init__(backbone, None, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
+ # over-write `forward_dummy` because:
+ # the forward of bbox_head requires img_metas
+ def forward_dummy(self, img):
+ """Used for computing network flops.
+
+ See `mmdetection/tools/analysis_tools/get_flops.py`
+ """
+ warnings.warn('Warning! MultiheadAttention in DETR does not '
+ 'support flops computation! Do not use the '
+ 'results in your papers!')
+
+ batch_size, _, height, width = img.shape
+ dummy_img_metas = [
+ dict(
+ batch_input_shape=(height, width),
+ img_shape=(height, width, 3)) for _ in range(batch_size)
+ ]
+ x = self.extract_feat(img)
+ outs = self.bbox_head(x, dummy_img_metas)
+ return outs
+
# over-write `onnx_export` because:
# (1) the forward of bbox_head requires img_metas
# (2) the different behavior (e.g. construction of `masks`) between
| {"golden_diff": "diff --git a/mmdet/models/detectors/detr.py b/mmdet/models/detectors/detr.py\n--- a/mmdet/models/detectors/detr.py\n+++ b/mmdet/models/detectors/detr.py\n@@ -1,3 +1,5 @@\n+import warnings\n+\n import torch\n \n from ..builder import DETECTORS\n@@ -19,6 +21,27 @@\n super(DETR, self).__init__(backbone, None, bbox_head, train_cfg,\n test_cfg, pretrained, init_cfg)\n \n+ # over-write `forward_dummy` because:\n+ # the forward of bbox_head requires img_metas\n+ def forward_dummy(self, img):\n+ \"\"\"Used for computing network flops.\n+\n+ See `mmdetection/tools/analysis_tools/get_flops.py`\n+ \"\"\"\n+ warnings.warn('Warning! MultiheadAttention in DETR does not '\n+ 'support flops computation! Do not use the '\n+ 'results in your papers!')\n+\n+ batch_size, _, height, width = img.shape\n+ dummy_img_metas = [\n+ dict(\n+ batch_input_shape=(height, width),\n+ img_shape=(height, width, 3)) for _ in range(batch_size)\n+ ]\n+ x = self.extract_feat(img)\n+ outs = self.bbox_head(x, dummy_img_metas)\n+ return outs\n+\n # over-write `onnx_export` because:\n # (1) the forward of bbox_head requires img_metas\n # (2) the different behavior (e.g. construction of `masks`) between\n", "issue": "Error get params DETR/ Deformable DETR\nDespite my attempts to modify, also just testing with the basic config detr file. \r\nMaybe this issue has already been raised?\r\nmmdet==2.13.0\r\nmmcv=1.3.3\r\n\r\n```python\r\npython tools/analysis_tools/get_flops.py configs/detr/detr_r50_8x2_150e_coco.py\r\n```\r\n\r\n```python\r\n/home/bluav/mmdetection/mmdet/models/backbones/resnet.py:400: UserWarning: DeprecationWarning: pretrained is a deprecated, please use \"init_cfg\" instead\r\n warnings.warn('DeprecationWarning: pretrained is a deprecated, '\r\nWarning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!\r\nWarning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!\r\nWarning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!\r\nWarning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!\r\nWarning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!\r\nWarning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!\r\nWarning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!\r\nWarning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!\r\nWarning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!\r\nWarning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!\r\nWarning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!\r\nWarning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!\r\nWarning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!\r\nTraceback (most recent call last):\r\n File \"tools/analysis_tools/get_flops.py\", line 81, in <module>\r\n main()\r\n File \"tools/analysis_tools/get_flops.py\", line 71, in main\r\n flops, params = get_model_complexity_info(model, input_shape)\r\n File 
\"/home/bluav/.conda/envs/open-mmlab/lib/python3.7/site-packages/mmcv/cnn/utils/flops_counter.py\", line 104, in get_model_complexity_info\r\n _ = flops_model(batch)\r\n File \"/home/bluav/.conda/envs/open-mmlab/lib/python3.7/site-packages/torch/nn/modules/module.py\", line 889, in _call_impl\r\n result = self.forward(*input, **kwargs)\r\n File \"/home/bluav/mmdetection/mmdet/models/detectors/single_stage.py\", line 48, in forward_dummy\r\n outs = self.bbox_head(x)\r\n File \"/home/bluav/.conda/envs/open-mmlab/lib/python3.7/site-packages/torch/nn/modules/module.py\", line 889, in _call_impl\r\n result = self.forward(*input, **kwargs)\r\nTypeError: forward() missing 1 required positional argument: 'img_metas'\r\n```\r\n\n", "before_files": [{"content": "import torch\n\nfrom ..builder import DETECTORS\nfrom .single_stage import SingleStageDetector\n\n\[email protected]_module()\nclass DETR(SingleStageDetector):\n r\"\"\"Implementation of `DETR: End-to-End Object Detection with\n Transformers <https://arxiv.org/pdf/2005.12872>`_\"\"\"\n\n def __init__(self,\n backbone,\n bbox_head,\n train_cfg=None,\n test_cfg=None,\n pretrained=None,\n init_cfg=None):\n super(DETR, self).__init__(backbone, None, bbox_head, train_cfg,\n test_cfg, pretrained, init_cfg)\n\n # over-write `onnx_export` because:\n # (1) the forward of bbox_head requires img_metas\n # (2) the different behavior (e.g. construction of `masks`) between\n # torch and ONNX model, during the forward of bbox_head\n def onnx_export(self, img, img_metas):\n \"\"\"Test function for exporting to ONNX, without test time augmentation.\n\n Args:\n img (torch.Tensor): input images.\n img_metas (list[dict]): List of image information.\n\n Returns:\n tuple[Tensor, Tensor]: dets of shape [N, num_det, 5]\n and class labels of shape [N, num_det].\n \"\"\"\n x = self.extract_feat(img)\n # forward of this head requires img_metas\n outs = self.bbox_head.forward_onnx(x, img_metas)\n # get shape as tensor\n img_shape = torch._shape_as_tensor(img)[2:]\n img_metas[0]['img_shape_for_onnx'] = img_shape\n\n det_bboxes, det_labels = self.bbox_head.onnx_export(*outs, img_metas)\n\n return det_bboxes, det_labels\n", "path": "mmdet/models/detectors/detr.py"}], "after_files": [{"content": "import warnings\n\nimport torch\n\nfrom ..builder import DETECTORS\nfrom .single_stage import SingleStageDetector\n\n\[email protected]_module()\nclass DETR(SingleStageDetector):\n r\"\"\"Implementation of `DETR: End-to-End Object Detection with\n Transformers <https://arxiv.org/pdf/2005.12872>`_\"\"\"\n\n def __init__(self,\n backbone,\n bbox_head,\n train_cfg=None,\n test_cfg=None,\n pretrained=None,\n init_cfg=None):\n super(DETR, self).__init__(backbone, None, bbox_head, train_cfg,\n test_cfg, pretrained, init_cfg)\n\n # over-write `forward_dummy` because:\n # the forward of bbox_head requires img_metas\n def forward_dummy(self, img):\n \"\"\"Used for computing network flops.\n\n See `mmdetection/tools/analysis_tools/get_flops.py`\n \"\"\"\n warnings.warn('Warning! MultiheadAttention in DETR does not '\n 'support flops computation! Do not use the '\n 'results in your papers!')\n\n batch_size, _, height, width = img.shape\n dummy_img_metas = [\n dict(\n batch_input_shape=(height, width),\n img_shape=(height, width, 3)) for _ in range(batch_size)\n ]\n x = self.extract_feat(img)\n outs = self.bbox_head(x, dummy_img_metas)\n return outs\n\n # over-write `onnx_export` because:\n # (1) the forward of bbox_head requires img_metas\n # (2) the different behavior (e.g. 
construction of `masks`) between\n # torch and ONNX model, during the forward of bbox_head\n def onnx_export(self, img, img_metas):\n \"\"\"Test function for exporting to ONNX, without test time augmentation.\n\n Args:\n img (torch.Tensor): input images.\n img_metas (list[dict]): List of image information.\n\n Returns:\n tuple[Tensor, Tensor]: dets of shape [N, num_det, 5]\n and class labels of shape [N, num_det].\n \"\"\"\n x = self.extract_feat(img)\n # forward of this head requires img_metas\n outs = self.bbox_head.forward_onnx(x, img_metas)\n # get shape as tensor\n img_shape = torch._shape_as_tensor(img)[2:]\n img_metas[0]['img_shape_for_onnx'] = img_shape\n\n det_bboxes, det_labels = self.bbox_head.onnx_export(*outs, img_metas)\n\n return det_bboxes, det_labels\n", "path": "mmdet/models/detectors/detr.py"}]} | 1,524 | 355 |
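The fix in the record above fabricates minimal per-image metadata inside `forward_dummy` so the FLOPs counter can call a head whose forward requires `img_metas`. Below is a framework-agnostic sketch of that pattern using plain PyTorch; the class and field names are illustrative, not mmdetection API.

```python
# Sketch: build dummy per-image metadata from the input tensor shape so a
# metadata-hungry head can be driven by a flops counter that only passes an image.
import torch
import torch.nn as nn


class ToyHead(nn.Module):
    """Stand-in for a detection head whose forward requires img_metas."""

    def __init__(self) -> None:
        super().__init__()
        self.proj = nn.Conv2d(3, 8, kernel_size=1)

    def forward(self, feats: torch.Tensor, img_metas: list) -> torch.Tensor:
        assert len(img_metas) == feats.shape[0]  # one meta dict per image
        return self.proj(feats)


def forward_dummy(head: ToyHead, img: torch.Tensor) -> torch.Tensor:
    # Fabricate one minimal meta dict per image from the tensor shape alone.
    batch_size, _, height, width = img.shape
    dummy_img_metas = [
        dict(batch_input_shape=(height, width), img_shape=(height, width, 3))
        for _ in range(batch_size)
    ]
    return head(img, dummy_img_metas)


if __name__ == "__main__":
    out = forward_dummy(ToyHead(), torch.randn(2, 3, 32, 32))
    print(out.shape)  # torch.Size([2, 8, 32, 32])
```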
gh_patches_debug_12016 | rasdani/github-patches | git_diff | celery__celery-450 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
os.kill is not available in windows before python 2.7
As the title says, the current Celery implementation (>=2.3.0) crashes on Windows under Python 2.5 and 2.6, because it uses os.kill, which is not available on Windows before Python 2.7.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `celery/concurrency/processes/__init__.py`
Content:
```
1 """
2
3 Process Pools.
4
5 """
6 import platform
7 import signal as _signal
8
9 from os import kill as _kill
10
11 from celery.concurrency.base import BasePool
12 from celery.concurrency.processes.pool import Pool, RUN
13
14 if platform.system() == "Windows": # pragma: no cover
15 # On Windows os.kill calls TerminateProcess which cannot be
16 # handled by # any process, so this is needed to terminate the task
17 # *and its children* (if any).
18 from celery.concurrency.processes import _win
19 _kill = _win.kill_processtree # noqa
20
21
22 class TaskPool(BasePool):
23 """Process Pool for processing tasks in parallel.
24
25 :param processes: see :attr:`processes`.
26 :param logger: see :attr:`logger`.
27
28
29 .. attribute:: limit
30
31 The number of processes that can run simultaneously.
32
33 .. attribute:: logger
34
35 The logger used for debugging.
36
37 """
38 Pool = Pool
39
40 def on_start(self):
41 """Run the task pool.
42
43 Will pre-fork all workers so they're ready to accept tasks.
44
45 """
46 self._pool = self.Pool(processes=self.limit, **self.options)
47 self.on_apply = self._pool.apply_async
48
49 def on_stop(self):
50 """Gracefully stop the pool."""
51 if self._pool is not None and self._pool._state == RUN:
52 self._pool.close()
53 self._pool.join()
54 self._pool = None
55
56 def on_terminate(self):
57 """Force terminate the pool."""
58 if self._pool is not None:
59 self._pool.terminate()
60 self._pool = None
61
62 def terminate_job(self, pid, signal=None):
63 _kill(pid, signal or _signal.SIGTERM)
64
65 def grow(self, n=1):
66 return self._pool.grow(n)
67
68 def shrink(self, n=1):
69 return self._pool.shrink(n)
70
71 def _get_info(self):
72 return {"max-concurrency": self.limit,
73 "processes": [p.pid for p in self._pool._pool],
74 "max-tasks-per-child": self._pool._maxtasksperchild,
75 "put-guarded-by-semaphore": self.putlocks,
76 "timeouts": (self._pool.soft_timeout, self._pool.timeout)}
77
78 @property
79 def num_processes(self):
80 return self._pool._processes
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/celery/concurrency/processes/__init__.py b/celery/concurrency/processes/__init__.py
--- a/celery/concurrency/processes/__init__.py
+++ b/celery/concurrency/processes/__init__.py
@@ -6,8 +6,6 @@
import platform
import signal as _signal
-from os import kill as _kill
-
from celery.concurrency.base import BasePool
from celery.concurrency.processes.pool import Pool, RUN
@@ -17,6 +15,8 @@
# *and its children* (if any).
from celery.concurrency.processes import _win
_kill = _win.kill_processtree # noqa
+else:
+ from os import kill as _kill
class TaskPool(BasePool):
| {"golden_diff": "diff --git a/celery/concurrency/processes/__init__.py b/celery/concurrency/processes/__init__.py\n--- a/celery/concurrency/processes/__init__.py\n+++ b/celery/concurrency/processes/__init__.py\n@@ -6,8 +6,6 @@\n import platform\n import signal as _signal\n \n-from os import kill as _kill\n-\n from celery.concurrency.base import BasePool\n from celery.concurrency.processes.pool import Pool, RUN\n \n@@ -17,6 +15,8 @@\n # *and its children* (if any).\n from celery.concurrency.processes import _win\n _kill = _win.kill_processtree # noqa\n+else:\n+ from os import kill as _kill\n \n \n class TaskPool(BasePool):\n", "issue": "os.kill is not available in windows before python 2.7\nAs per the topic, the current celery implementation (>=2.3.0) crashes on windows using python 2.5 and 2.6, because it uses os.kill which is not available in windows before python 2.7\n\n", "before_files": [{"content": "\"\"\"\n\nProcess Pools.\n\n\"\"\"\nimport platform\nimport signal as _signal\n\nfrom os import kill as _kill\n\nfrom celery.concurrency.base import BasePool\nfrom celery.concurrency.processes.pool import Pool, RUN\n\nif platform.system() == \"Windows\": # pragma: no cover\n # On Windows os.kill calls TerminateProcess which cannot be\n # handled by # any process, so this is needed to terminate the task\n # *and its children* (if any).\n from celery.concurrency.processes import _win\n _kill = _win.kill_processtree # noqa\n\n\nclass TaskPool(BasePool):\n \"\"\"Process Pool for processing tasks in parallel.\n\n :param processes: see :attr:`processes`.\n :param logger: see :attr:`logger`.\n\n\n .. attribute:: limit\n\n The number of processes that can run simultaneously.\n\n .. attribute:: logger\n\n The logger used for debugging.\n\n \"\"\"\n Pool = Pool\n\n def on_start(self):\n \"\"\"Run the task pool.\n\n Will pre-fork all workers so they're ready to accept tasks.\n\n \"\"\"\n self._pool = self.Pool(processes=self.limit, **self.options)\n self.on_apply = self._pool.apply_async\n\n def on_stop(self):\n \"\"\"Gracefully stop the pool.\"\"\"\n if self._pool is not None and self._pool._state == RUN:\n self._pool.close()\n self._pool.join()\n self._pool = None\n\n def on_terminate(self):\n \"\"\"Force terminate the pool.\"\"\"\n if self._pool is not None:\n self._pool.terminate()\n self._pool = None\n\n def terminate_job(self, pid, signal=None):\n _kill(pid, signal or _signal.SIGTERM)\n\n def grow(self, n=1):\n return self._pool.grow(n)\n\n def shrink(self, n=1):\n return self._pool.shrink(n)\n\n def _get_info(self):\n return {\"max-concurrency\": self.limit,\n \"processes\": [p.pid for p in self._pool._pool],\n \"max-tasks-per-child\": self._pool._maxtasksperchild,\n \"put-guarded-by-semaphore\": self.putlocks,\n \"timeouts\": (self._pool.soft_timeout, self._pool.timeout)}\n\n @property\n def num_processes(self):\n return self._pool._processes\n", "path": "celery/concurrency/processes/__init__.py"}], "after_files": [{"content": "\"\"\"\n\nProcess Pools.\n\n\"\"\"\nimport platform\nimport signal as _signal\n\nfrom celery.concurrency.base import BasePool\nfrom celery.concurrency.processes.pool import Pool, RUN\n\nif platform.system() == \"Windows\": # pragma: no cover\n # On Windows os.kill calls TerminateProcess which cannot be\n # handled by # any process, so this is needed to terminate the task\n # *and its children* (if any).\n from celery.concurrency.processes import _win\n _kill = _win.kill_processtree # noqa\nelse:\n from os import kill as _kill\n\n\nclass TaskPool(BasePool):\n 
\"\"\"Process Pool for processing tasks in parallel.\n\n :param processes: see :attr:`processes`.\n :param logger: see :attr:`logger`.\n\n\n .. attribute:: limit\n\n The number of processes that can run simultaneously.\n\n .. attribute:: logger\n\n The logger used for debugging.\n\n \"\"\"\n Pool = Pool\n\n def on_start(self):\n \"\"\"Run the task pool.\n\n Will pre-fork all workers so they're ready to accept tasks.\n\n \"\"\"\n self._pool = self.Pool(processes=self.limit, **self.options)\n self.on_apply = self._pool.apply_async\n\n def on_stop(self):\n \"\"\"Gracefully stop the pool.\"\"\"\n if self._pool is not None and self._pool._state == RUN:\n self._pool.close()\n self._pool.join()\n self._pool = None\n\n def on_terminate(self):\n \"\"\"Force terminate the pool.\"\"\"\n if self._pool is not None:\n self._pool.terminate()\n self._pool = None\n\n def terminate_job(self, pid, signal=None):\n _kill(pid, signal or _signal.SIGTERM)\n\n def grow(self, n=1):\n return self._pool.grow(n)\n\n def shrink(self, n=1):\n return self._pool.shrink(n)\n\n def _get_info(self):\n return {\"max-concurrency\": self.limit,\n \"processes\": [p.pid for p in self._pool._pool],\n \"max-tasks-per-child\": self._pool._maxtasksperchild,\n \"put-guarded-by-semaphore\": self.putlocks,\n \"timeouts\": (self._pool.soft_timeout, self._pool.timeout)}\n\n @property\n def num_processes(self):\n return self._pool._processes\n", "path": "celery/concurrency/processes/__init__.py"}]} | 1,009 | 174 |
gh_patches_debug_3985 | rasdani/github-patches | git_diff | facebookresearch__hydra-2694 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug] Failing lint: Need type annotation for "matches"
See `lint` CI job failure on PR #2689.
```
hydra/plugins/completion_plugin.py:110: error: Need type annotation for "matches" (hint: "matches: List[<type>] = ...") [var-annotated]
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hydra/plugins/completion_plugin.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2
3 # TODO: Test with /miniconda3/envs/hydra36/bin/python , seems to be running python for some reason.
4 # TODO: Add tests for completion with +prefix (should suggest config groups that are not listed)
5 # TODO: Test completion when defaults has a missing mandatory item
6
7
8 import os
9 import re
10 import sys
11 from abc import abstractmethod
12
13 from hydra.errors import ConfigCompositionException
14 from omegaconf import (
15 Container,
16 DictConfig,
17 MissingMandatoryValue,
18 OmegaConf,
19 ListConfig,
20 )
21 from typing import Any, List, Optional, Tuple
22
23 from hydra.core.config_loader import ConfigLoader
24 from hydra.core.object_type import ObjectType
25 from hydra.plugins.plugin import Plugin
26 from hydra.types import RunMode
27
28
29 class CompletionPlugin(Plugin):
30 def __init__(self, config_loader: ConfigLoader) -> None:
31 self.config_loader = config_loader
32
33 @abstractmethod
34 def install(self) -> None:
35 ...
36
37 @abstractmethod
38 def uninstall(self) -> None:
39 ...
40
41 @staticmethod
42 @abstractmethod
43 def provides() -> str:
44 """
45 :return: the name of the shell this plugin provides completion for
46 """
47 ...
48
49 @abstractmethod
50 def query(self, config_name: Optional[str]) -> None:
51 ...
52
53 @staticmethod
54 @abstractmethod
55 def help(command: str) -> str:
56 """
57 :param command: "install" or "uninstall"
58 :return: command the user can run to install or uninstall this shell completion on the appropriate shell
59 """
60 ...
61
62 @staticmethod
63 def _get_filename(filename: str) -> Tuple[Optional[str], Optional[str]]:
64 last = filename.rfind("=")
65 if last != -1:
66 key_eq = filename[0 : last + 1]
67 filename = filename[last + 1 :]
68 prefixes = [".", "/", "\\", "./", ".\\"]
69 if sys.platform.startswith("win"):
70 for drive in range(ord("a"), ord("z")):
71 prefixes.append(f"{chr(drive)}:")
72
73 if not filename:
74 return None, None
75 for prefix in prefixes:
76 if filename.lower().startswith(prefix):
77 return key_eq, filename
78 return None, None
79
80 @staticmethod
81 def complete_files(word: str) -> List[str]:
82 if os.path.isdir(word):
83 dirname = word
84 files = os.listdir(word)
85 file_prefix = ""
86 else:
87 dirname = os.path.dirname(word)
88 if os.path.isdir(dirname):
89 files = os.listdir(dirname)
90 else:
91 files = []
92 file_prefix = os.path.basename(word)
93 ret = []
94 for file in files:
95 if file.startswith(file_prefix):
96 ret.append(os.path.join(dirname, file))
97 return ret
98
99 @staticmethod
100 def _get_matches(config: Container, word: str) -> List[str]:
101 def str_rep(in_key: Any, in_value: Any) -> str:
102 if OmegaConf.is_config(in_value):
103 return f"{in_key}."
104 else:
105 return f"{in_key}="
106
107 if config is None:
108 return []
109 elif OmegaConf.is_config(config):
110 matches = []
111 if word.endswith(".") or word.endswith("="):
112 exact_key = word[0:-1]
113 try:
114 conf_node = OmegaConf.select(
115 config, exact_key, throw_on_missing=True
116 )
117 except MissingMandatoryValue:
118 conf_node = ""
119 if conf_node is not None:
120 if OmegaConf.is_config(conf_node):
121 key_matches = CompletionPlugin._get_matches(conf_node, "")
122 else:
123 # primitive
124 if isinstance(conf_node, bool):
125 conf_node = str(conf_node).lower()
126 key_matches = [conf_node]
127 else:
128 key_matches = []
129
130 matches.extend([f"{word}{match}" for match in key_matches])
131 else:
132 last_dot = word.rfind(".")
133 if last_dot != -1:
134 base_key = word[0:last_dot]
135 partial_key = word[last_dot + 1 :]
136 conf_node = OmegaConf.select(config, base_key)
137 key_matches = CompletionPlugin._get_matches(conf_node, partial_key)
138 matches.extend([f"{base_key}.{match}" for match in key_matches])
139 else:
140 if isinstance(config, DictConfig):
141 for key, value in config.items_ex(resolve=False):
142 str_key = str(key)
143 if str_key.startswith(word):
144 matches.append(str_rep(key, value))
145 elif OmegaConf.is_list(config):
146 assert isinstance(config, ListConfig)
147 for idx in range(len(config)):
148 try:
149 value = config[idx]
150 if str(idx).startswith(word):
151 matches.append(str_rep(idx, value))
152 except MissingMandatoryValue:
153 matches.append(str_rep(idx, ""))
154
155 else:
156 assert False, f"Object is not an instance of config : {type(config)}"
157
158 return matches
159
160 def _query_config_groups(
161 self, word: str, config_name: Optional[str], words: List[str]
162 ) -> Tuple[List[str], bool]:
163 is_addition = word.startswith("+")
164 is_deletion = word.startswith("~")
165 if is_addition or is_deletion:
166 prefix, word = word[0], word[1:]
167 else:
168 prefix = ""
169 last_eq_index = word.rfind("=")
170 last_slash_index = word.rfind("/")
171 exact_match: bool = False
172 if last_eq_index != -1:
173 parent_group = word[0:last_eq_index]
174 results_filter = ObjectType.CONFIG
175 else:
176 results_filter = ObjectType.GROUP
177 if last_slash_index == -1:
178 parent_group = ""
179 else:
180 parent_group = word[0:last_slash_index]
181
182 all_matched_groups = self.config_loader.get_group_options(
183 group_name=parent_group,
184 results_filter=results_filter,
185 config_name=config_name,
186 overrides=words,
187 )
188 matched_groups: List[str] = []
189 if results_filter == ObjectType.CONFIG:
190 for match in all_matched_groups:
191 name = f"{parent_group}={match}" if parent_group != "" else match
192 if name.startswith(word):
193 matched_groups.append(name)
194 exact_match = True
195 elif results_filter == ObjectType.GROUP:
196 for match in all_matched_groups:
197 name = f"{parent_group}/{match}" if parent_group != "" else match
198 if name.startswith(word):
199 files = self.config_loader.get_group_options(
200 group_name=name,
201 results_filter=ObjectType.CONFIG,
202 config_name=config_name,
203 overrides=words,
204 )
205 dirs = self.config_loader.get_group_options(
206 group_name=name,
207 results_filter=ObjectType.GROUP,
208 config_name=config_name,
209 overrides=words,
210 )
211 if len(dirs) == 0 and len(files) > 0 and not is_deletion:
212 name = name + "="
213 elif len(dirs) > 0 and len(files) == 0:
214 name = name + "/"
215 matched_groups.append(name)
216
217 matched_groups = [f"{prefix}{group}" for group in matched_groups]
218 return matched_groups, exact_match
219
220 def _query(self, config_name: Optional[str], line: str) -> List[str]:
221 from .._internal.utils import get_args
222
223 new_word = len(line) == 0 or line[-1] == " "
224 parsed_args = get_args(line.split())
225 words = parsed_args.overrides
226 if new_word or len(words) == 0:
227 word = ""
228 else:
229 word = words[-1]
230 words = words[0:-1]
231
232 fname_prefix, filename = CompletionPlugin._get_filename(word)
233 if filename is not None:
234 assert fname_prefix is not None
235 result = CompletionPlugin.complete_files(filename)
236 result = [fname_prefix + file for file in result]
237 else:
238 matched_groups, exact_match = self._query_config_groups(
239 word, config_name=config_name, words=words
240 )
241 config_matches: List[str] = []
242 if not exact_match:
243 run_mode = RunMode.MULTIRUN if parsed_args.multirun else RunMode.RUN
244 config_matches = []
245 try:
246 config = self.config_loader.load_configuration(
247 config_name=config_name, overrides=words, run_mode=run_mode
248 )
249 config_matches = CompletionPlugin._get_matches(config, word)
250 except ConfigCompositionException:
251 # if config fails to load for whatever reason, do not provide config matches.
252 # possible reasons:
253 # - missing entry in defaults list (- group: ???) and not populated in command line
254 # - a config file is not found
255 # etc.
256 pass
257
258 result = list(set(matched_groups + config_matches))
259
260 return sorted(result)
261
262 @staticmethod
263 def strip_python_or_app_name(line: str) -> str:
264 """
265 Take the command line received from shell completion, and strip the app name from it
266 which could be at the form of python script.py or some_app.
267 it also corrects the key (COMP_INDEX) to reflect the same location in the striped command line.
268 :param line: input line, may contain python file.py followed=by_args..
269 :return: tuple(args line, key of cursor in args line)
270 """
271 python_args = r"^\s*[\w\/]*python[3]?\s*[\w/\.]*\s*(.*)"
272 app_args = r"^\s*[\w_\-=\./]+\s*(.*)"
273 match = re.match(python_args, line)
274 if match:
275 return match.group(1)
276 else:
277 match = re.match(app_args, line)
278 if match:
279 return match.group(1)
280 else:
281 raise RuntimeError(f"Error parsing line '{line}'")
282
283
284 class DefaultCompletionPlugin(CompletionPlugin):
285 """
286 A concrete instance of CompletionPlugin that is used for testing.
287 """
288
289 def install(self) -> None:
290 raise NotImplementedError
291
292 def uninstall(self) -> None:
293 raise NotImplementedError
294
295 @staticmethod
296 def provides() -> str:
297 raise NotImplementedError
298
299 def query(self, config_name: Optional[str]) -> None:
300 raise NotImplementedError
301
302 @staticmethod
303 def help(command: str) -> str:
304 raise NotImplementedError
305
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/hydra/plugins/completion_plugin.py b/hydra/plugins/completion_plugin.py
--- a/hydra/plugins/completion_plugin.py
+++ b/hydra/plugins/completion_plugin.py
@@ -107,7 +107,7 @@
if config is None:
return []
elif OmegaConf.is_config(config):
- matches = []
+ matches: List[str] = []
if word.endswith(".") or word.endswith("="):
exact_key = word[0:-1]
try:
| {"golden_diff": "diff --git a/hydra/plugins/completion_plugin.py b/hydra/plugins/completion_plugin.py\n--- a/hydra/plugins/completion_plugin.py\n+++ b/hydra/plugins/completion_plugin.py\n@@ -107,7 +107,7 @@\n if config is None:\n return []\n elif OmegaConf.is_config(config):\n- matches = []\n+ matches: List[str] = []\n if word.endswith(\".\") or word.endswith(\"=\"):\n exact_key = word[0:-1]\n try:\n", "issue": "[Bug] Failing lint: Need type annotation for \"matches\"\nSee `lint` CI job failure on PR #2689.\r\n\r\n```\r\nhydra/plugins/completion_plugin.py:110: error: Need type annotation for \"matches\" (hint: \"matches: List[<type>] = ...\") [var-annotated]\r\n```\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\n# TODO: Test with /miniconda3/envs/hydra36/bin/python , seems to be running python for some reason.\n# TODO: Add tests for completion with +prefix (should suggest config groups that are not listed)\n# TODO: Test completion when defaults has a missing mandatory item\n\n\nimport os\nimport re\nimport sys\nfrom abc import abstractmethod\n\nfrom hydra.errors import ConfigCompositionException\nfrom omegaconf import (\n Container,\n DictConfig,\n MissingMandatoryValue,\n OmegaConf,\n ListConfig,\n)\nfrom typing import Any, List, Optional, Tuple\n\nfrom hydra.core.config_loader import ConfigLoader\nfrom hydra.core.object_type import ObjectType\nfrom hydra.plugins.plugin import Plugin\nfrom hydra.types import RunMode\n\n\nclass CompletionPlugin(Plugin):\n def __init__(self, config_loader: ConfigLoader) -> None:\n self.config_loader = config_loader\n\n @abstractmethod\n def install(self) -> None:\n ...\n\n @abstractmethod\n def uninstall(self) -> None:\n ...\n\n @staticmethod\n @abstractmethod\n def provides() -> str:\n \"\"\"\n :return: the name of the shell this plugin provides completion for\n \"\"\"\n ...\n\n @abstractmethod\n def query(self, config_name: Optional[str]) -> None:\n ...\n\n @staticmethod\n @abstractmethod\n def help(command: str) -> str:\n \"\"\"\n :param command: \"install\" or \"uninstall\"\n :return: command the user can run to install or uninstall this shell completion on the appropriate shell\n \"\"\"\n ...\n\n @staticmethod\n def _get_filename(filename: str) -> Tuple[Optional[str], Optional[str]]:\n last = filename.rfind(\"=\")\n if last != -1:\n key_eq = filename[0 : last + 1]\n filename = filename[last + 1 :]\n prefixes = [\".\", \"/\", \"\\\\\", \"./\", \".\\\\\"]\n if sys.platform.startswith(\"win\"):\n for drive in range(ord(\"a\"), ord(\"z\")):\n prefixes.append(f\"{chr(drive)}:\")\n\n if not filename:\n return None, None\n for prefix in prefixes:\n if filename.lower().startswith(prefix):\n return key_eq, filename\n return None, None\n\n @staticmethod\n def complete_files(word: str) -> List[str]:\n if os.path.isdir(word):\n dirname = word\n files = os.listdir(word)\n file_prefix = \"\"\n else:\n dirname = os.path.dirname(word)\n if os.path.isdir(dirname):\n files = os.listdir(dirname)\n else:\n files = []\n file_prefix = os.path.basename(word)\n ret = []\n for file in files:\n if file.startswith(file_prefix):\n ret.append(os.path.join(dirname, file))\n return ret\n\n @staticmethod\n def _get_matches(config: Container, word: str) -> List[str]:\n def str_rep(in_key: Any, in_value: Any) -> str:\n if OmegaConf.is_config(in_value):\n return f\"{in_key}.\"\n else:\n return f\"{in_key}=\"\n\n if config is None:\n return []\n elif OmegaConf.is_config(config):\n matches = []\n if word.endswith(\".\") or 
word.endswith(\"=\"):\n exact_key = word[0:-1]\n try:\n conf_node = OmegaConf.select(\n config, exact_key, throw_on_missing=True\n )\n except MissingMandatoryValue:\n conf_node = \"\"\n if conf_node is not None:\n if OmegaConf.is_config(conf_node):\n key_matches = CompletionPlugin._get_matches(conf_node, \"\")\n else:\n # primitive\n if isinstance(conf_node, bool):\n conf_node = str(conf_node).lower()\n key_matches = [conf_node]\n else:\n key_matches = []\n\n matches.extend([f\"{word}{match}\" for match in key_matches])\n else:\n last_dot = word.rfind(\".\")\n if last_dot != -1:\n base_key = word[0:last_dot]\n partial_key = word[last_dot + 1 :]\n conf_node = OmegaConf.select(config, base_key)\n key_matches = CompletionPlugin._get_matches(conf_node, partial_key)\n matches.extend([f\"{base_key}.{match}\" for match in key_matches])\n else:\n if isinstance(config, DictConfig):\n for key, value in config.items_ex(resolve=False):\n str_key = str(key)\n if str_key.startswith(word):\n matches.append(str_rep(key, value))\n elif OmegaConf.is_list(config):\n assert isinstance(config, ListConfig)\n for idx in range(len(config)):\n try:\n value = config[idx]\n if str(idx).startswith(word):\n matches.append(str_rep(idx, value))\n except MissingMandatoryValue:\n matches.append(str_rep(idx, \"\"))\n\n else:\n assert False, f\"Object is not an instance of config : {type(config)}\"\n\n return matches\n\n def _query_config_groups(\n self, word: str, config_name: Optional[str], words: List[str]\n ) -> Tuple[List[str], bool]:\n is_addition = word.startswith(\"+\")\n is_deletion = word.startswith(\"~\")\n if is_addition or is_deletion:\n prefix, word = word[0], word[1:]\n else:\n prefix = \"\"\n last_eq_index = word.rfind(\"=\")\n last_slash_index = word.rfind(\"/\")\n exact_match: bool = False\n if last_eq_index != -1:\n parent_group = word[0:last_eq_index]\n results_filter = ObjectType.CONFIG\n else:\n results_filter = ObjectType.GROUP\n if last_slash_index == -1:\n parent_group = \"\"\n else:\n parent_group = word[0:last_slash_index]\n\n all_matched_groups = self.config_loader.get_group_options(\n group_name=parent_group,\n results_filter=results_filter,\n config_name=config_name,\n overrides=words,\n )\n matched_groups: List[str] = []\n if results_filter == ObjectType.CONFIG:\n for match in all_matched_groups:\n name = f\"{parent_group}={match}\" if parent_group != \"\" else match\n if name.startswith(word):\n matched_groups.append(name)\n exact_match = True\n elif results_filter == ObjectType.GROUP:\n for match in all_matched_groups:\n name = f\"{parent_group}/{match}\" if parent_group != \"\" else match\n if name.startswith(word):\n files = self.config_loader.get_group_options(\n group_name=name,\n results_filter=ObjectType.CONFIG,\n config_name=config_name,\n overrides=words,\n )\n dirs = self.config_loader.get_group_options(\n group_name=name,\n results_filter=ObjectType.GROUP,\n config_name=config_name,\n overrides=words,\n )\n if len(dirs) == 0 and len(files) > 0 and not is_deletion:\n name = name + \"=\"\n elif len(dirs) > 0 and len(files) == 0:\n name = name + \"/\"\n matched_groups.append(name)\n\n matched_groups = [f\"{prefix}{group}\" for group in matched_groups]\n return matched_groups, exact_match\n\n def _query(self, config_name: Optional[str], line: str) -> List[str]:\n from .._internal.utils import get_args\n\n new_word = len(line) == 0 or line[-1] == \" \"\n parsed_args = get_args(line.split())\n words = parsed_args.overrides\n if new_word or len(words) == 0:\n word = \"\"\n else:\n word = 
words[-1]\n words = words[0:-1]\n\n fname_prefix, filename = CompletionPlugin._get_filename(word)\n if filename is not None:\n assert fname_prefix is not None\n result = CompletionPlugin.complete_files(filename)\n result = [fname_prefix + file for file in result]\n else:\n matched_groups, exact_match = self._query_config_groups(\n word, config_name=config_name, words=words\n )\n config_matches: List[str] = []\n if not exact_match:\n run_mode = RunMode.MULTIRUN if parsed_args.multirun else RunMode.RUN\n config_matches = []\n try:\n config = self.config_loader.load_configuration(\n config_name=config_name, overrides=words, run_mode=run_mode\n )\n config_matches = CompletionPlugin._get_matches(config, word)\n except ConfigCompositionException:\n # if config fails to load for whatever reason, do not provide config matches.\n # possible reasons:\n # - missing entry in defaults list (- group: ???) and not populated in command line\n # - a config file is not found\n # etc.\n pass\n\n result = list(set(matched_groups + config_matches))\n\n return sorted(result)\n\n @staticmethod\n def strip_python_or_app_name(line: str) -> str:\n \"\"\"\n Take the command line received from shell completion, and strip the app name from it\n which could be at the form of python script.py or some_app.\n it also corrects the key (COMP_INDEX) to reflect the same location in the striped command line.\n :param line: input line, may contain python file.py followed=by_args..\n :return: tuple(args line, key of cursor in args line)\n \"\"\"\n python_args = r\"^\\s*[\\w\\/]*python[3]?\\s*[\\w/\\.]*\\s*(.*)\"\n app_args = r\"^\\s*[\\w_\\-=\\./]+\\s*(.*)\"\n match = re.match(python_args, line)\n if match:\n return match.group(1)\n else:\n match = re.match(app_args, line)\n if match:\n return match.group(1)\n else:\n raise RuntimeError(f\"Error parsing line '{line}'\")\n\n\nclass DefaultCompletionPlugin(CompletionPlugin):\n \"\"\"\n A concrete instance of CompletionPlugin that is used for testing.\n \"\"\"\n\n def install(self) -> None:\n raise NotImplementedError\n\n def uninstall(self) -> None:\n raise NotImplementedError\n\n @staticmethod\n def provides() -> str:\n raise NotImplementedError\n\n def query(self, config_name: Optional[str]) -> None:\n raise NotImplementedError\n\n @staticmethod\n def help(command: str) -> str:\n raise NotImplementedError\n", "path": "hydra/plugins/completion_plugin.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\n\n# TODO: Test with /miniconda3/envs/hydra36/bin/python , seems to be running python for some reason.\n# TODO: Add tests for completion with +prefix (should suggest config groups that are not listed)\n# TODO: Test completion when defaults has a missing mandatory item\n\n\nimport os\nimport re\nimport sys\nfrom abc import abstractmethod\n\nfrom hydra.errors import ConfigCompositionException\nfrom omegaconf import (\n Container,\n DictConfig,\n MissingMandatoryValue,\n OmegaConf,\n ListConfig,\n)\nfrom typing import Any, List, Optional, Tuple\n\nfrom hydra.core.config_loader import ConfigLoader\nfrom hydra.core.object_type import ObjectType\nfrom hydra.plugins.plugin import Plugin\nfrom hydra.types import RunMode\n\n\nclass CompletionPlugin(Plugin):\n def __init__(self, config_loader: ConfigLoader) -> None:\n self.config_loader = config_loader\n\n @abstractmethod\n def install(self) -> None:\n ...\n\n @abstractmethod\n def uninstall(self) -> None:\n ...\n\n @staticmethod\n @abstractmethod\n def provides() -> str:\n \"\"\"\n :return: the name of the shell this plugin provides completion for\n \"\"\"\n ...\n\n @abstractmethod\n def query(self, config_name: Optional[str]) -> None:\n ...\n\n @staticmethod\n @abstractmethod\n def help(command: str) -> str:\n \"\"\"\n :param command: \"install\" or \"uninstall\"\n :return: command the user can run to install or uninstall this shell completion on the appropriate shell\n \"\"\"\n ...\n\n @staticmethod\n def _get_filename(filename: str) -> Tuple[Optional[str], Optional[str]]:\n last = filename.rfind(\"=\")\n if last != -1:\n key_eq = filename[0 : last + 1]\n filename = filename[last + 1 :]\n prefixes = [\".\", \"/\", \"\\\\\", \"./\", \".\\\\\"]\n if sys.platform.startswith(\"win\"):\n for drive in range(ord(\"a\"), ord(\"z\")):\n prefixes.append(f\"{chr(drive)}:\")\n\n if not filename:\n return None, None\n for prefix in prefixes:\n if filename.lower().startswith(prefix):\n return key_eq, filename\n return None, None\n\n @staticmethod\n def complete_files(word: str) -> List[str]:\n if os.path.isdir(word):\n dirname = word\n files = os.listdir(word)\n file_prefix = \"\"\n else:\n dirname = os.path.dirname(word)\n if os.path.isdir(dirname):\n files = os.listdir(dirname)\n else:\n files = []\n file_prefix = os.path.basename(word)\n ret = []\n for file in files:\n if file.startswith(file_prefix):\n ret.append(os.path.join(dirname, file))\n return ret\n\n @staticmethod\n def _get_matches(config: Container, word: str) -> List[str]:\n def str_rep(in_key: Any, in_value: Any) -> str:\n if OmegaConf.is_config(in_value):\n return f\"{in_key}.\"\n else:\n return f\"{in_key}=\"\n\n if config is None:\n return []\n elif OmegaConf.is_config(config):\n matches: List[str] = []\n if word.endswith(\".\") or word.endswith(\"=\"):\n exact_key = word[0:-1]\n try:\n conf_node = OmegaConf.select(\n config, exact_key, throw_on_missing=True\n )\n except MissingMandatoryValue:\n conf_node = \"\"\n if conf_node is not None:\n if OmegaConf.is_config(conf_node):\n key_matches = CompletionPlugin._get_matches(conf_node, \"\")\n else:\n # primitive\n if isinstance(conf_node, bool):\n conf_node = str(conf_node).lower()\n key_matches = [conf_node]\n else:\n key_matches = []\n\n matches.extend([f\"{word}{match}\" for match in key_matches])\n else:\n last_dot = word.rfind(\".\")\n if last_dot != -1:\n base_key = word[0:last_dot]\n partial_key = word[last_dot + 1 :]\n conf_node = OmegaConf.select(config, base_key)\n key_matches = 
CompletionPlugin._get_matches(conf_node, partial_key)\n matches.extend([f\"{base_key}.{match}\" for match in key_matches])\n else:\n if isinstance(config, DictConfig):\n for key, value in config.items_ex(resolve=False):\n str_key = str(key)\n if str_key.startswith(word):\n matches.append(str_rep(key, value))\n elif OmegaConf.is_list(config):\n assert isinstance(config, ListConfig)\n for idx in range(len(config)):\n try:\n value = config[idx]\n if str(idx).startswith(word):\n matches.append(str_rep(idx, value))\n except MissingMandatoryValue:\n matches.append(str_rep(idx, \"\"))\n\n else:\n assert False, f\"Object is not an instance of config : {type(config)}\"\n\n return matches\n\n def _query_config_groups(\n self, word: str, config_name: Optional[str], words: List[str]\n ) -> Tuple[List[str], bool]:\n is_addition = word.startswith(\"+\")\n is_deletion = word.startswith(\"~\")\n if is_addition or is_deletion:\n prefix, word = word[0], word[1:]\n else:\n prefix = \"\"\n last_eq_index = word.rfind(\"=\")\n last_slash_index = word.rfind(\"/\")\n exact_match: bool = False\n if last_eq_index != -1:\n parent_group = word[0:last_eq_index]\n results_filter = ObjectType.CONFIG\n else:\n results_filter = ObjectType.GROUP\n if last_slash_index == -1:\n parent_group = \"\"\n else:\n parent_group = word[0:last_slash_index]\n\n all_matched_groups = self.config_loader.get_group_options(\n group_name=parent_group,\n results_filter=results_filter,\n config_name=config_name,\n overrides=words,\n )\n matched_groups: List[str] = []\n if results_filter == ObjectType.CONFIG:\n for match in all_matched_groups:\n name = f\"{parent_group}={match}\" if parent_group != \"\" else match\n if name.startswith(word):\n matched_groups.append(name)\n exact_match = True\n elif results_filter == ObjectType.GROUP:\n for match in all_matched_groups:\n name = f\"{parent_group}/{match}\" if parent_group != \"\" else match\n if name.startswith(word):\n files = self.config_loader.get_group_options(\n group_name=name,\n results_filter=ObjectType.CONFIG,\n config_name=config_name,\n overrides=words,\n )\n dirs = self.config_loader.get_group_options(\n group_name=name,\n results_filter=ObjectType.GROUP,\n config_name=config_name,\n overrides=words,\n )\n if len(dirs) == 0 and len(files) > 0 and not is_deletion:\n name = name + \"=\"\n elif len(dirs) > 0 and len(files) == 0:\n name = name + \"/\"\n matched_groups.append(name)\n\n matched_groups = [f\"{prefix}{group}\" for group in matched_groups]\n return matched_groups, exact_match\n\n def _query(self, config_name: Optional[str], line: str) -> List[str]:\n from .._internal.utils import get_args\n\n new_word = len(line) == 0 or line[-1] == \" \"\n parsed_args = get_args(line.split())\n words = parsed_args.overrides\n if new_word or len(words) == 0:\n word = \"\"\n else:\n word = words[-1]\n words = words[0:-1]\n\n fname_prefix, filename = CompletionPlugin._get_filename(word)\n if filename is not None:\n assert fname_prefix is not None\n result = CompletionPlugin.complete_files(filename)\n result = [fname_prefix + file for file in result]\n else:\n matched_groups, exact_match = self._query_config_groups(\n word, config_name=config_name, words=words\n )\n config_matches: List[str] = []\n if not exact_match:\n run_mode = RunMode.MULTIRUN if parsed_args.multirun else RunMode.RUN\n config_matches = []\n try:\n config = self.config_loader.load_configuration(\n config_name=config_name, overrides=words, run_mode=run_mode\n )\n config_matches = CompletionPlugin._get_matches(config, word)\n 
except ConfigCompositionException:\n # if config fails to load for whatever reason, do not provide config matches.\n # possible reasons:\n # - missing entry in defaults list (- group: ???) and not populated in command line\n # - a config file is not found\n # etc.\n pass\n\n result = list(set(matched_groups + config_matches))\n\n return sorted(result)\n\n @staticmethod\n def strip_python_or_app_name(line: str) -> str:\n \"\"\"\n Take the command line received from shell completion, and strip the app name from it\n which could be at the form of python script.py or some_app.\n it also corrects the key (COMP_INDEX) to reflect the same location in the striped command line.\n :param line: input line, may contain python file.py followed=by_args..\n :return: tuple(args line, key of cursor in args line)\n \"\"\"\n python_args = r\"^\\s*[\\w\\/]*python[3]?\\s*[\\w/\\.]*\\s*(.*)\"\n app_args = r\"^\\s*[\\w_\\-=\\./]+\\s*(.*)\"\n match = re.match(python_args, line)\n if match:\n return match.group(1)\n else:\n match = re.match(app_args, line)\n if match:\n return match.group(1)\n else:\n raise RuntimeError(f\"Error parsing line '{line}'\")\n\n\nclass DefaultCompletionPlugin(CompletionPlugin):\n \"\"\"\n A concrete instance of CompletionPlugin that is used for testing.\n \"\"\"\n\n def install(self) -> None:\n raise NotImplementedError\n\n def uninstall(self) -> None:\n raise NotImplementedError\n\n @staticmethod\n def provides() -> str:\n raise NotImplementedError\n\n def query(self, config_name: Optional[str]) -> None:\n raise NotImplementedError\n\n @staticmethod\n def help(command: str) -> str:\n raise NotImplementedError\n", "path": "hydra/plugins/completion_plugin.py"}]} | 3,373 | 112 |
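The `matches: List[str] = []` change in the golden diff above is the standard way to satisfy mypy's `var-annotated` check: an empty list literal gives the type checker nothing from which to infer the element type. A minimal, self-contained sketch of the same situation (the function and its inputs are made up purely for illustration):

```python
from typing import List


def collect_matches(words: List[str], prefix: str) -> List[str]:
    # Without the annotation, mypy reports:
    #   error: Need type annotation for "matches" (hint: "matches: List[<type>] = ...")  [var-annotated]
    matches: List[str] = []
    for word in words:
        if word.startswith(prefix):
            matches.append(word)
    return matches
```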
gh_patches_debug_27317 | rasdani/github-patches | git_diff | bridgecrewio__checkov-3126 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
How to Check a Data Source connected to a Resource
**Describe the issue**
I want to check whether the attribute "group" of the resource "azuredevops_group_membership" refers to a data source "azuredevops_group" whose attribute "name" is, for example, "Build Administrators".
**Examples**
Snippet from [terraform registry](https://registry.terraform.io/providers/microsoft/azuredevops/latest/docs/resources/group_membership)
```terraform
data "azuredevops_group" "example" {
project_id = azuredevops_project.example.id
name = "Build Administrators"
}
resource "azuredevops_group_membership" "example" {
group = data.azuredevops_group.example.descriptor
members = [
azuredevops_user_entitlement.example.descriptor
]
}
```
I tried creating a custom policy in Python, but I didn't understand how to make this work; I was only able to create a policy that checks whether the attribute "name" of the data source "azuredevops_group" is equal to "Build Administrators":
```python
from typing import Dict, List, Any
from checkov.terraform.checks.data.base_check import BaseDataCheck
from checkov.common.models.enums import CheckResult, CheckCategories
class NoBuildAdministratorCreated(BaseDataCheck):
def __init__(self) -> None:
name = 'Ensure no build administrator is created on file'
id = "CKV_ADO_9000"
supported_data = ["azuredevops_group"]
categories = [CheckCategories.GENERAL_SECURITY]
super().__init__(name=name, id=id, categories=categories, supported_data=supported_data)
def scan_data_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:
if (conf.get("name", "Build Administrators")):
return CheckResult.FAILED
return CheckResult.PASSED
check = NoBuildAdministratorCreated()
```
**Version (please complete the following information):**
- Checkov Version 2.0.1223
**Additional context**
My goal is to check whether people are creating admin groups inside a Terraform file. I'm fairly new to reading the documentation and code of open-source projects, so I'm having a hard time understanding how to use checkov's Python scan functions to create custom policies. Any advice or a code example that helps me understand how this works, and what this **conf** parameter is, would be much appreciated, thanks!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/common/checks_infra/solvers/attribute_solvers/base_attribute_solver.py`
Content:
```
1 import concurrent.futures
2 import re
3 from typing import List, Tuple, Dict, Any, Optional, Pattern
4
5 from networkx import DiGraph
6
7 from checkov.common.graph.checks_infra.enums import SolverType
8 from checkov.common.graph.checks_infra.solvers.base_solver import BaseSolver
9
10 from concurrent.futures import ThreadPoolExecutor
11
12 from checkov.common.graph.graph_builder import CustomAttributes
13 from checkov.common.graph.graph_builder.graph_components.block_types import BlockType
14 from checkov.common.util.var_utils import is_terraform_variable_dependent, is_cloudformation_variable_dependent
15
16 WILDCARD_PATTERN = re.compile(r"(\S+[.][*][.]*)+")
17
18
19 class BaseAttributeSolver(BaseSolver):
20 operator = ""
21
22 def __init__(self, resource_types: List[str], attribute: Optional[str], value: Any) -> None:
23 super().__init__(SolverType.ATTRIBUTE)
24 self.resource_types = resource_types
25 self.attribute = attribute
26 self.value = value
27
28 def run(self, graph_connector: DiGraph) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
29 executer = ThreadPoolExecutor()
30 jobs = []
31 passed_vertices: List[Dict[str, Any]] = []
32 failed_vertices: List[Dict[str, Any]] = []
33 for _, data in graph_connector.nodes(data=True):
34 if (not self.resource_types or data.get(CustomAttributes.RESOURCE_TYPE) in self.resource_types) \
35 and data.get(CustomAttributes.BLOCK_TYPE) == BlockType.RESOURCE:
36 jobs.append(executer.submit(self._process_node, data, passed_vertices, failed_vertices))
37
38 concurrent.futures.wait(jobs)
39 return passed_vertices, failed_vertices
40
41 def get_operation(self, vertex: Dict[str, Any]) -> bool:
42 if self.attribute and re.match(WILDCARD_PATTERN, self.attribute):
43 attribute_patterns = self.get_attribute_patterns(self.attribute)
44 attribute_matches = [
45 attr
46 for attr in vertex
47 if any(re.match(re.compile(attribute_pattern), attr) for attribute_pattern in attribute_patterns)
48 ]
49 if attribute_matches:
50 return self.resource_type_pred(vertex, self.resource_types) and any(
51 self._get_operation(vertex=vertex, attribute=attr) for attr in attribute_matches
52 )
53 return self.resource_type_pred(vertex, self.resource_types) and self._get_operation(
54 vertex=vertex, attribute=self.attribute
55 )
56
57 def _get_operation(self, vertex: Dict[str, Any], attribute: Optional[str]) -> bool:
58 raise NotImplementedError
59
60 def _process_node(
61 self, data: Dict[str, Any], passed_vartices: List[Dict[str, Any]], failed_vertices: List[Dict[str, Any]]
62 ) -> None:
63 if not self.resource_type_pred(data, self.resource_types):
64 return
65 if self.get_operation(vertex=data):
66 passed_vartices.append(data)
67 else:
68 failed_vertices.append(data)
69
70 @staticmethod
71 def get_attribute_patterns(attribute: str) -> Tuple[Pattern[str], Pattern[str]]:
72 index_pattern = r"[\d]+"
73 split_by_dots = attribute.split(".")
74
75 pattern_parts = []
76 pattern_parts_without_index = []
77 for attr_part in split_by_dots:
78 if attr_part == "*":
79 pattern_parts.append(index_pattern)
80 else:
81 attr_part_pattern = f"({attr_part})"
82 pattern_parts.append(attr_part_pattern)
83 pattern_parts_without_index.append(attr_part_pattern)
84
85 pattern = "[.]".join(pattern_parts)
86 pattern_with_index = re.compile(pattern)
87
88 pattern = "[.]".join(pattern_parts_without_index)
89 pattern_without_index = re.compile(pattern)
90
91 return pattern_with_index, pattern_without_index
92
93 @staticmethod
94 def _is_variable_dependant(value: Any, source: str) -> bool:
95 if source == 'Terraform' and is_terraform_variable_dependent(value):
96 return True
97 # TODO add logic for CloudFormation
98 # elif source == 'CloudFormation' and is_cloudformation_variable_dependent(value):
99 # return True
100
101 return False
102
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checkov/common/checks_infra/solvers/attribute_solvers/base_attribute_solver.py b/checkov/common/checks_infra/solvers/attribute_solvers/base_attribute_solver.py
--- a/checkov/common/checks_infra/solvers/attribute_solvers/base_attribute_solver.py
+++ b/checkov/common/checks_infra/solvers/attribute_solvers/base_attribute_solver.py
@@ -12,7 +12,9 @@
from checkov.common.graph.graph_builder import CustomAttributes
from checkov.common.graph.graph_builder.graph_components.block_types import BlockType
from checkov.common.util.var_utils import is_terraform_variable_dependent, is_cloudformation_variable_dependent
+from checkov.terraform.graph_builder.graph_components.block_types import BlockType as TerraformBlockType
+SUPPORTED_BLOCK_TYPES = {BlockType.RESOURCE, TerraformBlockType.DATA}
WILDCARD_PATTERN = re.compile(r"(\S+[.][*][.]*)+")
@@ -32,7 +34,7 @@
failed_vertices: List[Dict[str, Any]] = []
for _, data in graph_connector.nodes(data=True):
if (not self.resource_types or data.get(CustomAttributes.RESOURCE_TYPE) in self.resource_types) \
- and data.get(CustomAttributes.BLOCK_TYPE) == BlockType.RESOURCE:
+ and data.get(CustomAttributes.BLOCK_TYPE) in SUPPORTED_BLOCK_TYPES:
jobs.append(executer.submit(self._process_node, data, passed_vertices, failed_vertices))
concurrent.futures.wait(jobs)
| {"golden_diff": "diff --git a/checkov/common/checks_infra/solvers/attribute_solvers/base_attribute_solver.py b/checkov/common/checks_infra/solvers/attribute_solvers/base_attribute_solver.py\n--- a/checkov/common/checks_infra/solvers/attribute_solvers/base_attribute_solver.py\n+++ b/checkov/common/checks_infra/solvers/attribute_solvers/base_attribute_solver.py\n@@ -12,7 +12,9 @@\n from checkov.common.graph.graph_builder import CustomAttributes\n from checkov.common.graph.graph_builder.graph_components.block_types import BlockType\n from checkov.common.util.var_utils import is_terraform_variable_dependent, is_cloudformation_variable_dependent\n+from checkov.terraform.graph_builder.graph_components.block_types import BlockType as TerraformBlockType\n \n+SUPPORTED_BLOCK_TYPES = {BlockType.RESOURCE, TerraformBlockType.DATA}\n WILDCARD_PATTERN = re.compile(r\"(\\S+[.][*][.]*)+\")\n \n \n@@ -32,7 +34,7 @@\n failed_vertices: List[Dict[str, Any]] = []\n for _, data in graph_connector.nodes(data=True):\n if (not self.resource_types or data.get(CustomAttributes.RESOURCE_TYPE) in self.resource_types) \\\n- and data.get(CustomAttributes.BLOCK_TYPE) == BlockType.RESOURCE:\n+ and data.get(CustomAttributes.BLOCK_TYPE) in SUPPORTED_BLOCK_TYPES:\n jobs.append(executer.submit(self._process_node, data, passed_vertices, failed_vertices))\n \n concurrent.futures.wait(jobs)\n", "issue": "How to Check a Data Source connected to a Resource\n**Describe the issue**\r\nI want to check if the attribute \"group\" of the resource \"azuredevops_group_membership\" refers to a data source \"azuredevops_group\" with the attribute \"name\" = \"Build Administrators\" for example.\r\n\r\n**Examples**\r\nSnippet from [terraform registry](https://registry.terraform.io/providers/microsoft/azuredevops/latest/docs/resources/group_membership)\r\n```terraform\r\ndata \"azuredevops_group\" \"example\" {\r\n project_id = azuredevops_project.example.id\r\n name = \"Build Administrators\"\r\n}\r\n\r\nresource \"azuredevops_group_membership\" \"example\" {\r\n group = data.azuredevops_group.example.descriptor\r\n members = [\r\n azuredevops_user_entitlement.example.descriptor\r\n ]\r\n}\r\n```\r\nI tryed creating a custom policy in python but I didn't understand how I could make this work, I was only able to create a policy to check if the attribute name of the data azuredevops_group was equal to \"Build Administrators\":\r\n\r\n```python\r\nfrom typing import Dict, List, Any\r\n\r\nfrom checkov.terraform.checks.data.base_check import BaseDataCheck\r\nfrom checkov.common.models.enums import CheckResult, CheckCategories\r\n\r\nclass NoBuildAdministratorCreated(BaseDataCheck):\r\n def __init__(self) -> None:\r\n name = 'Ensure no build administrator is created on file'\r\n id = \"CKV_ADO_9000\"\r\n supported_data = [\"azuredevops_group\"]\r\n categories = [CheckCategories.GENERAL_SECURITY]\r\n super().__init__(name=name, id=id, categories=categories, supported_data=supported_data)\r\n\r\n def scan_data_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:\r\n\r\n if (conf.get(\"name\", \"Build Administrators\")):\r\n return CheckResult.FAILED\r\n \r\n return CheckResult.PASSED\r\n\r\ncheck = NoBuildAdministratorCreated()\r\n```\r\n\r\n**Version (please complete the following information):**\r\n - Checkov Version 2.0.1223\r\n\r\n**Additional context**\r\nMy goal is to check if people are creating admin groups inside of a terraform file. 
I'm kinda new to reading documentations and code libraries of open source projects so I'm having a bit of a hard time understanding how to use the checkov python scan functions to create custom policies. So any advice or code example to help me understand better how it works and what is this **conf** would be much appreciated, thanks!\r\n\n", "before_files": [{"content": "import concurrent.futures\nimport re\nfrom typing import List, Tuple, Dict, Any, Optional, Pattern\n\nfrom networkx import DiGraph\n\nfrom checkov.common.graph.checks_infra.enums import SolverType\nfrom checkov.common.graph.checks_infra.solvers.base_solver import BaseSolver\n\nfrom concurrent.futures import ThreadPoolExecutor\n\nfrom checkov.common.graph.graph_builder import CustomAttributes\nfrom checkov.common.graph.graph_builder.graph_components.block_types import BlockType\nfrom checkov.common.util.var_utils import is_terraform_variable_dependent, is_cloudformation_variable_dependent\n\nWILDCARD_PATTERN = re.compile(r\"(\\S+[.][*][.]*)+\")\n\n\nclass BaseAttributeSolver(BaseSolver):\n operator = \"\"\n\n def __init__(self, resource_types: List[str], attribute: Optional[str], value: Any) -> None:\n super().__init__(SolverType.ATTRIBUTE)\n self.resource_types = resource_types\n self.attribute = attribute\n self.value = value\n\n def run(self, graph_connector: DiGraph) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:\n executer = ThreadPoolExecutor()\n jobs = []\n passed_vertices: List[Dict[str, Any]] = []\n failed_vertices: List[Dict[str, Any]] = []\n for _, data in graph_connector.nodes(data=True):\n if (not self.resource_types or data.get(CustomAttributes.RESOURCE_TYPE) in self.resource_types) \\\n and data.get(CustomAttributes.BLOCK_TYPE) == BlockType.RESOURCE:\n jobs.append(executer.submit(self._process_node, data, passed_vertices, failed_vertices))\n\n concurrent.futures.wait(jobs)\n return passed_vertices, failed_vertices\n\n def get_operation(self, vertex: Dict[str, Any]) -> bool:\n if self.attribute and re.match(WILDCARD_PATTERN, self.attribute):\n attribute_patterns = self.get_attribute_patterns(self.attribute)\n attribute_matches = [\n attr\n for attr in vertex\n if any(re.match(re.compile(attribute_pattern), attr) for attribute_pattern in attribute_patterns)\n ]\n if attribute_matches:\n return self.resource_type_pred(vertex, self.resource_types) and any(\n self._get_operation(vertex=vertex, attribute=attr) for attr in attribute_matches\n )\n return self.resource_type_pred(vertex, self.resource_types) and self._get_operation(\n vertex=vertex, attribute=self.attribute\n )\n\n def _get_operation(self, vertex: Dict[str, Any], attribute: Optional[str]) -> bool:\n raise NotImplementedError\n\n def _process_node(\n self, data: Dict[str, Any], passed_vartices: List[Dict[str, Any]], failed_vertices: List[Dict[str, Any]]\n ) -> None:\n if not self.resource_type_pred(data, self.resource_types):\n return\n if self.get_operation(vertex=data):\n passed_vartices.append(data)\n else:\n failed_vertices.append(data)\n\n @staticmethod\n def get_attribute_patterns(attribute: str) -> Tuple[Pattern[str], Pattern[str]]:\n index_pattern = r\"[\\d]+\"\n split_by_dots = attribute.split(\".\")\n\n pattern_parts = []\n pattern_parts_without_index = []\n for attr_part in split_by_dots:\n if attr_part == \"*\":\n pattern_parts.append(index_pattern)\n else:\n attr_part_pattern = f\"({attr_part})\"\n pattern_parts.append(attr_part_pattern)\n pattern_parts_without_index.append(attr_part_pattern)\n\n pattern = 
\"[.]\".join(pattern_parts)\n pattern_with_index = re.compile(pattern)\n\n pattern = \"[.]\".join(pattern_parts_without_index)\n pattern_without_index = re.compile(pattern)\n\n return pattern_with_index, pattern_without_index\n\n @staticmethod\n def _is_variable_dependant(value: Any, source: str) -> bool:\n if source == 'Terraform' and is_terraform_variable_dependent(value):\n return True\n # TODO add logic for CloudFormation\n # elif source == 'CloudFormation' and is_cloudformation_variable_dependent(value):\n # return True\n\n return False\n", "path": "checkov/common/checks_infra/solvers/attribute_solvers/base_attribute_solver.py"}], "after_files": [{"content": "import concurrent.futures\nimport re\nfrom typing import List, Tuple, Dict, Any, Optional, Pattern\n\nfrom networkx import DiGraph\n\nfrom checkov.common.graph.checks_infra.enums import SolverType\nfrom checkov.common.graph.checks_infra.solvers.base_solver import BaseSolver\n\nfrom concurrent.futures import ThreadPoolExecutor\n\nfrom checkov.common.graph.graph_builder import CustomAttributes\nfrom checkov.common.graph.graph_builder.graph_components.block_types import BlockType\nfrom checkov.common.util.var_utils import is_terraform_variable_dependent, is_cloudformation_variable_dependent\nfrom checkov.terraform.graph_builder.graph_components.block_types import BlockType as TerraformBlockType\n\nSUPPORTED_BLOCK_TYPES = {BlockType.RESOURCE, TerraformBlockType.DATA}\nWILDCARD_PATTERN = re.compile(r\"(\\S+[.][*][.]*)+\")\n\n\nclass BaseAttributeSolver(BaseSolver):\n operator = \"\"\n\n def __init__(self, resource_types: List[str], attribute: Optional[str], value: Any) -> None:\n super().__init__(SolverType.ATTRIBUTE)\n self.resource_types = resource_types\n self.attribute = attribute\n self.value = value\n\n def run(self, graph_connector: DiGraph) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:\n executer = ThreadPoolExecutor()\n jobs = []\n passed_vertices: List[Dict[str, Any]] = []\n failed_vertices: List[Dict[str, Any]] = []\n for _, data in graph_connector.nodes(data=True):\n if (not self.resource_types or data.get(CustomAttributes.RESOURCE_TYPE) in self.resource_types) \\\n and data.get(CustomAttributes.BLOCK_TYPE) in SUPPORTED_BLOCK_TYPES:\n jobs.append(executer.submit(self._process_node, data, passed_vertices, failed_vertices))\n\n concurrent.futures.wait(jobs)\n return passed_vertices, failed_vertices\n\n def get_operation(self, vertex: Dict[str, Any]) -> bool:\n if self.attribute and re.match(WILDCARD_PATTERN, self.attribute):\n attribute_patterns = self.get_attribute_patterns(self.attribute)\n attribute_matches = [\n attr\n for attr in vertex\n if any(re.match(re.compile(attribute_pattern), attr) for attribute_pattern in attribute_patterns)\n ]\n if attribute_matches:\n return self.resource_type_pred(vertex, self.resource_types) and any(\n self._get_operation(vertex=vertex, attribute=attr) for attr in attribute_matches\n )\n return self.resource_type_pred(vertex, self.resource_types) and self._get_operation(\n vertex=vertex, attribute=self.attribute\n )\n\n def _get_operation(self, vertex: Dict[str, Any], attribute: Optional[str]) -> bool:\n raise NotImplementedError\n\n def _process_node(\n self, data: Dict[str, Any], passed_vartices: List[Dict[str, Any]], failed_vertices: List[Dict[str, Any]]\n ) -> None:\n if not self.resource_type_pred(data, self.resource_types):\n return\n if self.get_operation(vertex=data):\n passed_vartices.append(data)\n else:\n failed_vertices.append(data)\n\n @staticmethod\n def 
get_attribute_patterns(attribute: str) -> Tuple[Pattern[str], Pattern[str]]:\n index_pattern = r\"[\\d]+\"\n split_by_dots = attribute.split(\".\")\n\n pattern_parts = []\n pattern_parts_without_index = []\n for attr_part in split_by_dots:\n if attr_part == \"*\":\n pattern_parts.append(index_pattern)\n else:\n attr_part_pattern = f\"({attr_part})\"\n pattern_parts.append(attr_part_pattern)\n pattern_parts_without_index.append(attr_part_pattern)\n\n pattern = \"[.]\".join(pattern_parts)\n pattern_with_index = re.compile(pattern)\n\n pattern = \"[.]\".join(pattern_parts_without_index)\n pattern_without_index = re.compile(pattern)\n\n return pattern_with_index, pattern_without_index\n\n @staticmethod\n def _is_variable_dependant(value: Any, source: str) -> bool:\n if source == 'Terraform' and is_terraform_variable_dependent(value):\n return True\n # TODO add logic for CloudFormation\n # elif source == 'CloudFormation' and is_cloudformation_variable_dependent(value):\n # return True\n\n return False\n", "path": "checkov/common/checks_infra/solvers/attribute_solvers/base_attribute_solver.py"}]} | 1,863 | 323 |
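The checkov patch above makes the graph attribute solvers (used by YAML custom policies) consider Terraform `data` blocks in addition to `resource` blocks, which is what checking a data source requires. Independently of that, the Python check quoted in the issue always fails because `conf.get("name", "Build Administrators")` is truthy for any value. A corrected sketch of that check is below; it reuses the `BaseDataCheck` API exactly as shown in the issue and assumes checkov's usual convention of wrapping parsed HCL attribute values in single-element lists (the check name and ID are the issue author's placeholders):

```python
from typing import Any, Dict, List

from checkov.common.models.enums import CheckCategories, CheckResult
from checkov.terraform.checks.data.base_check import BaseDataCheck


class NoBuildAdministratorGroup(BaseDataCheck):
    def __init__(self) -> None:
        super().__init__(
            name="Ensure no Build Administrators group is referenced on file",
            id="CKV_ADO_9000",  # placeholder ID taken from the issue
            categories=[CheckCategories.GENERAL_SECURITY],
            supported_data=["azuredevops_group"],
        )

    def scan_data_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:
        # Parsed HCL attributes arrive as single-element lists,
        # e.g. {"name": ["Build Administrators"]} (assumption, see note above).
        name = (conf.get("name") or [None])[0]
        if name == "Build Administrators":
            return CheckResult.FAILED
        return CheckResult.PASSED


check = NoBuildAdministratorGroup()
```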
gh_patches_debug_35281 | rasdani/github-patches | git_diff | piskvorky__gensim-1819 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`datatype` parameter in `load_word2vec_format` doesn't work as expected
#### Description
Using `datatype=np.float64` in a `KeyedVectors.load_word2vec_format` call doesn't work as expected: the loaded floats seem to lose precision. The dtype of `syn0` is still `float64`, though, so it seems that the values are cast to float32 first while loading and only then cast to float64 when creating the array.
#### Steps/Code/Corpus to Reproduce
Using this file -
[test.kv.txt](https://github.com/RaRe-Technologies/gensim/files/1434953/test.kv.txt)
```python
from gensim.models.keyedvectors import KeyedVectors
import numpy as np
kv = KeyedVectors.load_word2vec_format('test.kv.txt', datatype=np.float64)
print(kv['horse.n.01'][0] == -0.0008546282343595379)
# False
print(kv['horse.n.01'].dtype)
# float64
```
#### Expected Results
```python
print(kv['horse.n.01'][0] == -0.0008546282343595379)
# True
```
#### Actual Results
```python
print(kv['horse.n.01'][0] == -0.0008546282343595379)
# False
```
Looking at the code and making a quick hack [here](https://github.com/RaRe-Technologies/gensim/blob/develop/gensim/models/keyedvectors.py#L253), changing..
```
word, weights = parts[0], [REAL(x) for x in parts[1:]]
```
to..
```
word, weights = parts[0], [datatype(x) for x in parts[1:]]
```
..leads to the correct result. However, I imagine there are other cases to be covered as well.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gensim/models/utils_any2vec.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Author: Shiva Manne <[email protected]>
5 # Copyright (C) 2018 RaRe Technologies s.r.o.
6
7 """This module contains various general functions useful for any2vec models."""
8
9 import logging
10 import numpy as np
11 from gensim import utils
12
13 from numpy import zeros, dtype, float32 as REAL, ascontiguousarray, fromstring
14
15 from six.moves import xrange
16 from six import iteritems
17
18 logger = logging.getLogger(__name__)
19
20
21 def _compute_ngrams(word, min_n, max_n):
22 """Returns the list of all possible ngrams for a given word.
23
24 Parameters
25 ----------
26 word : str
27 The word whose ngrams need to be computed
28 min_n : int
29 minimum character length of the ngrams
30 max_n : int
31 maximum character length of the ngrams
32
33 Returns
34 -------
35 :obj:`list` of :obj:`str`
36 List of character ngrams
37
38 """
39 BOW, EOW = ('<', '>') # Used by FastText to attach to all words as prefix and suffix
40 extended_word = BOW + word + EOW
41 ngrams = []
42 for ngram_length in range(min_n, min(len(extended_word), max_n) + 1):
43 for i in range(0, len(extended_word) - ngram_length + 1):
44 ngrams.append(extended_word[i:i + ngram_length])
45 return ngrams
46
47
48 def _ft_hash(string):
49 """Reproduces [hash method](https://github.com/facebookresearch/fastText/blob/master/src/dictionary.cc)
50 used in [1]_.
51
52 Parameter
53 ---------
54 string : str
55 The string whose hash needs to be calculated
56
57 Returns
58 -------
59 int
60 The hash of the string
61
62 """
63 # Runtime warnings for integer overflow are raised, this is expected behaviour. These warnings are suppressed.
64 old_settings = np.seterr(all='ignore')
65 h = np.uint32(2166136261)
66 for c in string:
67 h = h ^ np.uint32(ord(c))
68 h = h * np.uint32(16777619)
69 np.seterr(**old_settings)
70 return h
71
72
73 def _save_word2vec_format(fname, vocab, vectors, fvocab=None, binary=False, total_vec=None):
74 """Store the input-hidden weight matrix in the same format used by the original
75 C word2vec-tool, for compatibility.
76
77 Parameters
78 ----------
79 fname : str
80 The file path used to save the vectors in
81 vocab : dict
82 The vocabulary of words
83 vectors : numpy.array
84 The vectors to be stored
85 fvocab : str
86 Optional file path used to save the vocabulary
87 binary : bool
88 If True, the data wil be saved in binary word2vec format, else it will be saved in plain text.
89 total_vec : int
90 Optional parameter to explicitly specify total no. of vectors
91 (in case word vectors are appended with document vectors afterwards)
92
93 """
94 if not (vocab or vectors):
95 raise RuntimeError("no input")
96 if total_vec is None:
97 total_vec = len(vocab)
98 vector_size = vectors.shape[1]
99 if fvocab is not None:
100 logger.info("storing vocabulary in %s", fvocab)
101 with utils.smart_open(fvocab, 'wb') as vout:
102 for word, vocab_ in sorted(iteritems(vocab), key=lambda item: -item[1].count):
103 vout.write(utils.to_utf8("%s %s\n" % (word, vocab_.count)))
104 logger.info("storing %sx%s projection weights into %s", total_vec, vector_size, fname)
105 assert (len(vocab), vector_size) == vectors.shape
106 with utils.smart_open(fname, 'wb') as fout:
107 fout.write(utils.to_utf8("%s %s\n" % (total_vec, vector_size)))
108 # store in sorted order: most frequent words at the top
109 for word, vocab_ in sorted(iteritems(vocab), key=lambda item: -item[1].count):
110 row = vectors[vocab_.index]
111 if binary:
112 fout.write(utils.to_utf8(word) + b" " + row.tostring())
113 else:
114 fout.write(utils.to_utf8("%s %s\n" % (word, ' '.join("%f" % val for val in row))))
115
116
117 def _load_word2vec_format(cls, fname, fvocab=None, binary=False, encoding='utf8', unicode_errors='strict',
118 limit=None, datatype=REAL):
119 """Load the input-hidden weight matrix from the original C word2vec-tool format.
120
121 Note that the information stored in the file is incomplete (the binary tree is missing),
122 so while you can query for word similarity etc., you cannot continue training
123 with a model loaded this way.
124
125 Parameters
126 ----------
127 fname : str
128 The file path to the saved word2vec-format file.
129 fvocab : str
130 Optional file path to the vocabulary.Word counts are read from `fvocab` filename,
131 if set (this is the file generated by `-save-vocab` flag of the original C tool).
132 binary : bool
133 If True, indicates whether the data is in binary word2vec format.
134 encoding : str
135 If you trained the C model using non-utf8 encoding for words, specify that
136 encoding in `encoding`.
137 unicode_errors : str
138 default 'strict', is a string suitable to be passed as the `errors`
139 argument to the unicode() (Python 2.x) or str() (Python 3.x) function. If your source
140 file may include word tokens truncated in the middle of a multibyte unicode character
141 (as is common from the original word2vec.c tool), 'ignore' or 'replace' may help.
142 limit : int
143 Sets a maximum number of word-vectors to read from the file. The default,
144 None, means read all.
145 datatype : :class: `numpy.float*`
146 (Experimental) Can coerce dimensions to a non-default float type (such
147 as np.float16) to save memory. (Such types may result in much slower bulk operations
148 or incompatibility with optimized routines.)
149
150 Returns
151 -------
152 :obj: `cls`
153 Returns the loaded model as an instance of :class: `cls`.
154
155 """
156 from gensim.models.keyedvectors import Vocab
157 counts = None
158 if fvocab is not None:
159 logger.info("loading word counts from %s", fvocab)
160 counts = {}
161 with utils.smart_open(fvocab) as fin:
162 for line in fin:
163 word, count = utils.to_unicode(line).strip().split()
164 counts[word] = int(count)
165
166 logger.info("loading projection weights from %s", fname)
167 with utils.smart_open(fname) as fin:
168 header = utils.to_unicode(fin.readline(), encoding=encoding)
169 vocab_size, vector_size = (int(x) for x in header.split()) # throws for invalid file format
170 if limit:
171 vocab_size = min(vocab_size, limit)
172 result = cls(vector_size)
173 result.vector_size = vector_size
174 result.vectors = zeros((vocab_size, vector_size), dtype=datatype)
175
176 def add_word(word, weights):
177 word_id = len(result.vocab)
178 if word in result.vocab:
179 logger.warning("duplicate word '%s' in %s, ignoring all but first", word, fname)
180 return
181 if counts is None:
182 # most common scenario: no vocab file given. just make up some bogus counts, in descending order
183 result.vocab[word] = Vocab(index=word_id, count=vocab_size - word_id)
184 elif word in counts:
185 # use count from the vocab file
186 result.vocab[word] = Vocab(index=word_id, count=counts[word])
187 else:
188 # vocab file given, but word is missing -- set count to None (TODO: or raise?)
189 logger.warning("vocabulary file is incomplete: '%s' is missing", word)
190 result.vocab[word] = Vocab(index=word_id, count=None)
191 result.vectors[word_id] = weights
192 result.index2word.append(word)
193
194 if binary:
195 binary_len = dtype(REAL).itemsize * vector_size
196 for _ in xrange(vocab_size):
197 # mixed text and binary: read text first, then binary
198 word = []
199 while True:
200 ch = fin.read(1)
201 if ch == b' ':
202 break
203 if ch == b'':
204 raise EOFError("unexpected end of input; is count incorrect or file otherwise damaged?")
205 if ch != b'\n': # ignore newlines in front of words (some binary files have)
206 word.append(ch)
207 word = utils.to_unicode(b''.join(word), encoding=encoding, errors=unicode_errors)
208 weights = fromstring(fin.read(binary_len), dtype=REAL)
209 add_word(word, weights)
210 else:
211 for line_no in xrange(vocab_size):
212 line = fin.readline()
213 if line == b'':
214 raise EOFError("unexpected end of input; is count incorrect or file otherwise damaged?")
215 parts = utils.to_unicode(line.rstrip(), encoding=encoding, errors=unicode_errors).split(" ")
216 if len(parts) != vector_size + 1:
217 raise ValueError("invalid vector on line %s (is this really the text format?)" % line_no)
218 word, weights = parts[0], [REAL(x) for x in parts[1:]]
219 add_word(word, weights)
220 if result.vectors.shape[0] != len(result.vocab):
221 logger.info(
222 "duplicate words detected, shrinking matrix size from %i to %i",
223 result.vectors.shape[0], len(result.vocab)
224 )
225 result.vectors = ascontiguousarray(result.vectors[: len(result.vocab)])
226 assert (len(result.vocab), vector_size) == result.vectors.shape
227
228 logger.info("loaded %s matrix from %s", result.vectors.shape, fname)
229 return result
230
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gensim/models/utils_any2vec.py b/gensim/models/utils_any2vec.py
--- a/gensim/models/utils_any2vec.py
+++ b/gensim/models/utils_any2vec.py
@@ -109,9 +109,10 @@
for word, vocab_ in sorted(iteritems(vocab), key=lambda item: -item[1].count):
row = vectors[vocab_.index]
if binary:
+ row = row.astype(REAL)
fout.write(utils.to_utf8(word) + b" " + row.tostring())
else:
- fout.write(utils.to_utf8("%s %s\n" % (word, ' '.join("%f" % val for val in row))))
+ fout.write(utils.to_utf8("%s %s\n" % (word, ' '.join(repr(val) for val in row))))
def _load_word2vec_format(cls, fname, fvocab=None, binary=False, encoding='utf8', unicode_errors='strict',
@@ -205,7 +206,7 @@
if ch != b'\n': # ignore newlines in front of words (some binary files have)
word.append(ch)
word = utils.to_unicode(b''.join(word), encoding=encoding, errors=unicode_errors)
- weights = fromstring(fin.read(binary_len), dtype=REAL)
+ weights = fromstring(fin.read(binary_len), dtype=REAL).astype(datatype)
add_word(word, weights)
else:
for line_no in xrange(vocab_size):
@@ -215,7 +216,7 @@
parts = utils.to_unicode(line.rstrip(), encoding=encoding, errors=unicode_errors).split(" ")
if len(parts) != vector_size + 1:
raise ValueError("invalid vector on line %s (is this really the text format?)" % line_no)
- word, weights = parts[0], [REAL(x) for x in parts[1:]]
+ word, weights = parts[0], [datatype(x) for x in parts[1:]]
add_word(word, weights)
if result.vectors.shape[0] != len(result.vocab):
logger.info(
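The two halves of the gensim patch work together: parsing with `datatype(x)` only helps if the text file was written with enough digits, which is why the writer switches from `"%f" % val` to `repr(val)`. A small standalone snippet (not gensim code) showing the difference:

```python
import numpy as np

val = np.float64(-0.0008546282343595379)

print("%f" % val)   # -0.000855 -> only six decimal places survive
print(repr(val))    # -0.0008546282343595379 -> round-trippable text

# repr() round-trips the exact float64 value, while going through float32
# (what REAL(x) did before the patch) loses the low-order bits.
print(np.float64(repr(val)) == val)        # True
print(np.float64(np.float32(val)) == val)  # False
```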
| {"golden_diff": "diff --git a/gensim/models/utils_any2vec.py b/gensim/models/utils_any2vec.py\n--- a/gensim/models/utils_any2vec.py\n+++ b/gensim/models/utils_any2vec.py\n@@ -109,9 +109,10 @@\n for word, vocab_ in sorted(iteritems(vocab), key=lambda item: -item[1].count):\n row = vectors[vocab_.index]\n if binary:\n+ row = row.astype(REAL)\n fout.write(utils.to_utf8(word) + b\" \" + row.tostring())\n else:\n- fout.write(utils.to_utf8(\"%s %s\\n\" % (word, ' '.join(\"%f\" % val for val in row))))\n+ fout.write(utils.to_utf8(\"%s %s\\n\" % (word, ' '.join(repr(val) for val in row))))\n \n \n def _load_word2vec_format(cls, fname, fvocab=None, binary=False, encoding='utf8', unicode_errors='strict',\n@@ -205,7 +206,7 @@\n if ch != b'\\n': # ignore newlines in front of words (some binary files have)\n word.append(ch)\n word = utils.to_unicode(b''.join(word), encoding=encoding, errors=unicode_errors)\n- weights = fromstring(fin.read(binary_len), dtype=REAL)\n+ weights = fromstring(fin.read(binary_len), dtype=REAL).astype(datatype)\n add_word(word, weights)\n else:\n for line_no in xrange(vocab_size):\n@@ -215,7 +216,7 @@\n parts = utils.to_unicode(line.rstrip(), encoding=encoding, errors=unicode_errors).split(\" \")\n if len(parts) != vector_size + 1:\n raise ValueError(\"invalid vector on line %s (is this really the text format?)\" % line_no)\n- word, weights = parts[0], [REAL(x) for x in parts[1:]]\n+ word, weights = parts[0], [datatype(x) for x in parts[1:]]\n add_word(word, weights)\n if result.vectors.shape[0] != len(result.vocab):\n logger.info(\n", "issue": "`datatype` parameter in `load_word2vec_format` doesn't work as expected\n#### Description\r\nUsing `datatype=np.float64` in a `KeyedVectors.load_word2vec_call` doesn't work as expected, the loaded floats seem to lose precision. The datatype for `syn0` is still `float64` though, so it seems that they are cast to float32 first while loading, then cast to float64 when creating the array.\r\n\r\n#### Steps/Code/Corpus to Reproduce\r\nUsing this file - \r\n[test.kv.txt](https://github.com/RaRe-Technologies/gensim/files/1434953/test.kv.txt)\r\n\r\n```python\r\nfrom gensim.models.keyedvectors import KeyedVectors\r\nimport numpy as np\r\n\r\nkv = KeyedVectors.load_word2vec_format('test.kv.txt', datatype=np.float64)\r\nprint(kv['horse.n.01'][0] == -0.0008546282343595379)\r\n# False\r\nprint(kv['horse.n.01'].dtype)\r\n# float64\r\n```\r\n#### Expected Results\r\n```python\r\nprint(kv['horse.n.01'][0] == -0.0008546282343595379)\r\n# True\r\n```\r\n#### Actual Results\r\n```python\r\nprint(kv['horse.n.01'][0] == -0.0008546282343595379)\r\n# False\r\n```\r\n\r\nLooking at the code and making a quick hack [here](https://github.com/RaRe-Technologies/gensim/blob/develop/gensim/models/keyedvectors.py#L253), changing..\r\n```\r\nword, weights = parts[0], [REAL(x) for x in parts[1:]]\r\n```\r\nto..\r\n```\r\nword, weights = parts[0], [datatype(x) for x in parts[1:]]\r\n```\r\n..leads to the correct result. 
However, I imagine there are other cases to be covered as well.\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Author: Shiva Manne <[email protected]>\n# Copyright (C) 2018 RaRe Technologies s.r.o.\n\n\"\"\"This module contains various general functions useful for any2vec models.\"\"\"\n\nimport logging\nimport numpy as np\nfrom gensim import utils\n\nfrom numpy import zeros, dtype, float32 as REAL, ascontiguousarray, fromstring\n\nfrom six.moves import xrange\nfrom six import iteritems\n\nlogger = logging.getLogger(__name__)\n\n\ndef _compute_ngrams(word, min_n, max_n):\n \"\"\"Returns the list of all possible ngrams for a given word.\n\n Parameters\n ----------\n word : str\n The word whose ngrams need to be computed\n min_n : int\n minimum character length of the ngrams\n max_n : int\n maximum character length of the ngrams\n\n Returns\n -------\n :obj:`list` of :obj:`str`\n List of character ngrams\n\n \"\"\"\n BOW, EOW = ('<', '>') # Used by FastText to attach to all words as prefix and suffix\n extended_word = BOW + word + EOW\n ngrams = []\n for ngram_length in range(min_n, min(len(extended_word), max_n) + 1):\n for i in range(0, len(extended_word) - ngram_length + 1):\n ngrams.append(extended_word[i:i + ngram_length])\n return ngrams\n\n\ndef _ft_hash(string):\n \"\"\"Reproduces [hash method](https://github.com/facebookresearch/fastText/blob/master/src/dictionary.cc)\n used in [1]_.\n\n Parameter\n ---------\n string : str\n The string whose hash needs to be calculated\n\n Returns\n -------\n int\n The hash of the string\n\n \"\"\"\n # Runtime warnings for integer overflow are raised, this is expected behaviour. These warnings are suppressed.\n old_settings = np.seterr(all='ignore')\n h = np.uint32(2166136261)\n for c in string:\n h = h ^ np.uint32(ord(c))\n h = h * np.uint32(16777619)\n np.seterr(**old_settings)\n return h\n\n\ndef _save_word2vec_format(fname, vocab, vectors, fvocab=None, binary=False, total_vec=None):\n \"\"\"Store the input-hidden weight matrix in the same format used by the original\n C word2vec-tool, for compatibility.\n\n Parameters\n ----------\n fname : str\n The file path used to save the vectors in\n vocab : dict\n The vocabulary of words\n vectors : numpy.array\n The vectors to be stored\n fvocab : str\n Optional file path used to save the vocabulary\n binary : bool\n If True, the data wil be saved in binary word2vec format, else it will be saved in plain text.\n total_vec : int\n Optional parameter to explicitly specify total no. 
of vectors\n (in case word vectors are appended with document vectors afterwards)\n\n \"\"\"\n if not (vocab or vectors):\n raise RuntimeError(\"no input\")\n if total_vec is None:\n total_vec = len(vocab)\n vector_size = vectors.shape[1]\n if fvocab is not None:\n logger.info(\"storing vocabulary in %s\", fvocab)\n with utils.smart_open(fvocab, 'wb') as vout:\n for word, vocab_ in sorted(iteritems(vocab), key=lambda item: -item[1].count):\n vout.write(utils.to_utf8(\"%s %s\\n\" % (word, vocab_.count)))\n logger.info(\"storing %sx%s projection weights into %s\", total_vec, vector_size, fname)\n assert (len(vocab), vector_size) == vectors.shape\n with utils.smart_open(fname, 'wb') as fout:\n fout.write(utils.to_utf8(\"%s %s\\n\" % (total_vec, vector_size)))\n # store in sorted order: most frequent words at the top\n for word, vocab_ in sorted(iteritems(vocab), key=lambda item: -item[1].count):\n row = vectors[vocab_.index]\n if binary:\n fout.write(utils.to_utf8(word) + b\" \" + row.tostring())\n else:\n fout.write(utils.to_utf8(\"%s %s\\n\" % (word, ' '.join(\"%f\" % val for val in row))))\n\n\ndef _load_word2vec_format(cls, fname, fvocab=None, binary=False, encoding='utf8', unicode_errors='strict',\n limit=None, datatype=REAL):\n \"\"\"Load the input-hidden weight matrix from the original C word2vec-tool format.\n\n Note that the information stored in the file is incomplete (the binary tree is missing),\n so while you can query for word similarity etc., you cannot continue training\n with a model loaded this way.\n\n Parameters\n ----------\n fname : str\n The file path to the saved word2vec-format file.\n fvocab : str\n Optional file path to the vocabulary.Word counts are read from `fvocab` filename,\n if set (this is the file generated by `-save-vocab` flag of the original C tool).\n binary : bool\n If True, indicates whether the data is in binary word2vec format.\n encoding : str\n If you trained the C model using non-utf8 encoding for words, specify that\n encoding in `encoding`.\n unicode_errors : str\n default 'strict', is a string suitable to be passed as the `errors`\n argument to the unicode() (Python 2.x) or str() (Python 3.x) function. If your source\n file may include word tokens truncated in the middle of a multibyte unicode character\n (as is common from the original word2vec.c tool), 'ignore' or 'replace' may help.\n limit : int\n Sets a maximum number of word-vectors to read from the file. The default,\n None, means read all.\n datatype : :class: `numpy.float*`\n (Experimental) Can coerce dimensions to a non-default float type (such\n as np.float16) to save memory. 
(Such types may result in much slower bulk operations\n or incompatibility with optimized routines.)\n\n Returns\n -------\n :obj: `cls`\n Returns the loaded model as an instance of :class: `cls`.\n\n \"\"\"\n from gensim.models.keyedvectors import Vocab\n counts = None\n if fvocab is not None:\n logger.info(\"loading word counts from %s\", fvocab)\n counts = {}\n with utils.smart_open(fvocab) as fin:\n for line in fin:\n word, count = utils.to_unicode(line).strip().split()\n counts[word] = int(count)\n\n logger.info(\"loading projection weights from %s\", fname)\n with utils.smart_open(fname) as fin:\n header = utils.to_unicode(fin.readline(), encoding=encoding)\n vocab_size, vector_size = (int(x) for x in header.split()) # throws for invalid file format\n if limit:\n vocab_size = min(vocab_size, limit)\n result = cls(vector_size)\n result.vector_size = vector_size\n result.vectors = zeros((vocab_size, vector_size), dtype=datatype)\n\n def add_word(word, weights):\n word_id = len(result.vocab)\n if word in result.vocab:\n logger.warning(\"duplicate word '%s' in %s, ignoring all but first\", word, fname)\n return\n if counts is None:\n # most common scenario: no vocab file given. just make up some bogus counts, in descending order\n result.vocab[word] = Vocab(index=word_id, count=vocab_size - word_id)\n elif word in counts:\n # use count from the vocab file\n result.vocab[word] = Vocab(index=word_id, count=counts[word])\n else:\n # vocab file given, but word is missing -- set count to None (TODO: or raise?)\n logger.warning(\"vocabulary file is incomplete: '%s' is missing\", word)\n result.vocab[word] = Vocab(index=word_id, count=None)\n result.vectors[word_id] = weights\n result.index2word.append(word)\n\n if binary:\n binary_len = dtype(REAL).itemsize * vector_size\n for _ in xrange(vocab_size):\n # mixed text and binary: read text first, then binary\n word = []\n while True:\n ch = fin.read(1)\n if ch == b' ':\n break\n if ch == b'':\n raise EOFError(\"unexpected end of input; is count incorrect or file otherwise damaged?\")\n if ch != b'\\n': # ignore newlines in front of words (some binary files have)\n word.append(ch)\n word = utils.to_unicode(b''.join(word), encoding=encoding, errors=unicode_errors)\n weights = fromstring(fin.read(binary_len), dtype=REAL)\n add_word(word, weights)\n else:\n for line_no in xrange(vocab_size):\n line = fin.readline()\n if line == b'':\n raise EOFError(\"unexpected end of input; is count incorrect or file otherwise damaged?\")\n parts = utils.to_unicode(line.rstrip(), encoding=encoding, errors=unicode_errors).split(\" \")\n if len(parts) != vector_size + 1:\n raise ValueError(\"invalid vector on line %s (is this really the text format?)\" % line_no)\n word, weights = parts[0], [REAL(x) for x in parts[1:]]\n add_word(word, weights)\n if result.vectors.shape[0] != len(result.vocab):\n logger.info(\n \"duplicate words detected, shrinking matrix size from %i to %i\",\n result.vectors.shape[0], len(result.vocab)\n )\n result.vectors = ascontiguousarray(result.vectors[: len(result.vocab)])\n assert (len(result.vocab), vector_size) == result.vectors.shape\n\n logger.info(\"loaded %s matrix from %s\", result.vectors.shape, fname)\n return result\n", "path": "gensim/models/utils_any2vec.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Author: Shiva Manne <[email protected]>\n# Copyright (C) 2018 RaRe Technologies s.r.o.\n\n\"\"\"This module contains various general functions useful for any2vec models.\"\"\"\n\nimport 
logging\nimport numpy as np\nfrom gensim import utils\n\nfrom numpy import zeros, dtype, float32 as REAL, ascontiguousarray, fromstring\n\nfrom six.moves import xrange\nfrom six import iteritems\n\nlogger = logging.getLogger(__name__)\n\n\ndef _compute_ngrams(word, min_n, max_n):\n \"\"\"Returns the list of all possible ngrams for a given word.\n\n Parameters\n ----------\n word : str\n The word whose ngrams need to be computed\n min_n : int\n minimum character length of the ngrams\n max_n : int\n maximum character length of the ngrams\n\n Returns\n -------\n :obj:`list` of :obj:`str`\n List of character ngrams\n\n \"\"\"\n BOW, EOW = ('<', '>') # Used by FastText to attach to all words as prefix and suffix\n extended_word = BOW + word + EOW\n ngrams = []\n for ngram_length in range(min_n, min(len(extended_word), max_n) + 1):\n for i in range(0, len(extended_word) - ngram_length + 1):\n ngrams.append(extended_word[i:i + ngram_length])\n return ngrams\n\n\ndef _ft_hash(string):\n \"\"\"Reproduces [hash method](https://github.com/facebookresearch/fastText/blob/master/src/dictionary.cc)\n used in [1]_.\n\n Parameter\n ---------\n string : str\n The string whose hash needs to be calculated\n\n Returns\n -------\n int\n The hash of the string\n\n \"\"\"\n # Runtime warnings for integer overflow are raised, this is expected behaviour. These warnings are suppressed.\n old_settings = np.seterr(all='ignore')\n h = np.uint32(2166136261)\n for c in string:\n h = h ^ np.uint32(ord(c))\n h = h * np.uint32(16777619)\n np.seterr(**old_settings)\n return h\n\n\ndef _save_word2vec_format(fname, vocab, vectors, fvocab=None, binary=False, total_vec=None):\n \"\"\"Store the input-hidden weight matrix in the same format used by the original\n C word2vec-tool, for compatibility.\n\n Parameters\n ----------\n fname : str\n The file path used to save the vectors in\n vocab : dict\n The vocabulary of words\n vectors : numpy.array\n The vectors to be stored\n fvocab : str\n Optional file path used to save the vocabulary\n binary : bool\n If True, the data wil be saved in binary word2vec format, else it will be saved in plain text.\n total_vec : int\n Optional parameter to explicitly specify total no. 
of vectors\n (in case word vectors are appended with document vectors afterwards)\n\n \"\"\"\n if not (vocab or vectors):\n raise RuntimeError(\"no input\")\n if total_vec is None:\n total_vec = len(vocab)\n vector_size = vectors.shape[1]\n if fvocab is not None:\n logger.info(\"storing vocabulary in %s\", fvocab)\n with utils.smart_open(fvocab, 'wb') as vout:\n for word, vocab_ in sorted(iteritems(vocab), key=lambda item: -item[1].count):\n vout.write(utils.to_utf8(\"%s %s\\n\" % (word, vocab_.count)))\n logger.info(\"storing %sx%s projection weights into %s\", total_vec, vector_size, fname)\n assert (len(vocab), vector_size) == vectors.shape\n with utils.smart_open(fname, 'wb') as fout:\n fout.write(utils.to_utf8(\"%s %s\\n\" % (total_vec, vector_size)))\n # store in sorted order: most frequent words at the top\n for word, vocab_ in sorted(iteritems(vocab), key=lambda item: -item[1].count):\n row = vectors[vocab_.index]\n if binary:\n row = row.astype(REAL)\n fout.write(utils.to_utf8(word) + b\" \" + row.tostring())\n else:\n fout.write(utils.to_utf8(\"%s %s\\n\" % (word, ' '.join(repr(val) for val in row))))\n\n\ndef _load_word2vec_format(cls, fname, fvocab=None, binary=False, encoding='utf8', unicode_errors='strict',\n limit=None, datatype=REAL):\n \"\"\"Load the input-hidden weight matrix from the original C word2vec-tool format.\n\n Note that the information stored in the file is incomplete (the binary tree is missing),\n so while you can query for word similarity etc., you cannot continue training\n with a model loaded this way.\n\n Parameters\n ----------\n fname : str\n The file path to the saved word2vec-format file.\n fvocab : str\n Optional file path to the vocabulary.Word counts are read from `fvocab` filename,\n if set (this is the file generated by `-save-vocab` flag of the original C tool).\n binary : bool\n If True, indicates whether the data is in binary word2vec format.\n encoding : str\n If you trained the C model using non-utf8 encoding for words, specify that\n encoding in `encoding`.\n unicode_errors : str\n default 'strict', is a string suitable to be passed as the `errors`\n argument to the unicode() (Python 2.x) or str() (Python 3.x) function. If your source\n file may include word tokens truncated in the middle of a multibyte unicode character\n (as is common from the original word2vec.c tool), 'ignore' or 'replace' may help.\n limit : int\n Sets a maximum number of word-vectors to read from the file. The default,\n None, means read all.\n datatype : :class: `numpy.float*`\n (Experimental) Can coerce dimensions to a non-default float type (such\n as np.float16) to save memory. 
(Such types may result in much slower bulk operations\n or incompatibility with optimized routines.)\n\n Returns\n -------\n :obj: `cls`\n Returns the loaded model as an instance of :class: `cls`.\n\n \"\"\"\n from gensim.models.keyedvectors import Vocab\n counts = None\n if fvocab is not None:\n logger.info(\"loading word counts from %s\", fvocab)\n counts = {}\n with utils.smart_open(fvocab) as fin:\n for line in fin:\n word, count = utils.to_unicode(line).strip().split()\n counts[word] = int(count)\n\n logger.info(\"loading projection weights from %s\", fname)\n with utils.smart_open(fname) as fin:\n header = utils.to_unicode(fin.readline(), encoding=encoding)\n vocab_size, vector_size = (int(x) for x in header.split()) # throws for invalid file format\n if limit:\n vocab_size = min(vocab_size, limit)\n result = cls(vector_size)\n result.vector_size = vector_size\n result.vectors = zeros((vocab_size, vector_size), dtype=datatype)\n\n def add_word(word, weights):\n word_id = len(result.vocab)\n if word in result.vocab:\n logger.warning(\"duplicate word '%s' in %s, ignoring all but first\", word, fname)\n return\n if counts is None:\n # most common scenario: no vocab file given. just make up some bogus counts, in descending order\n result.vocab[word] = Vocab(index=word_id, count=vocab_size - word_id)\n elif word in counts:\n # use count from the vocab file\n result.vocab[word] = Vocab(index=word_id, count=counts[word])\n else:\n # vocab file given, but word is missing -- set count to None (TODO: or raise?)\n logger.warning(\"vocabulary file is incomplete: '%s' is missing\", word)\n result.vocab[word] = Vocab(index=word_id, count=None)\n result.vectors[word_id] = weights\n result.index2word.append(word)\n\n if binary:\n binary_len = dtype(REAL).itemsize * vector_size\n for _ in xrange(vocab_size):\n # mixed text and binary: read text first, then binary\n word = []\n while True:\n ch = fin.read(1)\n if ch == b' ':\n break\n if ch == b'':\n raise EOFError(\"unexpected end of input; is count incorrect or file otherwise damaged?\")\n if ch != b'\\n': # ignore newlines in front of words (some binary files have)\n word.append(ch)\n word = utils.to_unicode(b''.join(word), encoding=encoding, errors=unicode_errors)\n weights = fromstring(fin.read(binary_len), dtype=REAL).astype(datatype)\n add_word(word, weights)\n else:\n for line_no in xrange(vocab_size):\n line = fin.readline()\n if line == b'':\n raise EOFError(\"unexpected end of input; is count incorrect or file otherwise damaged?\")\n parts = utils.to_unicode(line.rstrip(), encoding=encoding, errors=unicode_errors).split(\" \")\n if len(parts) != vector_size + 1:\n raise ValueError(\"invalid vector on line %s (is this really the text format?)\" % line_no)\n word, weights = parts[0], [datatype(x) for x in parts[1:]]\n add_word(word, weights)\n if result.vectors.shape[0] != len(result.vocab):\n logger.info(\n \"duplicate words detected, shrinking matrix size from %i to %i\",\n result.vectors.shape[0], len(result.vocab)\n )\n result.vectors = ascontiguousarray(result.vectors[: len(result.vocab)])\n assert (len(result.vocab), vector_size) == result.vectors.shape\n\n logger.info(\"loaded %s matrix from %s\", result.vectors.shape, fname)\n return result\n", "path": "gensim/models/utils_any2vec.py"}]} | 3,524 | 475 |
gh_patches_debug_24819 | rasdani/github-patches | git_diff | svthalia__concrexit-3092 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix reference face upload UI
The reference face UI should show better buttons for deleting a reference face, and for indicating whether a reference face is processing or accepted / rejected.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/facedetection/views.py`
Content:
```
1 from django.conf import settings
2 from django.contrib import messages
3 from django.contrib.auth.mixins import LoginRequiredMixin
4 from django.core.exceptions import ValidationError
5 from django.http import HttpResponseRedirect
6 from django.shortcuts import redirect
7 from django.urls import reverse_lazy
8 from django.utils import timezone
9 from django.utils.translation import gettext_lazy as _
10 from django.views.generic import DeleteView, FormView, ListView
11
12 from photos.models import Photo
13 from thaliawebsite.views import PagedView
14
15 from .forms import ReferenceFaceUploadForm
16 from .models import ReferenceFace
17
18
19 class YourPhotosView(LoginRequiredMixin, PagedView):
20 model = Photo
21 paginate_by = 16
22 template_name = "facedetection/your-photos.html"
23 context_object_name = "photos"
24
25 def get(self, request, *args, **kwargs):
26 if not request.member or request.member.current_membership is None:
27 messages.error(request, _("You need to be a member to use this feature."))
28 return redirect("index")
29
30 return super().get(request, *args, **kwargs)
31
32 def get_queryset(self):
33 member = self.request.member
34 photos = (
35 Photo.objects.select_related("album")
36 .filter(album__hidden=False, hidden=False)
37 .filter(
38 facedetectionphoto__encodings__matches__reference__user=self.request.member
39 )
40 )
41
42 # Filter out matches from long before the member's first membership.
43 albums_since = member.earliest_membership.since - timezone.timedelta(days=31)
44 photos.filter(album__date__gte=albums_since)
45
46 # Filter out matches from after the member's last membership.
47 if member.latest_membership.until is not None:
48 photos = photos.filter(album__date__lte=member.latest_membership.until)
49
50 return photos.select_properties("num_likes").order_by("-album__date")
51
52 def get_context_data(self, **kwargs):
53 context = super().get_context_data(**kwargs)
54
55 context[
56 "has_processing_reference_faces"
57 ] = self.request.member.reference_faces.filter(
58 status=ReferenceFace.Status.PROCESSING,
59 marked_for_deletion_at__isnull=True,
60 ).exists()
61
62 context[
63 "has_rejected_reference_faces"
64 ] = self.request.member.reference_faces.filter(
65 status=ReferenceFace.Status.REJECTED,
66 marked_for_deletion_at__isnull=True,
67 ).exists()
68
69 context["has_reference_faces"] = self.request.member.reference_faces.filter(
70 marked_for_deletion_at__isnull=True
71 ).exists()
72
73 return context
74
75
76 class ReferenceFaceView(LoginRequiredMixin, ListView):
77 template_name = "facedetection/reference-faces.html"
78 context_object_name = "reference_faces"
79
80 def get_context_data(self, **kwargs):
81 context = super().get_context_data(**kwargs)
82 context[
83 "reference_faces_limit"
84 ] = settings.FACEDETECTION_MAX_NUM_REFERENCE_FACES
85 context[
86 "storage_period_after_delete"
87 ] = settings.FACEDETECTION_REFERENCE_FACE_STORAGE_PERIOD_AFTER_DELETE_DAYS
88 context["reference_faces_limit_reached"] = bool(
89 self.request.member.reference_faces.filter(
90 marked_for_deletion_at__isnull=True
91 ).count()
92 >= settings.FACEDETECTION_MAX_NUM_REFERENCE_FACES
93 )
94 return context
95
96 def get_queryset(self):
97 return self.request.member.reference_faces.filter(
98 marked_for_deletion_at__isnull=True
99 ).all()
100
101
102 class ReferenceFaceUploadView(LoginRequiredMixin, FormView):
103 template_name = "facedetection/reference-face-upload.html"
104 form_class = ReferenceFaceUploadForm
105 success_url = reverse_lazy("facedetection:reference-faces")
106
107 def dispatch(self, request, *args, **kwargs):
108 if not request.member or request.member.current_membership is None:
109 messages.error(request, "You need to be a member to use this feature.")
110 return redirect("index")
111 return super().dispatch(request, *args, **kwargs)
112
113 def form_valid(self, form):
114 try:
115 form.save(user=self.request.member)
116 except ValidationError as e:
117 for error in e:
118 messages.error(self.request, error)
119 return self.form_invalid(form)
120 messages.success(self.request, "Your reference face has been uploaded.")
121 return super().form_valid(form)
122
123
124 class ReferenceFaceDeleteView(LoginRequiredMixin, DeleteView):
125 model = ReferenceFace
126 success_url = reverse_lazy("facedetection:reference-faces")
127
128 def get_context_data(self, **kwargs):
129 context = super().get_context_data(**kwargs)
130 context[
131 "storage_period_after_delete"
132 ] = settings.FACEDETECTION_REFERENCE_FACE_STORAGE_PERIOD_AFTER_DELETE_DAYS
133 return context
134
135 def get_queryset(self):
136 return self.request.member.reference_faces.filter(
137 marked_for_deletion_at__isnull=True
138 ).all()
139
140 def form_valid(self, form):
141 success_url = self.get_success_url()
142
143 instance = self.get_object()
144 instance.marked_for_deletion_at = timezone.now()
145 instance.save()
146 messages.success(self.request, "Your reference face has been deleted.")
147
148 return HttpResponseRedirect(success_url)
149
```
Path: `website/facedetection/services.py`
Content:
```
1 import json
2 import logging
3 from typing import Union
4
5 from django.conf import settings
6 from django.db.models import Q
7 from django.utils import timezone
8
9 import boto3
10 from sentry_sdk import capture_exception
11
12 from photos.models import Photo
13 from utils.media.services import get_thumbnail_url
14
15 from .models import FaceDetectionPhoto, ReferenceFace
16
17 logger = logging.getLogger(__name__)
18
19
20 def execute_data_minimisation(dry_run=False):
21 """Delete old reference faces.
22
23 This deletes reference faces that have been marked for deletion by the user for
24 some time, as well as reference faces of users that have not logged in for a year.
25 """
26 delete_period_inactive_member = timezone.now() - timezone.timedelta(days=365)
27 delete_period_marked_for_deletion = timezone.now() - timezone.timedelta(
28 days=settings.FACEDETECTION_REFERENCE_FACE_STORAGE_PERIOD_AFTER_DELETE_DAYS
29 )
30
31 queryset = ReferenceFace.objects.filter(
32 Q(marked_for_deletion_at__lte=delete_period_marked_for_deletion)
33 | Q(member__last_login__lte=delete_period_inactive_member)
34 )
35
36 if not dry_run:
37 for reference_face in queryset:
38 reference_face.delete() # Don't run the queryset method, this will also delete the file
39
40 return queryset
41
42
43 def _serialize_lambda_source(source: Union[ReferenceFace, FaceDetectionPhoto]):
44 """Serialize a source object to be sent to the lambda function."""
45 if isinstance(source, ReferenceFace):
46 return {
47 "type": "reference",
48 "pk": source.pk,
49 "token": source.token,
50 "photo_url": get_thumbnail_url(
51 source.file,
52 "medium",
53 absolute_url=True,
54 # Lambda calls can be queued for up to 6 hours by default, so
55 # we make sure the url it uses is valid for at least that long.
56 expire_seconds=60 * 60 * 7,
57 ),
58 }
59 if isinstance(source, FaceDetectionPhoto):
60 return {
61 "type": "photo",
62 "pk": source.pk,
63 "token": source.token,
64 "photo_url": get_thumbnail_url(
65 source.photo.file,
66 "large",
67 absolute_url=True,
68 expire_seconds=60 * 60 * 7,
69 ),
70 }
71 raise ValueError("source must be a ReferenceFace or FaceDetectionPhoto")
72
73
74 def _trigger_facedetection_lambda_batch(
75 sources: list[Union[ReferenceFace, FaceDetectionPhoto]]
76 ):
77 """Submit a batch of sources to the facedetection lambda function.
78
79 If submitting the sources fails, this is logged and
80 reported to Sentry, but no exception is raised.
81 """
82 payload = {
83 "api_url": settings.BASE_URL,
84 "sources": [_serialize_lambda_source(source) for source in sources],
85 }
86
87 for source in sources:
88 source.submitted_at = timezone.now()
89 source.save()
90
91 try:
92 lambda_client = boto3.client(
93 service_name="lambda",
94 aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
95 aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
96 )
97
98 response = lambda_client.invoke(
99 FunctionName=settings.FACEDETECTION_LAMBDA_ARN,
100 InvocationType="Event",
101 Payload=json.dumps(payload),
102 )
103
104 if response["StatusCode"] != 202:
105 # pylint: disable=broad-exception-raised
106 raise Exception("Lambda response was not 202.")
107
108 # pylint: disable=broad-exception-caught
109 except Exception as e:
110 logger.error(
111 "Submitting sources to lambda failed. Reason: %s", str(e), exc_info=True
112 )
113 capture_exception(e)
114
115
116 def trigger_facedetection_lambda(
117 sources: list[Union[ReferenceFace, FaceDetectionPhoto]]
118 ):
119 """Submit a sources to the facedetection lambda function for processing.
120
121 This function will check if the sources are valid and, if a lambda function has
122 been configured, try to submit the sources to the lambda function in batches.
123
124 If no lambda function has been configured, or submitting (a batch of) sources fails,
125 this is ignored. The sources can be submitted again later.
126 """
127 if len(sources) == 0:
128 raise ValueError("No sources to process.")
129
130 if any(source.status != source.Status.PROCESSING for source in sources):
131 raise ValueError("A source has already been processed.")
132
133 if settings.FACEDETECTION_LAMBDA_ARN is None:
134 logger.warning(
135 "No Lambda ARN has been configured. Sources will not be processed."
136 )
137 return
138
139 batch_size = settings.FACEDETECTION_LAMBDA_BATCH_SIZE
140 for batch in [
141 sources[i : i + batch_size] for i in range(0, len(sources), batch_size)
142 ]:
143 _trigger_facedetection_lambda_batch(batch)
144
145
146 def resubmit_reference_faces() -> list[ReferenceFace]:
147 """Resubmit reference faces that (should) have already been submitted but aren't done.
148
149 Returns a list of reference faces that have been resubmitted.
150 """
151 submitted_before = timezone.now() - timezone.timedelta(hours=7)
152 references = list(
153 ReferenceFace.objects.filter(
154 status=ReferenceFace.Status.PROCESSING,
155 ).filter(Q(submitted_at__lte=submitted_before) | Q(submitted_at__isnull=True))
156 )
157 if references:
158 trigger_facedetection_lambda(references)
159 return references
160
161
162 def resubmit_photos() -> list[FaceDetectionPhoto]:
163 """Resubmit photos that (should) have already been submitted but aren't done.
164
165 Returns a list of photos that have been resubmitted.
166 """
167 submitted_before = timezone.now() - timezone.timedelta(hours=7)
168 photos = list(
169 FaceDetectionPhoto.objects.filter(
170 status=FaceDetectionPhoto.Status.PROCESSING,
171 )
172 .filter(Q(submitted_at__lte=submitted_before) | Q(submitted_at__isnull=True))
173 .select_related("photo")
174 )
175 if photos:
176 trigger_facedetection_lambda(photos)
177 return photos
178
179
180 def submit_new_photos() -> int:
181 """Submit photos for which no FaceDetectionPhoto exists yet.
182
183 Returns the number of new photos that have been submitted.
184 """
185 count = 0
186 if not Photo.objects.filter(facedetectionphoto__isnull=True).exists():
187 return count
188
189 # We have another level of batching (outside of trigger_facedetection_lambda)
190 # for performance and responsive output when there are thousands of photos.
191 while Photo.objects.filter(facedetectionphoto__isnull=True).exists():
192 photos = FaceDetectionPhoto.objects.bulk_create(
193 [
194 FaceDetectionPhoto(photo=photo)
195 for photo in Photo.objects.filter(facedetectionphoto__isnull=True)[:400]
196 ]
197 )
198
199 trigger_facedetection_lambda(photos)
200 count += len(photos)
201
202 return count
203
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/facedetection/services.py b/website/facedetection/services.py
--- a/website/facedetection/services.py
+++ b/website/facedetection/services.py
@@ -30,7 +30,7 @@
queryset = ReferenceFace.objects.filter(
Q(marked_for_deletion_at__lte=delete_period_marked_for_deletion)
- | Q(member__last_login__lte=delete_period_inactive_member)
+ | Q(user__last_login__lte=delete_period_inactive_member)
)
if not dry_run:
diff --git a/website/facedetection/views.py b/website/facedetection/views.py
--- a/website/facedetection/views.py
+++ b/website/facedetection/views.py
@@ -75,7 +75,6 @@
class ReferenceFaceView(LoginRequiredMixin, ListView):
template_name = "facedetection/reference-faces.html"
- context_object_name = "reference_faces"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
@@ -91,6 +90,12 @@
).count()
>= settings.FACEDETECTION_MAX_NUM_REFERENCE_FACES
)
+ context[
+ "has_rejected_reference_faces"
+ ] = self.request.member.reference_faces.filter(
+ status=ReferenceFace.Status.REJECTED,
+ marked_for_deletion_at__isnull=True,
+ ).exists()
return context
def get_queryset(self):
| {"golden_diff": "diff --git a/website/facedetection/services.py b/website/facedetection/services.py\n--- a/website/facedetection/services.py\n+++ b/website/facedetection/services.py\n@@ -30,7 +30,7 @@\n \n queryset = ReferenceFace.objects.filter(\n Q(marked_for_deletion_at__lte=delete_period_marked_for_deletion)\n- | Q(member__last_login__lte=delete_period_inactive_member)\n+ | Q(user__last_login__lte=delete_period_inactive_member)\n )\n \n if not dry_run:\ndiff --git a/website/facedetection/views.py b/website/facedetection/views.py\n--- a/website/facedetection/views.py\n+++ b/website/facedetection/views.py\n@@ -75,7 +75,6 @@\n \n class ReferenceFaceView(LoginRequiredMixin, ListView):\n template_name = \"facedetection/reference-faces.html\"\n- context_object_name = \"reference_faces\"\n \n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n@@ -91,6 +90,12 @@\n ).count()\n >= settings.FACEDETECTION_MAX_NUM_REFERENCE_FACES\n )\n+ context[\n+ \"has_rejected_reference_faces\"\n+ ] = self.request.member.reference_faces.filter(\n+ status=ReferenceFace.Status.REJECTED,\n+ marked_for_deletion_at__isnull=True,\n+ ).exists()\n return context\n \n def get_queryset(self):\n", "issue": "Fix reference face upload UI\nThe reference face UI should display better buttons for deleting a reference face, and for displaying whether a reference face is processing or accepted / rejected.\n", "before_files": [{"content": "from django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.core.exceptions import ValidationError\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import redirect\nfrom django.urls import reverse_lazy\nfrom django.utils import timezone\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.generic import DeleteView, FormView, ListView\n\nfrom photos.models import Photo\nfrom thaliawebsite.views import PagedView\n\nfrom .forms import ReferenceFaceUploadForm\nfrom .models import ReferenceFace\n\n\nclass YourPhotosView(LoginRequiredMixin, PagedView):\n model = Photo\n paginate_by = 16\n template_name = \"facedetection/your-photos.html\"\n context_object_name = \"photos\"\n\n def get(self, request, *args, **kwargs):\n if not request.member or request.member.current_membership is None:\n messages.error(request, _(\"You need to be a member to use this feature.\"))\n return redirect(\"index\")\n\n return super().get(request, *args, **kwargs)\n\n def get_queryset(self):\n member = self.request.member\n photos = (\n Photo.objects.select_related(\"album\")\n .filter(album__hidden=False, hidden=False)\n .filter(\n facedetectionphoto__encodings__matches__reference__user=self.request.member\n )\n )\n\n # Filter out matches from long before the member's first membership.\n albums_since = member.earliest_membership.since - timezone.timedelta(days=31)\n photos.filter(album__date__gte=albums_since)\n\n # Filter out matches from after the member's last membership.\n if member.latest_membership.until is not None:\n photos = photos.filter(album__date__lte=member.latest_membership.until)\n\n return photos.select_properties(\"num_likes\").order_by(\"-album__date\")\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n context[\n \"has_processing_reference_faces\"\n ] = self.request.member.reference_faces.filter(\n status=ReferenceFace.Status.PROCESSING,\n marked_for_deletion_at__isnull=True,\n ).exists()\n\n context[\n 
\"has_rejected_reference_faces\"\n ] = self.request.member.reference_faces.filter(\n status=ReferenceFace.Status.REJECTED,\n marked_for_deletion_at__isnull=True,\n ).exists()\n\n context[\"has_reference_faces\"] = self.request.member.reference_faces.filter(\n marked_for_deletion_at__isnull=True\n ).exists()\n\n return context\n\n\nclass ReferenceFaceView(LoginRequiredMixin, ListView):\n template_name = \"facedetection/reference-faces.html\"\n context_object_name = \"reference_faces\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\n \"reference_faces_limit\"\n ] = settings.FACEDETECTION_MAX_NUM_REFERENCE_FACES\n context[\n \"storage_period_after_delete\"\n ] = settings.FACEDETECTION_REFERENCE_FACE_STORAGE_PERIOD_AFTER_DELETE_DAYS\n context[\"reference_faces_limit_reached\"] = bool(\n self.request.member.reference_faces.filter(\n marked_for_deletion_at__isnull=True\n ).count()\n >= settings.FACEDETECTION_MAX_NUM_REFERENCE_FACES\n )\n return context\n\n def get_queryset(self):\n return self.request.member.reference_faces.filter(\n marked_for_deletion_at__isnull=True\n ).all()\n\n\nclass ReferenceFaceUploadView(LoginRequiredMixin, FormView):\n template_name = \"facedetection/reference-face-upload.html\"\n form_class = ReferenceFaceUploadForm\n success_url = reverse_lazy(\"facedetection:reference-faces\")\n\n def dispatch(self, request, *args, **kwargs):\n if not request.member or request.member.current_membership is None:\n messages.error(request, \"You need to be a member to use this feature.\")\n return redirect(\"index\")\n return super().dispatch(request, *args, **kwargs)\n\n def form_valid(self, form):\n try:\n form.save(user=self.request.member)\n except ValidationError as e:\n for error in e:\n messages.error(self.request, error)\n return self.form_invalid(form)\n messages.success(self.request, \"Your reference face has been uploaded.\")\n return super().form_valid(form)\n\n\nclass ReferenceFaceDeleteView(LoginRequiredMixin, DeleteView):\n model = ReferenceFace\n success_url = reverse_lazy(\"facedetection:reference-faces\")\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\n \"storage_period_after_delete\"\n ] = settings.FACEDETECTION_REFERENCE_FACE_STORAGE_PERIOD_AFTER_DELETE_DAYS\n return context\n\n def get_queryset(self):\n return self.request.member.reference_faces.filter(\n marked_for_deletion_at__isnull=True\n ).all()\n\n def form_valid(self, form):\n success_url = self.get_success_url()\n\n instance = self.get_object()\n instance.marked_for_deletion_at = timezone.now()\n instance.save()\n messages.success(self.request, \"Your reference face has been deleted.\")\n\n return HttpResponseRedirect(success_url)\n", "path": "website/facedetection/views.py"}, {"content": "import json\nimport logging\nfrom typing import Union\n\nfrom django.conf import settings\nfrom django.db.models import Q\nfrom django.utils import timezone\n\nimport boto3\nfrom sentry_sdk import capture_exception\n\nfrom photos.models import Photo\nfrom utils.media.services import get_thumbnail_url\n\nfrom .models import FaceDetectionPhoto, ReferenceFace\n\nlogger = logging.getLogger(__name__)\n\n\ndef execute_data_minimisation(dry_run=False):\n \"\"\"Delete old reference faces.\n\n This deletes reference faces that have been marked for deletion by the user for\n some time, as well as reference faces of users that have not logged in for a year.\n \"\"\"\n delete_period_inactive_member = timezone.now() - 
timezone.timedelta(days=365)\n delete_period_marked_for_deletion = timezone.now() - timezone.timedelta(\n days=settings.FACEDETECTION_REFERENCE_FACE_STORAGE_PERIOD_AFTER_DELETE_DAYS\n )\n\n queryset = ReferenceFace.objects.filter(\n Q(marked_for_deletion_at__lte=delete_period_marked_for_deletion)\n | Q(member__last_login__lte=delete_period_inactive_member)\n )\n\n if not dry_run:\n for reference_face in queryset:\n reference_face.delete() # Don't run the queryset method, this will also delete the file\n\n return queryset\n\n\ndef _serialize_lambda_source(source: Union[ReferenceFace, FaceDetectionPhoto]):\n \"\"\"Serialize a source object to be sent to the lambda function.\"\"\"\n if isinstance(source, ReferenceFace):\n return {\n \"type\": \"reference\",\n \"pk\": source.pk,\n \"token\": source.token,\n \"photo_url\": get_thumbnail_url(\n source.file,\n \"medium\",\n absolute_url=True,\n # Lambda calls can be queued for up to 6 hours by default, so\n # we make sure the url it uses is valid for at least that long.\n expire_seconds=60 * 60 * 7,\n ),\n }\n if isinstance(source, FaceDetectionPhoto):\n return {\n \"type\": \"photo\",\n \"pk\": source.pk,\n \"token\": source.token,\n \"photo_url\": get_thumbnail_url(\n source.photo.file,\n \"large\",\n absolute_url=True,\n expire_seconds=60 * 60 * 7,\n ),\n }\n raise ValueError(\"source must be a ReferenceFace or FaceDetectionPhoto\")\n\n\ndef _trigger_facedetection_lambda_batch(\n sources: list[Union[ReferenceFace, FaceDetectionPhoto]]\n):\n \"\"\"Submit a batch of sources to the facedetection lambda function.\n\n If submitting the sources fails, this is logged and\n reported to Sentry, but no exception is raised.\n \"\"\"\n payload = {\n \"api_url\": settings.BASE_URL,\n \"sources\": [_serialize_lambda_source(source) for source in sources],\n }\n\n for source in sources:\n source.submitted_at = timezone.now()\n source.save()\n\n try:\n lambda_client = boto3.client(\n service_name=\"lambda\",\n aws_access_key_id=settings.AWS_ACCESS_KEY_ID,\n aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,\n )\n\n response = lambda_client.invoke(\n FunctionName=settings.FACEDETECTION_LAMBDA_ARN,\n InvocationType=\"Event\",\n Payload=json.dumps(payload),\n )\n\n if response[\"StatusCode\"] != 202:\n # pylint: disable=broad-exception-raised\n raise Exception(\"Lambda response was not 202.\")\n\n # pylint: disable=broad-exception-caught\n except Exception as e:\n logger.error(\n \"Submitting sources to lambda failed. Reason: %s\", str(e), exc_info=True\n )\n capture_exception(e)\n\n\ndef trigger_facedetection_lambda(\n sources: list[Union[ReferenceFace, FaceDetectionPhoto]]\n):\n \"\"\"Submit a sources to the facedetection lambda function for processing.\n\n This function will check if the sources are valid and, if a lambda function has\n been configured, try to submit the sources to the lambda function in batches.\n\n If no lambda function has been configured, or submitting (a batch of) sources fails,\n this is ignored. The sources can be submitted again later.\n \"\"\"\n if len(sources) == 0:\n raise ValueError(\"No sources to process.\")\n\n if any(source.status != source.Status.PROCESSING for source in sources):\n raise ValueError(\"A source has already been processed.\")\n\n if settings.FACEDETECTION_LAMBDA_ARN is None:\n logger.warning(\n \"No Lambda ARN has been configured. 
Sources will not be processed.\"\n )\n return\n\n batch_size = settings.FACEDETECTION_LAMBDA_BATCH_SIZE\n for batch in [\n sources[i : i + batch_size] for i in range(0, len(sources), batch_size)\n ]:\n _trigger_facedetection_lambda_batch(batch)\n\n\ndef resubmit_reference_faces() -> list[ReferenceFace]:\n \"\"\"Resubmit reference faces that (should) have already been submitted but aren't done.\n\n Returns a list of reference faces that have been resubmitted.\n \"\"\"\n submitted_before = timezone.now() - timezone.timedelta(hours=7)\n references = list(\n ReferenceFace.objects.filter(\n status=ReferenceFace.Status.PROCESSING,\n ).filter(Q(submitted_at__lte=submitted_before) | Q(submitted_at__isnull=True))\n )\n if references:\n trigger_facedetection_lambda(references)\n return references\n\n\ndef resubmit_photos() -> list[FaceDetectionPhoto]:\n \"\"\"Resubmit photos that (should) have already been submitted but aren't done.\n\n Returns a list of photos that have been resubmitted.\n \"\"\"\n submitted_before = timezone.now() - timezone.timedelta(hours=7)\n photos = list(\n FaceDetectionPhoto.objects.filter(\n status=FaceDetectionPhoto.Status.PROCESSING,\n )\n .filter(Q(submitted_at__lte=submitted_before) | Q(submitted_at__isnull=True))\n .select_related(\"photo\")\n )\n if photos:\n trigger_facedetection_lambda(photos)\n return photos\n\n\ndef submit_new_photos() -> int:\n \"\"\"Submit photos for which no FaceDetectionPhoto exists yet.\n\n Returns the number of new photos that have been submitted.\n \"\"\"\n count = 0\n if not Photo.objects.filter(facedetectionphoto__isnull=True).exists():\n return count\n\n # We have another level of batching (outside of trigger_facedetection_lambda)\n # for performance and responsive output when there are thousands of photos.\n while Photo.objects.filter(facedetectionphoto__isnull=True).exists():\n photos = FaceDetectionPhoto.objects.bulk_create(\n [\n FaceDetectionPhoto(photo=photo)\n for photo in Photo.objects.filter(facedetectionphoto__isnull=True)[:400]\n ]\n )\n\n trigger_facedetection_lambda(photos)\n count += len(photos)\n\n return count\n", "path": "website/facedetection/services.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.core.exceptions import ValidationError\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import redirect\nfrom django.urls import reverse_lazy\nfrom django.utils import timezone\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.generic import DeleteView, FormView, ListView\n\nfrom photos.models import Photo\nfrom thaliawebsite.views import PagedView\n\nfrom .forms import ReferenceFaceUploadForm\nfrom .models import ReferenceFace\n\n\nclass YourPhotosView(LoginRequiredMixin, PagedView):\n model = Photo\n paginate_by = 16\n template_name = \"facedetection/your-photos.html\"\n context_object_name = \"photos\"\n\n def get(self, request, *args, **kwargs):\n if not request.member or request.member.current_membership is None:\n messages.error(request, _(\"You need to be a member to use this feature.\"))\n return redirect(\"index\")\n\n return super().get(request, *args, **kwargs)\n\n def get_queryset(self):\n member = self.request.member\n photos = (\n Photo.objects.select_related(\"album\")\n .filter(album__hidden=False, hidden=False)\n .filter(\n facedetectionphoto__encodings__matches__reference__user=self.request.member\n )\n )\n\n # Filter out matches from 
long before the member's first membership.\n albums_since = member.earliest_membership.since - timezone.timedelta(days=31)\n photos.filter(album__date__gte=albums_since)\n\n # Filter out matches from after the member's last membership.\n if member.latest_membership.until is not None:\n photos = photos.filter(album__date__lte=member.latest_membership.until)\n\n return photos.select_properties(\"num_likes\").order_by(\"-album__date\")\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n context[\n \"has_processing_reference_faces\"\n ] = self.request.member.reference_faces.filter(\n status=ReferenceFace.Status.PROCESSING,\n marked_for_deletion_at__isnull=True,\n ).exists()\n\n context[\n \"has_rejected_reference_faces\"\n ] = self.request.member.reference_faces.filter(\n status=ReferenceFace.Status.REJECTED,\n marked_for_deletion_at__isnull=True,\n ).exists()\n\n context[\"has_reference_faces\"] = self.request.member.reference_faces.filter(\n marked_for_deletion_at__isnull=True\n ).exists()\n\n return context\n\n\nclass ReferenceFaceView(LoginRequiredMixin, ListView):\n template_name = \"facedetection/reference-faces.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\n \"reference_faces_limit\"\n ] = settings.FACEDETECTION_MAX_NUM_REFERENCE_FACES\n context[\n \"storage_period_after_delete\"\n ] = settings.FACEDETECTION_REFERENCE_FACE_STORAGE_PERIOD_AFTER_DELETE_DAYS\n context[\"reference_faces_limit_reached\"] = bool(\n self.request.member.reference_faces.filter(\n marked_for_deletion_at__isnull=True\n ).count()\n >= settings.FACEDETECTION_MAX_NUM_REFERENCE_FACES\n )\n context[\n \"has_rejected_reference_faces\"\n ] = self.request.member.reference_faces.filter(\n status=ReferenceFace.Status.REJECTED,\n marked_for_deletion_at__isnull=True,\n ).exists()\n return context\n\n def get_queryset(self):\n return self.request.member.reference_faces.filter(\n marked_for_deletion_at__isnull=True\n ).all()\n\n\nclass ReferenceFaceUploadView(LoginRequiredMixin, FormView):\n template_name = \"facedetection/reference-face-upload.html\"\n form_class = ReferenceFaceUploadForm\n success_url = reverse_lazy(\"facedetection:reference-faces\")\n\n def dispatch(self, request, *args, **kwargs):\n if not request.member or request.member.current_membership is None:\n messages.error(request, \"You need to be a member to use this feature.\")\n return redirect(\"index\")\n return super().dispatch(request, *args, **kwargs)\n\n def form_valid(self, form):\n try:\n form.save(user=self.request.member)\n except ValidationError as e:\n for error in e:\n messages.error(self.request, error)\n return self.form_invalid(form)\n messages.success(self.request, \"Your reference face has been uploaded.\")\n return super().form_valid(form)\n\n\nclass ReferenceFaceDeleteView(LoginRequiredMixin, DeleteView):\n model = ReferenceFace\n success_url = reverse_lazy(\"facedetection:reference-faces\")\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\n \"storage_period_after_delete\"\n ] = settings.FACEDETECTION_REFERENCE_FACE_STORAGE_PERIOD_AFTER_DELETE_DAYS\n return context\n\n def get_queryset(self):\n return self.request.member.reference_faces.filter(\n marked_for_deletion_at__isnull=True\n ).all()\n\n def form_valid(self, form):\n success_url = self.get_success_url()\n\n instance = self.get_object()\n instance.marked_for_deletion_at = timezone.now()\n instance.save()\n 
messages.success(self.request, \"Your reference face has been deleted.\")\n\n return HttpResponseRedirect(success_url)\n", "path": "website/facedetection/views.py"}, {"content": "import json\nimport logging\nfrom typing import Union\n\nfrom django.conf import settings\nfrom django.db.models import Q\nfrom django.utils import timezone\n\nimport boto3\nfrom sentry_sdk import capture_exception\n\nfrom photos.models import Photo\nfrom utils.media.services import get_thumbnail_url\n\nfrom .models import FaceDetectionPhoto, ReferenceFace\n\nlogger = logging.getLogger(__name__)\n\n\ndef execute_data_minimisation(dry_run=False):\n \"\"\"Delete old reference faces.\n\n This deletes reference faces that have been marked for deletion by the user for\n some time, as well as reference faces of users that have not logged in for a year.\n \"\"\"\n delete_period_inactive_member = timezone.now() - timezone.timedelta(days=365)\n delete_period_marked_for_deletion = timezone.now() - timezone.timedelta(\n days=settings.FACEDETECTION_REFERENCE_FACE_STORAGE_PERIOD_AFTER_DELETE_DAYS\n )\n\n queryset = ReferenceFace.objects.filter(\n Q(marked_for_deletion_at__lte=delete_period_marked_for_deletion)\n | Q(user__last_login__lte=delete_period_inactive_member)\n )\n\n if not dry_run:\n for reference_face in queryset:\n reference_face.delete() # Don't run the queryset method, this will also delete the file\n\n return queryset\n\n\ndef _serialize_lambda_source(source: Union[ReferenceFace, FaceDetectionPhoto]):\n \"\"\"Serialize a source object to be sent to the lambda function.\"\"\"\n if isinstance(source, ReferenceFace):\n return {\n \"type\": \"reference\",\n \"pk\": source.pk,\n \"token\": source.token,\n \"photo_url\": get_thumbnail_url(\n source.file,\n \"medium\",\n absolute_url=True,\n # Lambda calls can be queued for up to 6 hours by default, so\n # we make sure the url it uses is valid for at least that long.\n expire_seconds=60 * 60 * 7,\n ),\n }\n if isinstance(source, FaceDetectionPhoto):\n return {\n \"type\": \"photo\",\n \"pk\": source.pk,\n \"token\": source.token,\n \"photo_url\": get_thumbnail_url(\n source.photo.file,\n \"large\",\n absolute_url=True,\n expire_seconds=60 * 60 * 7,\n ),\n }\n raise ValueError(\"source must be a ReferenceFace or FaceDetectionPhoto\")\n\n\ndef _trigger_facedetection_lambda_batch(\n sources: list[Union[ReferenceFace, FaceDetectionPhoto]]\n):\n \"\"\"Submit a batch of sources to the facedetection lambda function.\n\n If submitting the sources fails, this is logged and\n reported to Sentry, but no exception is raised.\n \"\"\"\n payload = {\n \"api_url\": settings.BASE_URL,\n \"sources\": [_serialize_lambda_source(source) for source in sources],\n }\n\n for source in sources:\n source.submitted_at = timezone.now()\n source.save()\n\n try:\n lambda_client = boto3.client(\n service_name=\"lambda\",\n aws_access_key_id=settings.AWS_ACCESS_KEY_ID,\n aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,\n )\n\n response = lambda_client.invoke(\n FunctionName=settings.FACEDETECTION_LAMBDA_ARN,\n InvocationType=\"Event\",\n Payload=json.dumps(payload),\n )\n\n if response[\"StatusCode\"] != 202:\n # pylint: disable=broad-exception-raised\n raise Exception(\"Lambda response was not 202.\")\n\n # pylint: disable=broad-exception-caught\n except Exception as e:\n logger.error(\n \"Submitting sources to lambda failed. 
Reason: %s\", str(e), exc_info=True\n )\n capture_exception(e)\n\n\ndef trigger_facedetection_lambda(\n sources: list[Union[ReferenceFace, FaceDetectionPhoto]]\n):\n \"\"\"Submit a sources to the facedetection lambda function for processing.\n\n This function will check if the sources are valid and, if a lambda function has\n been configured, try to submit the sources to the lambda function in batches.\n\n If no lambda function has been configured, or submitting (a batch of) sources fails,\n this is ignored. The sources can be submitted again later.\n \"\"\"\n if len(sources) == 0:\n raise ValueError(\"No sources to process.\")\n\n if any(source.status != source.Status.PROCESSING for source in sources):\n raise ValueError(\"A source has already been processed.\")\n\n if settings.FACEDETECTION_LAMBDA_ARN is None:\n logger.warning(\n \"No Lambda ARN has been configured. Sources will not be processed.\"\n )\n return\n\n batch_size = settings.FACEDETECTION_LAMBDA_BATCH_SIZE\n for batch in [\n sources[i : i + batch_size] for i in range(0, len(sources), batch_size)\n ]:\n _trigger_facedetection_lambda_batch(batch)\n\n\ndef resubmit_reference_faces() -> list[ReferenceFace]:\n \"\"\"Resubmit reference faces that (should) have already been submitted but aren't done.\n\n Returns a list of reference faces that have been resubmitted.\n \"\"\"\n submitted_before = timezone.now() - timezone.timedelta(hours=7)\n references = list(\n ReferenceFace.objects.filter(\n status=ReferenceFace.Status.PROCESSING,\n ).filter(Q(submitted_at__lte=submitted_before) | Q(submitted_at__isnull=True))\n )\n if references:\n trigger_facedetection_lambda(references)\n return references\n\n\ndef resubmit_photos() -> list[FaceDetectionPhoto]:\n \"\"\"Resubmit photos that (should) have already been submitted but aren't done.\n\n Returns a list of photos that have been resubmitted.\n \"\"\"\n submitted_before = timezone.now() - timezone.timedelta(hours=7)\n photos = list(\n FaceDetectionPhoto.objects.filter(\n status=FaceDetectionPhoto.Status.PROCESSING,\n )\n .filter(Q(submitted_at__lte=submitted_before) | Q(submitted_at__isnull=True))\n .select_related(\"photo\")\n )\n if photos:\n trigger_facedetection_lambda(photos)\n return photos\n\n\ndef submit_new_photos() -> int:\n \"\"\"Submit photos for which no FaceDetectionPhoto exists yet.\n\n Returns the number of new photos that have been submitted.\n \"\"\"\n count = 0\n if not Photo.objects.filter(facedetectionphoto__isnull=True).exists():\n return count\n\n # We have another level of batching (outside of trigger_facedetection_lambda)\n # for performance and responsive output when there are thousands of photos.\n while Photo.objects.filter(facedetectionphoto__isnull=True).exists():\n photos = FaceDetectionPhoto.objects.bulk_create(\n [\n FaceDetectionPhoto(photo=photo)\n for photo in Photo.objects.filter(facedetectionphoto__isnull=True)[:400]\n ]\n )\n\n trigger_facedetection_lambda(photos)\n count += len(photos)\n\n return count\n", "path": "website/facedetection/services.py"}]} | 3,725 | 321 |
gh_patches_debug_863 | rasdani/github-patches | git_diff | vacanza__python-holidays-451 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Can't un-pickle a `HolidayBase`
It seems that after a holidays instance, e.g. `holidays.UnitedStates()`, has been used once, it can't be un-pickled.
For example, this snippet:
```python
import holidays
import pickle
from datetime import datetime
# Works:
us_holidays = holidays.UnitedStates()
us_holidays_ = pickle.loads(pickle.dumps(us_holidays))
b = datetime.fromisoformat("2020-01-01") in us_holidays_
# Fails:
us_holidays = holidays.UnitedStates()
b = datetime.fromisoformat("2020-01-01") in us_holidays
dump = pickle.dumps(us_holidays)
pickle.loads(dump) # <- exception
```
Raises the following exception from the last line:
```
~/.local/share/virtualenvs/sibylla-v2-LxBhzJgn/lib/python3.8/site-packages/holidays/holiday_base.py in __setitem__(self, key, value)
116
117 def __setitem__(self, key, value):
--> 118 if key in self:
119 if self.get(key).find(value) < 0 \
120 and value.find(self.get(key)) < 0:
~/.local/share/virtualenvs/sibylla-v2-LxBhzJgn/lib/python3.8/site-packages/holidays/holiday_base.py in __contains__(self, key)
73
74 def __contains__(self, key):
---> 75 return dict.__contains__(self, self.__keytransform__(key))
76
77 def __getitem__(self, key):
~/.local/share/virtualenvs/sibylla-v2-LxBhzJgn/lib/python3.8/site-packages/holidays/holiday_base.py in __keytransform__(self, key)
67 raise TypeError("Cannot convert type '%s' to date." % type(key))
68
---> 69 if self.expand and key.year not in self.years:
70 self.years.add(key.year)
71 self._populate(key.year)
```
The `expand` attribute is set by `__init__`, but it's not there during deserialization via unpickling.
I think it's because `HolidayBase` inherits from dict and there's some weirdness there - unpickling seems to first populate the dict items in the deserialized object and only then set the attributes from the state. But since `HolidayBase` overrides `__setitem__`, and this override uses state attributes that haven't been set on the object yet, the `expand` attribute is missing.
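A minimal, self-contained sketch (independent of the holidays package; `AttrDict` and the sample items are just illustrative) showing this ordering for any dict subclass:
```python
import pickle


class AttrDict(dict):
    def __init__(self):
        super().__init__()
        self.expand = True  # attribute that __setitem__ relies on

    def __setitem__(self, key, value):
        # During unpickling this runs before instance attributes are restored.
        print("expand present:", hasattr(self, "expand"))
        super().__setitem__(key, value)


d = AttrDict()
d["2020-01-01"] = "New Year's Day"  # prints: expand present: True
pickle.loads(pickle.dumps(d))       # prints: expand present: False
```
The pickler writes the dict items (SETITEMS) before the instance state (BUILD), so on load the overridden `__setitem__` runs on an object whose `__dict__` hasn't been restored yet - which is exactly where `self.expand` is missing.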
Tested with `holidays=='0.10.4'`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `holidays/holiday_base.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # python-holidays
4 # ---------------
5 # A fast, efficient Python library for generating country, province and state
6 # specific sets of holidays on the fly. It aims to make determining whether a
7 # specific date is a holiday as fast and flexible as possible.
8 #
9 # Author: ryanss <[email protected]> (c) 2014-2017
10 # dr-prodigy <[email protected]> (c) 2017-2021
11 # Website: https://github.com/dr-prodigy/python-holidays
12 # License: MIT (see LICENSE file)
13
14 from datetime import timedelta, datetime, date
15
16 import six
17 from dateutil.parser import parse
18
19
20 class HolidayBase(dict):
21 PROVINCES = []
22
23 def __init__(
24 self, years=[], expand=True, observed=True, prov=None, state=None
25 ):
26 self.observed = observed
27 self.expand = expand
28 if isinstance(years, int):
29 years = [
30 years,
31 ]
32 self.years = set(years)
33 if not getattr(self, "prov", False):
34 self.prov = prov
35 self.state = state
36 for year in list(self.years):
37 self._populate(year)
38
39 def __setattr__(self, key, value):
40 if key == "observed" and len(self) > 0:
41 dict.__setattr__(self, key, value)
42 if value is True:
43 # Add (Observed) dates
44 years = list(self.years)
45 self.years = set()
46 self.clear()
47 for year in years:
48 self._populate(year)
49 else:
50 # Remove (Observed) dates
51 for k, v in list(self.items()):
52 if v.find("Observed") >= 0:
53 del self[k]
54 else:
55 return dict.__setattr__(self, key, value)
56
57 def __keytransform__(self, key):
58 if isinstance(key, datetime):
59 key = key.date()
60 elif isinstance(key, date):
61 key = key
62 elif isinstance(key, int) or isinstance(key, float):
63 key = datetime.utcfromtimestamp(key).date()
64 elif isinstance(key, six.string_types):
65 try:
66 key = parse(key).date()
67 except (ValueError, OverflowError):
68 raise ValueError("Cannot parse date from string '%s'" % key)
69 else:
70 raise TypeError("Cannot convert type '%s' to date." % type(key))
71
72 if self.expand and key.year not in self.years:
73 self.years.add(key.year)
74 self._populate(key.year)
75 return key
76
77 def __contains__(self, key):
78 return dict.__contains__(self, self.__keytransform__(key))
79
80 def __getitem__(self, key):
81 if isinstance(key, slice):
82 if not key.start or not key.stop:
83 raise ValueError("Both start and stop must be given.")
84
85 start = self.__keytransform__(key.start)
86 stop = self.__keytransform__(key.stop)
87
88 if key.step is None:
89 step = 1
90 elif isinstance(key.step, timedelta):
91 step = key.step.days
92 elif isinstance(key.step, int):
93 step = key.step
94 else:
95 raise TypeError(
96 "Cannot convert type '%s' to int." % type(key.step)
97 )
98
99 if step == 0:
100 raise ValueError("Step value must not be zero.")
101
102 date_diff = stop - start
103 if date_diff.days < 0 <= step or date_diff.days >= 0 > step:
104 step *= -1
105
106 days_in_range = []
107 for delta_days in range(0, date_diff.days, step):
108 day = start + timedelta(days=delta_days)
109 try:
110 dict.__getitem__(self, day)
111 days_in_range.append(day)
112 except KeyError:
113 pass
114 return days_in_range
115 return dict.__getitem__(self, self.__keytransform__(key))
116
117 def __setitem__(self, key, value):
118 if key in self:
119 if self.get(key).find(value) < 0 and value.find(self.get(key)) < 0:
120 value = "%s, %s" % (value, self.get(key))
121 else:
122 value = self.get(key)
123 return dict.__setitem__(self, self.__keytransform__(key), value)
124
125 def update(self, *args):
126 args = list(args)
127 for arg in args:
128 if isinstance(arg, dict):
129 for key, value in list(arg.items()):
130 self[key] = value
131 elif isinstance(arg, list):
132 for item in arg:
133 self[item] = "Holiday"
134 else:
135 self[arg] = "Holiday"
136
137 def append(self, *args):
138 return self.update(*args)
139
140 def get(self, key, default=None):
141 return dict.get(self, self.__keytransform__(key), default)
142
143 def get_list(self, key):
144 return [h for h in self.get(key, "").split(", ") if h]
145
146 def get_named(self, name):
147 # find all dates matching provided name (accepting partial
148 # strings too, case insensitive), returning them in a list
149 original_expand = self.expand
150 self.expand = False
151 matches = [key for key in self if name.lower() in self[key].lower()]
152 self.expand = original_expand
153 return matches
154
155 def pop(self, key, default=None):
156 if default is None:
157 return dict.pop(self, self.__keytransform__(key))
158 return dict.pop(self, self.__keytransform__(key), default)
159
160 def pop_named(self, name):
161 to_pop = self.get_named(name)
162 if not to_pop:
163 raise KeyError(name)
164 for key in to_pop:
165 self.pop(key)
166 return to_pop
167
168 def __eq__(self, other):
169 return dict.__eq__(self, other) and self.__dict__ == other.__dict__
170
171 def __ne__(self, other):
172 return dict.__ne__(self, other) or self.__dict__ != other.__dict__
173
174 def __add__(self, other):
175 if isinstance(other, int) and other == 0:
176 # Required to sum() list of holidays
177 # sum([h1, h2]) is equivalent to (0 + h1 + h2)
178 return self
179 elif not isinstance(other, HolidayBase):
180 raise TypeError()
181 HolidaySum = createHolidaySum(self, other)
182 country = getattr(self, "country", None) or getattr(
183 other, "country", None
184 )
185 if self.country and other.country and self.country != other.country:
186 c1 = self.country
187 if not isinstance(c1, list):
188 c1 = [c1]
189 c2 = other.country
190 if not isinstance(c2, list):
191 c2 = [c2]
192 country = c1 + c2
193 prov = getattr(self, "prov", None) or getattr(other, "prov", None)
194 if self.prov and other.prov and self.prov != other.prov:
195 p1 = self.prov if isinstance(self.prov, list) else [self.prov]
196 p2 = other.prov if isinstance(other.prov, list) else [other.prov]
197 prov = p1 + p2
198 return HolidaySum(
199 years=(self.years | other.years),
200 expand=(self.expand or other.expand),
201 observed=(self.observed or other.observed),
202 country=country,
203 prov=prov,
204 )
205
206 def __radd__(self, other):
207 return self.__add__(other)
208
209 def _populate(self, year):
210 pass
211
212
213 def createHolidaySum(h1, h2):
214 class HolidaySum(HolidayBase):
215 def __init__(self, country, **kwargs):
216 self.country = country
217 self.holidays = []
218 if getattr(h1, "holidays", False):
219 for h in h1.holidays:
220 self.holidays.append(h)
221 else:
222 self.holidays.append(h1)
223 if getattr(h2, "holidays", False):
224 for h in h2.holidays:
225 self.holidays.append(h)
226 else:
227 self.holidays.append(h2)
228 HolidayBase.__init__(self, **kwargs)
229
230 def _populate(self, year):
231 for h in self.holidays[::-1]:
232 h._populate(year)
233 self.update(h)
234
235 return HolidaySum
236
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/holidays/holiday_base.py b/holidays/holiday_base.py
--- a/holidays/holiday_base.py
+++ b/holidays/holiday_base.py
@@ -209,6 +209,9 @@
def _populate(self, year):
pass
+ def __reduce__(self):
+ return super(HolidayBase, self).__reduce__()
+
def createHolidaySum(h1, h2):
class HolidaySum(HolidayBase):
| {"golden_diff": "diff --git a/holidays/holiday_base.py b/holidays/holiday_base.py\n--- a/holidays/holiday_base.py\n+++ b/holidays/holiday_base.py\n@@ -209,6 +209,9 @@\n def _populate(self, year):\n pass\n \n+ def __reduce__(self):\n+ return super(HolidayBase, self).__reduce__()\n+\n \n def createHolidaySum(h1, h2):\n class HolidaySum(HolidayBase):\n", "issue": "Can't un-pickle a `HolidayBase`\nSeems that after a holidays class, e.g. `holidays.UnitedStates()` is used once, it can't be un-pickled.\r\n\r\nFor example, this snippet:\r\n\r\n```python\r\nimport holidays\r\nimport pickle\r\nfrom datetime import datetime\r\n\r\n# Works:\r\nus_holidays = holidays.UnitedStates()\r\nus_holidays_ = pickle.loads(pickle.dumps(us_holidays))\r\nb = datetime.fromisoformat(\"2020-01-01\") in us_holidays_\r\n\r\n# Fails:\r\nus_holidays = holidays.UnitedStates()\r\nb = datetime.fromisoformat(\"2020-01-01\") in us_holidays\r\ndump = pickle.dumps(us_holidays)\r\npickle.loads(dump) # <- exception\r\n```\r\n\r\nRaises the following exception from the last line:\r\n```\r\n~/.local/share/virtualenvs/sibylla-v2-LxBhzJgn/lib/python3.8/site-packages/holidays/holiday_base.py in __setitem__(self, key, value)\r\n 116\r\n 117 def __setitem__(self, key, value):\r\n--> 118 if key in self:\r\n 119 if self.get(key).find(value) < 0 \\\r\n 120 and value.find(self.get(key)) < 0:\r\n\r\n~/.local/share/virtualenvs/sibylla-v2-LxBhzJgn/lib/python3.8/site-packages/holidays/holiday_base.py in __contains__(self, key)\r\n 73\r\n 74 def __contains__(self, key):\r\n---> 75 return dict.__contains__(self, self.__keytransform__(key))\r\n 76\r\n 77 def __getitem__(self, key):\r\n\r\n~/.local/share/virtualenvs/sibylla-v2-LxBhzJgn/lib/python3.8/site-packages/holidays/holiday_base.py in __keytransform__(self, key)\r\n 67 raise TypeError(\"Cannot convert type '%s' to date.\" % type(key))\r\n 68\r\n---> 69 if self.expand and key.year not in self.years:\r\n 70 self.years.add(key.year)\r\n 71 self._populate(key.year)\r\n```\r\n\r\nThe `expand` attribute is set by `__init__`, but it's not there during deserialization via unpickling.\r\nI think it's because the `HolidayBase` inherits from dict and there's some weirdness there - it seems to first populate the dict in the deserialized object and only then sets the attributes from the state. But since `HolidayBase` overrides `__setitem__` and in this override it's using state attributes that weren't yet set on the object, the `expand` attribute is missing.\r\n\r\nTested with `holidays=='0.10.4'`.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. 
It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Author: ryanss <[email protected]> (c) 2014-2017\n# dr-prodigy <[email protected]> (c) 2017-2021\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\n\nfrom datetime import timedelta, datetime, date\n\nimport six\nfrom dateutil.parser import parse\n\n\nclass HolidayBase(dict):\n PROVINCES = []\n\n def __init__(\n self, years=[], expand=True, observed=True, prov=None, state=None\n ):\n self.observed = observed\n self.expand = expand\n if isinstance(years, int):\n years = [\n years,\n ]\n self.years = set(years)\n if not getattr(self, \"prov\", False):\n self.prov = prov\n self.state = state\n for year in list(self.years):\n self._populate(year)\n\n def __setattr__(self, key, value):\n if key == \"observed\" and len(self) > 0:\n dict.__setattr__(self, key, value)\n if value is True:\n # Add (Observed) dates\n years = list(self.years)\n self.years = set()\n self.clear()\n for year in years:\n self._populate(year)\n else:\n # Remove (Observed) dates\n for k, v in list(self.items()):\n if v.find(\"Observed\") >= 0:\n del self[k]\n else:\n return dict.__setattr__(self, key, value)\n\n def __keytransform__(self, key):\n if isinstance(key, datetime):\n key = key.date()\n elif isinstance(key, date):\n key = key\n elif isinstance(key, int) or isinstance(key, float):\n key = datetime.utcfromtimestamp(key).date()\n elif isinstance(key, six.string_types):\n try:\n key = parse(key).date()\n except (ValueError, OverflowError):\n raise ValueError(\"Cannot parse date from string '%s'\" % key)\n else:\n raise TypeError(\"Cannot convert type '%s' to date.\" % type(key))\n\n if self.expand and key.year not in self.years:\n self.years.add(key.year)\n self._populate(key.year)\n return key\n\n def __contains__(self, key):\n return dict.__contains__(self, self.__keytransform__(key))\n\n def __getitem__(self, key):\n if isinstance(key, slice):\n if not key.start or not key.stop:\n raise ValueError(\"Both start and stop must be given.\")\n\n start = self.__keytransform__(key.start)\n stop = self.__keytransform__(key.stop)\n\n if key.step is None:\n step = 1\n elif isinstance(key.step, timedelta):\n step = key.step.days\n elif isinstance(key.step, int):\n step = key.step\n else:\n raise TypeError(\n \"Cannot convert type '%s' to int.\" % type(key.step)\n )\n\n if step == 0:\n raise ValueError(\"Step value must not be zero.\")\n\n date_diff = stop - start\n if date_diff.days < 0 <= step or date_diff.days >= 0 > step:\n step *= -1\n\n days_in_range = []\n for delta_days in range(0, date_diff.days, step):\n day = start + timedelta(days=delta_days)\n try:\n dict.__getitem__(self, day)\n days_in_range.append(day)\n except KeyError:\n pass\n return days_in_range\n return dict.__getitem__(self, self.__keytransform__(key))\n\n def __setitem__(self, key, value):\n if key in self:\n if self.get(key).find(value) < 0 and value.find(self.get(key)) < 0:\n value = \"%s, %s\" % (value, self.get(key))\n else:\n value = self.get(key)\n return dict.__setitem__(self, self.__keytransform__(key), value)\n\n def update(self, *args):\n args = list(args)\n for arg in args:\n if isinstance(arg, dict):\n for key, value in list(arg.items()):\n self[key] = value\n elif isinstance(arg, list):\n for item in arg:\n self[item] = \"Holiday\"\n else:\n self[arg] = \"Holiday\"\n\n def append(self, *args):\n return self.update(*args)\n\n def get(self, key, default=None):\n return dict.get(self, 
self.__keytransform__(key), default)\n\n def get_list(self, key):\n return [h for h in self.get(key, \"\").split(\", \") if h]\n\n def get_named(self, name):\n # find all dates matching provided name (accepting partial\n # strings too, case insensitive), returning them in a list\n original_expand = self.expand\n self.expand = False\n matches = [key for key in self if name.lower() in self[key].lower()]\n self.expand = original_expand\n return matches\n\n def pop(self, key, default=None):\n if default is None:\n return dict.pop(self, self.__keytransform__(key))\n return dict.pop(self, self.__keytransform__(key), default)\n\n def pop_named(self, name):\n to_pop = self.get_named(name)\n if not to_pop:\n raise KeyError(name)\n for key in to_pop:\n self.pop(key)\n return to_pop\n\n def __eq__(self, other):\n return dict.__eq__(self, other) and self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n return dict.__ne__(self, other) or self.__dict__ != other.__dict__\n\n def __add__(self, other):\n if isinstance(other, int) and other == 0:\n # Required to sum() list of holidays\n # sum([h1, h2]) is equivalent to (0 + h1 + h2)\n return self\n elif not isinstance(other, HolidayBase):\n raise TypeError()\n HolidaySum = createHolidaySum(self, other)\n country = getattr(self, \"country\", None) or getattr(\n other, \"country\", None\n )\n if self.country and other.country and self.country != other.country:\n c1 = self.country\n if not isinstance(c1, list):\n c1 = [c1]\n c2 = other.country\n if not isinstance(c2, list):\n c2 = [c2]\n country = c1 + c2\n prov = getattr(self, \"prov\", None) or getattr(other, \"prov\", None)\n if self.prov and other.prov and self.prov != other.prov:\n p1 = self.prov if isinstance(self.prov, list) else [self.prov]\n p2 = other.prov if isinstance(other.prov, list) else [other.prov]\n prov = p1 + p2\n return HolidaySum(\n years=(self.years | other.years),\n expand=(self.expand or other.expand),\n observed=(self.observed or other.observed),\n country=country,\n prov=prov,\n )\n\n def __radd__(self, other):\n return self.__add__(other)\n\n def _populate(self, year):\n pass\n\n\ndef createHolidaySum(h1, h2):\n class HolidaySum(HolidayBase):\n def __init__(self, country, **kwargs):\n self.country = country\n self.holidays = []\n if getattr(h1, \"holidays\", False):\n for h in h1.holidays:\n self.holidays.append(h)\n else:\n self.holidays.append(h1)\n if getattr(h2, \"holidays\", False):\n for h in h2.holidays:\n self.holidays.append(h)\n else:\n self.holidays.append(h2)\n HolidayBase.__init__(self, **kwargs)\n\n def _populate(self, year):\n for h in self.holidays[::-1]:\n h._populate(year)\n self.update(h)\n\n return HolidaySum\n", "path": "holidays/holiday_base.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. 
It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Author: ryanss <[email protected]> (c) 2014-2017\n# dr-prodigy <[email protected]> (c) 2017-2021\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\n\nfrom datetime import timedelta, datetime, date\n\nimport six\nfrom dateutil.parser import parse\n\n\nclass HolidayBase(dict):\n PROVINCES = []\n\n def __init__(\n self, years=[], expand=True, observed=True, prov=None, state=None\n ):\n self.observed = observed\n self.expand = expand\n if isinstance(years, int):\n years = [\n years,\n ]\n self.years = set(years)\n if not getattr(self, \"prov\", False):\n self.prov = prov\n self.state = state\n for year in list(self.years):\n self._populate(year)\n\n def __setattr__(self, key, value):\n if key == \"observed\" and len(self) > 0:\n dict.__setattr__(self, key, value)\n if value is True:\n # Add (Observed) dates\n years = list(self.years)\n self.years = set()\n self.clear()\n for year in years:\n self._populate(year)\n else:\n # Remove (Observed) dates\n for k, v in list(self.items()):\n if v.find(\"Observed\") >= 0:\n del self[k]\n else:\n return dict.__setattr__(self, key, value)\n\n def __keytransform__(self, key):\n if isinstance(key, datetime):\n key = key.date()\n elif isinstance(key, date):\n key = key\n elif isinstance(key, int) or isinstance(key, float):\n key = datetime.utcfromtimestamp(key).date()\n elif isinstance(key, six.string_types):\n try:\n key = parse(key).date()\n except (ValueError, OverflowError):\n raise ValueError(\"Cannot parse date from string '%s'\" % key)\n else:\n raise TypeError(\"Cannot convert type '%s' to date.\" % type(key))\n\n if self.expand and key.year not in self.years:\n self.years.add(key.year)\n self._populate(key.year)\n return key\n\n def __contains__(self, key):\n return dict.__contains__(self, self.__keytransform__(key))\n\n def __getitem__(self, key):\n if isinstance(key, slice):\n if not key.start or not key.stop:\n raise ValueError(\"Both start and stop must be given.\")\n\n start = self.__keytransform__(key.start)\n stop = self.__keytransform__(key.stop)\n\n if key.step is None:\n step = 1\n elif isinstance(key.step, timedelta):\n step = key.step.days\n elif isinstance(key.step, int):\n step = key.step\n else:\n raise TypeError(\n \"Cannot convert type '%s' to int.\" % type(key.step)\n )\n\n if step == 0:\n raise ValueError(\"Step value must not be zero.\")\n\n date_diff = stop - start\n if date_diff.days < 0 <= step or date_diff.days >= 0 > step:\n step *= -1\n\n days_in_range = []\n for delta_days in range(0, date_diff.days, step):\n day = start + timedelta(days=delta_days)\n try:\n dict.__getitem__(self, day)\n days_in_range.append(day)\n except KeyError:\n pass\n return days_in_range\n return dict.__getitem__(self, self.__keytransform__(key))\n\n def __setitem__(self, key, value):\n if key in self:\n if self.get(key).find(value) < 0 and value.find(self.get(key)) < 0:\n value = \"%s, %s\" % (value, self.get(key))\n else:\n value = self.get(key)\n return dict.__setitem__(self, self.__keytransform__(key), value)\n\n def update(self, *args):\n args = list(args)\n for arg in args:\n if isinstance(arg, dict):\n for key, value in list(arg.items()):\n self[key] = value\n elif isinstance(arg, list):\n for item in arg:\n self[item] = \"Holiday\"\n else:\n self[arg] = \"Holiday\"\n\n def append(self, *args):\n return self.update(*args)\n\n def get(self, key, default=None):\n return dict.get(self, 
self.__keytransform__(key), default)\n\n def get_list(self, key):\n return [h for h in self.get(key, \"\").split(\", \") if h]\n\n def get_named(self, name):\n # find all dates matching provided name (accepting partial\n # strings too, case insensitive), returning them in a list\n original_expand = self.expand\n self.expand = False\n matches = [key for key in self if name.lower() in self[key].lower()]\n self.expand = original_expand\n return matches\n\n def pop(self, key, default=None):\n if default is None:\n return dict.pop(self, self.__keytransform__(key))\n return dict.pop(self, self.__keytransform__(key), default)\n\n def pop_named(self, name):\n to_pop = self.get_named(name)\n if not to_pop:\n raise KeyError(name)\n for key in to_pop:\n self.pop(key)\n return to_pop\n\n def __eq__(self, other):\n return dict.__eq__(self, other) and self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n return dict.__ne__(self, other) or self.__dict__ != other.__dict__\n\n def __add__(self, other):\n if isinstance(other, int) and other == 0:\n # Required to sum() list of holidays\n # sum([h1, h2]) is equivalent to (0 + h1 + h2)\n return self\n elif not isinstance(other, HolidayBase):\n raise TypeError()\n HolidaySum = createHolidaySum(self, other)\n country = getattr(self, \"country\", None) or getattr(\n other, \"country\", None\n )\n if self.country and other.country and self.country != other.country:\n c1 = self.country\n if not isinstance(c1, list):\n c1 = [c1]\n c2 = other.country\n if not isinstance(c2, list):\n c2 = [c2]\n country = c1 + c2\n prov = getattr(self, \"prov\", None) or getattr(other, \"prov\", None)\n if self.prov and other.prov and self.prov != other.prov:\n p1 = self.prov if isinstance(self.prov, list) else [self.prov]\n p2 = other.prov if isinstance(other.prov, list) else [other.prov]\n prov = p1 + p2\n return HolidaySum(\n years=(self.years | other.years),\n expand=(self.expand or other.expand),\n observed=(self.observed or other.observed),\n country=country,\n prov=prov,\n )\n\n def __radd__(self, other):\n return self.__add__(other)\n\n def _populate(self, year):\n pass\n\n def __reduce__(self):\n return super(HolidayBase, self).__reduce__()\n\n\ndef createHolidaySum(h1, h2):\n class HolidaySum(HolidayBase):\n def __init__(self, country, **kwargs):\n self.country = country\n self.holidays = []\n if getattr(h1, \"holidays\", False):\n for h in h1.holidays:\n self.holidays.append(h)\n else:\n self.holidays.append(h1)\n if getattr(h2, \"holidays\", False):\n for h in h2.holidays:\n self.holidays.append(h)\n else:\n self.holidays.append(h2)\n HolidayBase.__init__(self, **kwargs)\n\n def _populate(self, year):\n for h in self.holidays[::-1]:\n h._populate(year)\n self.update(h)\n\n return HolidaySum\n", "path": "holidays/holiday_base.py"}]} | 3,336 | 101 |
gh_patches_debug_33192 | rasdani/github-patches | git_diff | conda__conda-build-1471 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
conda-build no longer generates .exe files for my scripts on Windows
I notice that conda-build, version 1.21.7, used to generate .exe files to invoke my scripts, but 2.0.6 doesn't seem to do that. I've got Python scripts, goodstuff, goodstuff-data, and goodstuff-library, which are marked as scripts in my setup.py file. 1.21.7 turns them into goodstuff-script.py and goodstuff.exe, goodstuff-data-script.py and goodstuff-data.exe, etc., which I found very useful.
Is there any way to get that 1.21.7 functionality back again?
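One quick way to see what the staged scripts look like is to scan the environment's `Scripts` directory for extensionless files with a `#!python` shebang, i.e. unix-style entry points that never got the `foo-script.py` / `foo.exe` pair. This is only a sketch and assumes it runs on Windows with the environment's own Python (so `sys.prefix` points at the env):
```
import os
import sys

scripts_dir = os.path.join(sys.prefix, 'Scripts')  # Windows env layout
for fn in os.listdir(scripts_dir):
    path = os.path.join(scripts_dir, fn)
    if not os.path.isfile(path) or '.' in fn:
        continue  # .exe launchers and -script.py wrappers have extensions
    with open(path, errors='ignore') as f:
        first = f.readline().lower()
    if first.startswith('#!') and 'python' in first:
        print('unix-style script without a .exe launcher:', fn)
```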
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda_build/windows.py`
Content:
```
1 from __future__ import absolute_import, division, print_function
2
3 import os
4 import sys
5 from os.path import isdir, join
6
7 # importing setuptools patches distutils so that it knows how to find VC for python 2.7
8 import setuptools # noqa
9 # Leverage the hard work done by setuptools/distutils to find vcvarsall using
10 # either the registry or the VS**COMNTOOLS environment variable
11 from distutils.msvc9compiler import find_vcvarsall as distutils_find_vcvarsall
12 from distutils.msvc9compiler import Reg, WINSDK_BASE
13
14 from .conda_interface import bits
15
16 from conda_build import environ
17 from conda_build.utils import _check_call, root_script_dir, path_prepended
18
19
20 assert sys.platform == 'win32'
21
22
23 VS_VERSION_STRING = {
24 '8.0': 'Visual Studio 8 2005',
25 '9.0': 'Visual Studio 9 2008',
26 '10.0': 'Visual Studio 10 2010',
27 '11.0': 'Visual Studio 11 2012',
28 '12.0': 'Visual Studio 12 2013',
29 '14.0': 'Visual Studio 14 2015'
30 }
31
32
33 def build_vcvarsall_vs_path(version):
34 """
35 Given the Visual Studio version, returns the default path to the
36 Microsoft Visual Studio vcvarsall.bat file.
37 Expected versions are of the form {9.0, 10.0, 12.0, 14.0}
38 """
39 # Set up a load of paths that can be imported from the tests
40 if 'ProgramFiles(x86)' in os.environ:
41 PROGRAM_FILES_PATH = os.environ['ProgramFiles(x86)']
42 else:
43 PROGRAM_FILES_PATH = os.environ['ProgramFiles']
44
45 flatversion = str(version).replace('.', '')
46 vstools = "VS{0}COMNTOOLS".format(flatversion)
47
48 if vstools in os.environ:
49 return os.path.join(os.environ[vstools], '..\\..\\VC\\vcvarsall.bat')
50 else:
51 # prefer looking at env var; fall back to program files defaults
52 return os.path.join(PROGRAM_FILES_PATH,
53 'Microsoft Visual Studio {}'.format(version), 'VC',
54 'vcvarsall.bat')
55
56
57 def msvc_env_cmd(bits, config, override=None):
58 arch_selector = 'x86' if bits == 32 else 'amd64'
59
60 msvc_env_lines = []
61
62 version = None
63 if override is not None:
64 version = override
65
66 # The DISTUTILS_USE_SDK variable tells distutils to not try and validate
67 # the MSVC compiler. For < 3.5 this still forcibly looks for 'cl.exe'.
68 # For > 3.5 it literally just skips the validation logic.
69 # See distutils _msvccompiler.py and msvc9compiler.py / msvccompiler.py
70 # for more information.
71 msvc_env_lines.append('set DISTUTILS_USE_SDK=1')
72 # This is also required to hit the 'don't validate' logic on < 3.5.
73 # For > 3.5 this is ignored.
74 msvc_env_lines.append('set MSSdk=1')
75
76 if not version:
77 if config.PY3K and config.use_MSVC2015:
78 version = '14.0'
79 elif config.PY3K:
80 version = '10.0'
81 else:
82 version = '9.0'
83
84 if float(version) >= 14.0:
85 # For Python 3.5+, ensure that we link with the dynamic runtime. See
86 # http://stevedower.id.au/blog/building-for-python-3-5-part-two/ for more info
87 msvc_env_lines.append('set PY_VCRUNTIME_REDIST=%LIBRARY_BIN%\\vcruntime{0}.dll'.format(
88 version.replace('.', '')))
89
90 vcvarsall_vs_path = build_vcvarsall_vs_path(version)
91
92 def build_vcvarsall_cmd(cmd, arch=arch_selector):
93 # Default argument `arch_selector` is defined above
94 return 'call "{cmd}" {arch}'.format(cmd=cmd, arch=arch)
95
96 msvc_env_lines.append('set "VS_VERSION={}"'.format(version))
97 msvc_env_lines.append('set "VS_MAJOR={}"'.format(version.split('.')[0]))
98 msvc_env_lines.append('set "VS_YEAR={}"'.format(VS_VERSION_STRING[version][-4:]))
99 msvc_env_lines.append('set "CMAKE_GENERATOR={}"'.format(VS_VERSION_STRING[version] +
100 {64: ' Win64', 32: ''}[bits]))
101 # tell msys2 to ignore path conversions for issue-causing windows-style flags in build
102 # See https://github.com/conda-forge/icu-feedstock/pull/5
103 msvc_env_lines.append('set "MSYS2_ARG_CONV_EXCL=/AI;/AL;/OUT;/out;%MSYS2_ARG_CONV_EXCL%"')
104 msvc_env_lines.append('set "MSYS2_ENV_CONV_EXCL=CL"')
105 if version == '10.0':
106 try:
107 WIN_SDK_71_PATH = Reg.get_value(os.path.join(WINSDK_BASE, 'v7.1'),
108 'installationfolder')
109 WIN_SDK_71_BAT_PATH = os.path.join(WIN_SDK_71_PATH, 'Bin', 'SetEnv.cmd')
110
111 win_sdk_arch = '/Release /x86' if bits == 32 else '/Release /x64'
112 win_sdk_cmd = build_vcvarsall_cmd(WIN_SDK_71_BAT_PATH, arch=win_sdk_arch)
113
114 # There are two methods of building Python 3.3 and 3.4 extensions (both
115 # of which required Visual Studio 2010 - as explained in the Python wiki
116 # https://wiki.python.org/moin/WindowsCompilers)
117 # 1) Use the Windows SDK 7.1
118 # 2) Use Visual Studio 2010 (any edition)
119 # However, VS2010 never shipped with a 64-bit compiler, so in this case
120 # **only** option (1) applies. For this reason, we always try and
121 # activate the Windows SDK first. Unfortunately, unsuccessfully setting
122 # up the environment does **not EXIT 1** and therefore we must fall
123 # back to attempting to set up VS2010.
124 # DelayedExpansion is required for the SetEnv.cmd
125 msvc_env_lines.append('Setlocal EnableDelayedExpansion')
126 msvc_env_lines.append(win_sdk_cmd)
127 # If the WindowsSDKDir environment variable has not been successfully
128 # set then try activating VS2010
129 msvc_env_lines.append('if not "%WindowsSDKDir%" == "{}" ( {} )'.format(
130 WIN_SDK_71_PATH, build_vcvarsall_cmd(vcvarsall_vs_path)))
131 # sdk is not installed. Fall back to only trying VS 2010
132 except KeyError:
133 msvc_env_lines.append(build_vcvarsall_cmd(vcvarsall_vs_path))
134 elif version == '9.0':
135 # Get the Visual Studio 2008 path (not the Visual C++ for Python path)
136 # and get the 'vcvars64.bat' from inside the bin (in the directory above
137 # that returned by distutils_find_vcvarsall)
138 try:
139 VCVARS64_VS9_BAT_PATH = os.path.join(os.path.dirname(distutils_find_vcvarsall(9)),
140 'bin', 'vcvars64.bat')
141 # there's an exception if VS or the VC compiler for python are not actually installed.
142 except (KeyError, TypeError):
143 VCVARS64_VS9_BAT_PATH = None
144
145 error1 = 'if errorlevel 1 {}'
146
147 # Prefer VS9 proper over Microsoft Visual C++ Compiler for Python 2.7
148 msvc_env_lines.append(build_vcvarsall_cmd(vcvarsall_vs_path))
149 # The Visual Studio 2008 Express edition does not properly contain
150 # the amd64 build files, so we call the vcvars64.bat manually,
151 # rather than using the vcvarsall.bat which would try and call the
152 # missing bat file.
153 if arch_selector == 'amd64' and VCVARS64_VS9_BAT_PATH:
154 msvc_env_lines.append(error1.format(
155 build_vcvarsall_cmd(VCVARS64_VS9_BAT_PATH)))
156 # Otherwise, fall back to icrosoft Visual C++ Compiler for Python 2.7+
157 # by using the logic provided by setuptools
158 msvc_env_lines.append(error1.format(
159 build_vcvarsall_cmd(distutils_find_vcvarsall(9))))
160 else:
161 # Visual Studio 14 or otherwise
162 msvc_env_lines.append(build_vcvarsall_cmd(vcvarsall_vs_path))
163
164 return '\n'.join(msvc_env_lines) + '\n'
165
166
167 def build(m, bld_bat, config):
168 with path_prepended(config.build_prefix):
169 env = environ.get_dict(config=config, m=m)
170 env["CONDA_BUILD_STATE"] = "BUILD"
171
172 for name in 'BIN', 'INC', 'LIB':
173 path = env['LIBRARY_' + name]
174 if not isdir(path):
175 os.makedirs(path)
176
177 src_dir = config.work_dir
178 if os.path.isfile(bld_bat):
179 with open(bld_bat) as fi:
180 data = fi.read()
181 with open(join(src_dir, 'bld.bat'), 'w') as fo:
182 # more debuggable with echo on
183 fo.write('@echo on\n')
184 for key, value in env.items():
185 fo.write('set "{key}={value}"\n'.format(key=key, value=value))
186 fo.write(msvc_env_cmd(bits=bits, config=config,
187 override=m.get_value('build/msvc_compiler', None)))
188 # Reset echo on, because MSVC scripts might have turned it off
189 fo.write('@echo on\n')
190 fo.write('set "INCLUDE={};%INCLUDE%"\n'.format(env["LIBRARY_INC"]))
191 fo.write('set "LIB={};%LIB%"\n'.format(env["LIBRARY_LIB"]))
192 if config.activate:
193 fo.write('call "{conda_root}\\activate.bat" "{prefix}"\n'.format(
194 conda_root=root_script_dir,
195 prefix=config.build_prefix))
196 fo.write("REM ===== end generated header =====\n")
197 fo.write(data)
198
199 cmd = ['cmd.exe', '/c', 'bld.bat']
200 _check_call(cmd, cwd=src_dir)
201
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conda_build/windows.py b/conda_build/windows.py
--- a/conda_build/windows.py
+++ b/conda_build/windows.py
@@ -2,7 +2,7 @@
import os
import sys
-from os.path import isdir, join
+from os.path import isdir, join, dirname, isfile
# importing setuptools patches distutils so that it knows how to find VC for python 2.7
import setuptools # noqa
@@ -14,7 +14,7 @@
from .conda_interface import bits
from conda_build import environ
-from conda_build.utils import _check_call, root_script_dir, path_prepended
+from conda_build.utils import _check_call, root_script_dir, path_prepended, copy_into
assert sys.platform == 'win32'
@@ -30,6 +30,36 @@
}
+def fix_staged_scripts(scripts_dir):
+ """
+ Fixes scripts which have been installed unix-style to have a .bat
+ helper
+ """
+ if not isdir(scripts_dir):
+ return
+ for fn in os.listdir(scripts_dir):
+ # process all the extensionless files
+ if not isfile(join(scripts_dir, fn)) or '.' in fn:
+ continue
+
+ with open(join(scripts_dir, fn)) as f:
+ line = f.readline().lower()
+ # If it's a #!python script
+ if not (line.startswith('#!') and 'python' in line.lower()):
+ continue
+ print('Adjusting unix-style #! script %s, '
+ 'and adding a .bat file for it' % fn)
+ # copy it with a .py extension (skipping that first #! line)
+ with open(join(scripts_dir, fn + '-script.py'), 'w') as fo:
+ fo.write(f.read())
+ # now create the .exe file
+ copy_into(join(dirname(__file__), 'cli-%d.exe' % bits),
+ join(scripts_dir, fn + '.exe'))
+
+ # remove the original script
+ os.remove(join(scripts_dir, fn))
+
+
def build_vcvarsall_vs_path(version):
"""
Given the Visual Studio version, returns the default path to the
@@ -198,3 +228,5 @@
cmd = ['cmd.exe', '/c', 'bld.bat']
_check_call(cmd, cwd=src_dir)
+
+ fix_staged_scripts(join(config.build_prefix, 'Scripts'))
| {"golden_diff": "diff --git a/conda_build/windows.py b/conda_build/windows.py\n--- a/conda_build/windows.py\n+++ b/conda_build/windows.py\n@@ -2,7 +2,7 @@\n \n import os\n import sys\n-from os.path import isdir, join\n+from os.path import isdir, join, dirname, isfile\n \n # importing setuptools patches distutils so that it knows how to find VC for python 2.7\n import setuptools # noqa\n@@ -14,7 +14,7 @@\n from .conda_interface import bits\n \n from conda_build import environ\n-from conda_build.utils import _check_call, root_script_dir, path_prepended\n+from conda_build.utils import _check_call, root_script_dir, path_prepended, copy_into\n \n \n assert sys.platform == 'win32'\n@@ -30,6 +30,36 @@\n }\n \n \n+def fix_staged_scripts(scripts_dir):\n+ \"\"\"\n+ Fixes scripts which have been installed unix-style to have a .bat\n+ helper\n+ \"\"\"\n+ if not isdir(scripts_dir):\n+ return\n+ for fn in os.listdir(scripts_dir):\n+ # process all the extensionless files\n+ if not isfile(join(scripts_dir, fn)) or '.' in fn:\n+ continue\n+\n+ with open(join(scripts_dir, fn)) as f:\n+ line = f.readline().lower()\n+ # If it's a #!python script\n+ if not (line.startswith('#!') and 'python' in line.lower()):\n+ continue\n+ print('Adjusting unix-style #! script %s, '\n+ 'and adding a .bat file for it' % fn)\n+ # copy it with a .py extension (skipping that first #! line)\n+ with open(join(scripts_dir, fn + '-script.py'), 'w') as fo:\n+ fo.write(f.read())\n+ # now create the .exe file\n+ copy_into(join(dirname(__file__), 'cli-%d.exe' % bits),\n+ join(scripts_dir, fn + '.exe'))\n+\n+ # remove the original script\n+ os.remove(join(scripts_dir, fn))\n+\n+\n def build_vcvarsall_vs_path(version):\n \"\"\"\n Given the Visual Studio version, returns the default path to the\n@@ -198,3 +228,5 @@\n \n cmd = ['cmd.exe', '/c', 'bld.bat']\n _check_call(cmd, cwd=src_dir)\n+\n+ fix_staged_scripts(join(config.build_prefix, 'Scripts'))\n", "issue": "conda-build no longer generates .exe files for my scripts on Windows\nI notice that conda-build, verson 1.21.7, used to generate .exe files to invoke my scripts. But 2.0.6 doesn't seem to do that. I've got Python scripts, goodstuff, goodstuff-data, and goodstuff-library, which are marked as scripts in my setup.py file. 1.21.7 turns them into goodstuff-script.py and goodstuff.exe, goodstuff-data-script.py and goodstuff-data.exe, etc. 
Which I found very useful.\n\nIs there any way to get that 1.21.functionality back again?\n\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport os\nimport sys\nfrom os.path import isdir, join\n\n# importing setuptools patches distutils so that it knows how to find VC for python 2.7\nimport setuptools # noqa\n# Leverage the hard work done by setuptools/distutils to find vcvarsall using\n# either the registry or the VS**COMNTOOLS environment variable\nfrom distutils.msvc9compiler import find_vcvarsall as distutils_find_vcvarsall\nfrom distutils.msvc9compiler import Reg, WINSDK_BASE\n\nfrom .conda_interface import bits\n\nfrom conda_build import environ\nfrom conda_build.utils import _check_call, root_script_dir, path_prepended\n\n\nassert sys.platform == 'win32'\n\n\nVS_VERSION_STRING = {\n '8.0': 'Visual Studio 8 2005',\n '9.0': 'Visual Studio 9 2008',\n '10.0': 'Visual Studio 10 2010',\n '11.0': 'Visual Studio 11 2012',\n '12.0': 'Visual Studio 12 2013',\n '14.0': 'Visual Studio 14 2015'\n}\n\n\ndef build_vcvarsall_vs_path(version):\n \"\"\"\n Given the Visual Studio version, returns the default path to the\n Microsoft Visual Studio vcvarsall.bat file.\n Expected versions are of the form {9.0, 10.0, 12.0, 14.0}\n \"\"\"\n # Set up a load of paths that can be imported from the tests\n if 'ProgramFiles(x86)' in os.environ:\n PROGRAM_FILES_PATH = os.environ['ProgramFiles(x86)']\n else:\n PROGRAM_FILES_PATH = os.environ['ProgramFiles']\n\n flatversion = str(version).replace('.', '')\n vstools = \"VS{0}COMNTOOLS\".format(flatversion)\n\n if vstools in os.environ:\n return os.path.join(os.environ[vstools], '..\\\\..\\\\VC\\\\vcvarsall.bat')\n else:\n # prefer looking at env var; fall back to program files defaults\n return os.path.join(PROGRAM_FILES_PATH,\n 'Microsoft Visual Studio {}'.format(version), 'VC',\n 'vcvarsall.bat')\n\n\ndef msvc_env_cmd(bits, config, override=None):\n arch_selector = 'x86' if bits == 32 else 'amd64'\n\n msvc_env_lines = []\n\n version = None\n if override is not None:\n version = override\n\n # The DISTUTILS_USE_SDK variable tells distutils to not try and validate\n # the MSVC compiler. For < 3.5 this still forcibly looks for 'cl.exe'.\n # For > 3.5 it literally just skips the validation logic.\n # See distutils _msvccompiler.py and msvc9compiler.py / msvccompiler.py\n # for more information.\n msvc_env_lines.append('set DISTUTILS_USE_SDK=1')\n # This is also required to hit the 'don't validate' logic on < 3.5.\n # For > 3.5 this is ignored.\n msvc_env_lines.append('set MSSdk=1')\n\n if not version:\n if config.PY3K and config.use_MSVC2015:\n version = '14.0'\n elif config.PY3K:\n version = '10.0'\n else:\n version = '9.0'\n\n if float(version) >= 14.0:\n # For Python 3.5+, ensure that we link with the dynamic runtime. 
See\n # http://stevedower.id.au/blog/building-for-python-3-5-part-two/ for more info\n msvc_env_lines.append('set PY_VCRUNTIME_REDIST=%LIBRARY_BIN%\\\\vcruntime{0}.dll'.format(\n version.replace('.', '')))\n\n vcvarsall_vs_path = build_vcvarsall_vs_path(version)\n\n def build_vcvarsall_cmd(cmd, arch=arch_selector):\n # Default argument `arch_selector` is defined above\n return 'call \"{cmd}\" {arch}'.format(cmd=cmd, arch=arch)\n\n msvc_env_lines.append('set \"VS_VERSION={}\"'.format(version))\n msvc_env_lines.append('set \"VS_MAJOR={}\"'.format(version.split('.')[0]))\n msvc_env_lines.append('set \"VS_YEAR={}\"'.format(VS_VERSION_STRING[version][-4:]))\n msvc_env_lines.append('set \"CMAKE_GENERATOR={}\"'.format(VS_VERSION_STRING[version] +\n {64: ' Win64', 32: ''}[bits]))\n # tell msys2 to ignore path conversions for issue-causing windows-style flags in build\n # See https://github.com/conda-forge/icu-feedstock/pull/5\n msvc_env_lines.append('set \"MSYS2_ARG_CONV_EXCL=/AI;/AL;/OUT;/out;%MSYS2_ARG_CONV_EXCL%\"')\n msvc_env_lines.append('set \"MSYS2_ENV_CONV_EXCL=CL\"')\n if version == '10.0':\n try:\n WIN_SDK_71_PATH = Reg.get_value(os.path.join(WINSDK_BASE, 'v7.1'),\n 'installationfolder')\n WIN_SDK_71_BAT_PATH = os.path.join(WIN_SDK_71_PATH, 'Bin', 'SetEnv.cmd')\n\n win_sdk_arch = '/Release /x86' if bits == 32 else '/Release /x64'\n win_sdk_cmd = build_vcvarsall_cmd(WIN_SDK_71_BAT_PATH, arch=win_sdk_arch)\n\n # There are two methods of building Python 3.3 and 3.4 extensions (both\n # of which required Visual Studio 2010 - as explained in the Python wiki\n # https://wiki.python.org/moin/WindowsCompilers)\n # 1) Use the Windows SDK 7.1\n # 2) Use Visual Studio 2010 (any edition)\n # However, VS2010 never shipped with a 64-bit compiler, so in this case\n # **only** option (1) applies. For this reason, we always try and\n # activate the Windows SDK first. Unfortunately, unsuccessfully setting\n # up the environment does **not EXIT 1** and therefore we must fall\n # back to attempting to set up VS2010.\n # DelayedExpansion is required for the SetEnv.cmd\n msvc_env_lines.append('Setlocal EnableDelayedExpansion')\n msvc_env_lines.append(win_sdk_cmd)\n # If the WindowsSDKDir environment variable has not been successfully\n # set then try activating VS2010\n msvc_env_lines.append('if not \"%WindowsSDKDir%\" == \"{}\" ( {} )'.format(\n WIN_SDK_71_PATH, build_vcvarsall_cmd(vcvarsall_vs_path)))\n # sdk is not installed. 
Fall back to only trying VS 2010\n except KeyError:\n msvc_env_lines.append(build_vcvarsall_cmd(vcvarsall_vs_path))\n elif version == '9.0':\n # Get the Visual Studio 2008 path (not the Visual C++ for Python path)\n # and get the 'vcvars64.bat' from inside the bin (in the directory above\n # that returned by distutils_find_vcvarsall)\n try:\n VCVARS64_VS9_BAT_PATH = os.path.join(os.path.dirname(distutils_find_vcvarsall(9)),\n 'bin', 'vcvars64.bat')\n # there's an exception if VS or the VC compiler for python are not actually installed.\n except (KeyError, TypeError):\n VCVARS64_VS9_BAT_PATH = None\n\n error1 = 'if errorlevel 1 {}'\n\n # Prefer VS9 proper over Microsoft Visual C++ Compiler for Python 2.7\n msvc_env_lines.append(build_vcvarsall_cmd(vcvarsall_vs_path))\n # The Visual Studio 2008 Express edition does not properly contain\n # the amd64 build files, so we call the vcvars64.bat manually,\n # rather than using the vcvarsall.bat which would try and call the\n # missing bat file.\n if arch_selector == 'amd64' and VCVARS64_VS9_BAT_PATH:\n msvc_env_lines.append(error1.format(\n build_vcvarsall_cmd(VCVARS64_VS9_BAT_PATH)))\n # Otherwise, fall back to icrosoft Visual C++ Compiler for Python 2.7+\n # by using the logic provided by setuptools\n msvc_env_lines.append(error1.format(\n build_vcvarsall_cmd(distutils_find_vcvarsall(9))))\n else:\n # Visual Studio 14 or otherwise\n msvc_env_lines.append(build_vcvarsall_cmd(vcvarsall_vs_path))\n\n return '\\n'.join(msvc_env_lines) + '\\n'\n\n\ndef build(m, bld_bat, config):\n with path_prepended(config.build_prefix):\n env = environ.get_dict(config=config, m=m)\n env[\"CONDA_BUILD_STATE\"] = \"BUILD\"\n\n for name in 'BIN', 'INC', 'LIB':\n path = env['LIBRARY_' + name]\n if not isdir(path):\n os.makedirs(path)\n\n src_dir = config.work_dir\n if os.path.isfile(bld_bat):\n with open(bld_bat) as fi:\n data = fi.read()\n with open(join(src_dir, 'bld.bat'), 'w') as fo:\n # more debuggable with echo on\n fo.write('@echo on\\n')\n for key, value in env.items():\n fo.write('set \"{key}={value}\"\\n'.format(key=key, value=value))\n fo.write(msvc_env_cmd(bits=bits, config=config,\n override=m.get_value('build/msvc_compiler', None)))\n # Reset echo on, because MSVC scripts might have turned it off\n fo.write('@echo on\\n')\n fo.write('set \"INCLUDE={};%INCLUDE%\"\\n'.format(env[\"LIBRARY_INC\"]))\n fo.write('set \"LIB={};%LIB%\"\\n'.format(env[\"LIBRARY_LIB\"]))\n if config.activate:\n fo.write('call \"{conda_root}\\\\activate.bat\" \"{prefix}\"\\n'.format(\n conda_root=root_script_dir,\n prefix=config.build_prefix))\n fo.write(\"REM ===== end generated header =====\\n\")\n fo.write(data)\n\n cmd = ['cmd.exe', '/c', 'bld.bat']\n _check_call(cmd, cwd=src_dir)\n", "path": "conda_build/windows.py"}], "after_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport os\nimport sys\nfrom os.path import isdir, join, dirname, isfile\n\n# importing setuptools patches distutils so that it knows how to find VC for python 2.7\nimport setuptools # noqa\n# Leverage the hard work done by setuptools/distutils to find vcvarsall using\n# either the registry or the VS**COMNTOOLS environment variable\nfrom distutils.msvc9compiler import find_vcvarsall as distutils_find_vcvarsall\nfrom distutils.msvc9compiler import Reg, WINSDK_BASE\n\nfrom .conda_interface import bits\n\nfrom conda_build import environ\nfrom conda_build.utils import _check_call, root_script_dir, path_prepended, copy_into\n\n\nassert sys.platform == 
'win32'\n\n\nVS_VERSION_STRING = {\n '8.0': 'Visual Studio 8 2005',\n '9.0': 'Visual Studio 9 2008',\n '10.0': 'Visual Studio 10 2010',\n '11.0': 'Visual Studio 11 2012',\n '12.0': 'Visual Studio 12 2013',\n '14.0': 'Visual Studio 14 2015'\n}\n\n\ndef fix_staged_scripts(scripts_dir):\n \"\"\"\n Fixes scripts which have been installed unix-style to have a .bat\n helper\n \"\"\"\n if not isdir(scripts_dir):\n return\n for fn in os.listdir(scripts_dir):\n # process all the extensionless files\n if not isfile(join(scripts_dir, fn)) or '.' in fn:\n continue\n\n with open(join(scripts_dir, fn)) as f:\n line = f.readline().lower()\n # If it's a #!python script\n if not (line.startswith('#!') and 'python' in line.lower()):\n continue\n print('Adjusting unix-style #! script %s, '\n 'and adding a .bat file for it' % fn)\n # copy it with a .py extension (skipping that first #! line)\n with open(join(scripts_dir, fn + '-script.py'), 'w') as fo:\n fo.write(f.read())\n # now create the .exe file\n copy_into(join(dirname(__file__), 'cli-%d.exe' % bits),\n join(scripts_dir, fn + '.exe'))\n\n # remove the original script\n os.remove(join(scripts_dir, fn))\n\n\ndef build_vcvarsall_vs_path(version):\n \"\"\"\n Given the Visual Studio version, returns the default path to the\n Microsoft Visual Studio vcvarsall.bat file.\n Expected versions are of the form {9.0, 10.0, 12.0, 14.0}\n \"\"\"\n # Set up a load of paths that can be imported from the tests\n if 'ProgramFiles(x86)' in os.environ:\n PROGRAM_FILES_PATH = os.environ['ProgramFiles(x86)']\n else:\n PROGRAM_FILES_PATH = os.environ['ProgramFiles']\n\n flatversion = str(version).replace('.', '')\n vstools = \"VS{0}COMNTOOLS\".format(flatversion)\n\n if vstools in os.environ:\n return os.path.join(os.environ[vstools], '..\\\\..\\\\VC\\\\vcvarsall.bat')\n else:\n # prefer looking at env var; fall back to program files defaults\n return os.path.join(PROGRAM_FILES_PATH,\n 'Microsoft Visual Studio {}'.format(version), 'VC',\n 'vcvarsall.bat')\n\n\ndef msvc_env_cmd(bits, config, override=None):\n arch_selector = 'x86' if bits == 32 else 'amd64'\n\n msvc_env_lines = []\n\n version = None\n if override is not None:\n version = override\n\n # The DISTUTILS_USE_SDK variable tells distutils to not try and validate\n # the MSVC compiler. For < 3.5 this still forcibly looks for 'cl.exe'.\n # For > 3.5 it literally just skips the validation logic.\n # See distutils _msvccompiler.py and msvc9compiler.py / msvccompiler.py\n # for more information.\n msvc_env_lines.append('set DISTUTILS_USE_SDK=1')\n # This is also required to hit the 'don't validate' logic on < 3.5.\n # For > 3.5 this is ignored.\n msvc_env_lines.append('set MSSdk=1')\n\n if not version:\n if config.PY3K and config.use_MSVC2015:\n version = '14.0'\n elif config.PY3K:\n version = '10.0'\n else:\n version = '9.0'\n\n if float(version) >= 14.0:\n # For Python 3.5+, ensure that we link with the dynamic runtime. 
See\n # http://stevedower.id.au/blog/building-for-python-3-5-part-two/ for more info\n msvc_env_lines.append('set PY_VCRUNTIME_REDIST=%LIBRARY_BIN%\\\\vcruntime{0}.dll'.format(\n version.replace('.', '')))\n\n vcvarsall_vs_path = build_vcvarsall_vs_path(version)\n\n def build_vcvarsall_cmd(cmd, arch=arch_selector):\n # Default argument `arch_selector` is defined above\n return 'call \"{cmd}\" {arch}'.format(cmd=cmd, arch=arch)\n\n msvc_env_lines.append('set \"VS_VERSION={}\"'.format(version))\n msvc_env_lines.append('set \"VS_MAJOR={}\"'.format(version.split('.')[0]))\n msvc_env_lines.append('set \"VS_YEAR={}\"'.format(VS_VERSION_STRING[version][-4:]))\n msvc_env_lines.append('set \"CMAKE_GENERATOR={}\"'.format(VS_VERSION_STRING[version] +\n {64: ' Win64', 32: ''}[bits]))\n # tell msys2 to ignore path conversions for issue-causing windows-style flags in build\n # See https://github.com/conda-forge/icu-feedstock/pull/5\n msvc_env_lines.append('set \"MSYS2_ARG_CONV_EXCL=/AI;/AL;/OUT;/out;%MSYS2_ARG_CONV_EXCL%\"')\n msvc_env_lines.append('set \"MSYS2_ENV_CONV_EXCL=CL\"')\n if version == '10.0':\n try:\n WIN_SDK_71_PATH = Reg.get_value(os.path.join(WINSDK_BASE, 'v7.1'),\n 'installationfolder')\n WIN_SDK_71_BAT_PATH = os.path.join(WIN_SDK_71_PATH, 'Bin', 'SetEnv.cmd')\n\n win_sdk_arch = '/Release /x86' if bits == 32 else '/Release /x64'\n win_sdk_cmd = build_vcvarsall_cmd(WIN_SDK_71_BAT_PATH, arch=win_sdk_arch)\n\n # There are two methods of building Python 3.3 and 3.4 extensions (both\n # of which required Visual Studio 2010 - as explained in the Python wiki\n # https://wiki.python.org/moin/WindowsCompilers)\n # 1) Use the Windows SDK 7.1\n # 2) Use Visual Studio 2010 (any edition)\n # However, VS2010 never shipped with a 64-bit compiler, so in this case\n # **only** option (1) applies. For this reason, we always try and\n # activate the Windows SDK first. Unfortunately, unsuccessfully setting\n # up the environment does **not EXIT 1** and therefore we must fall\n # back to attempting to set up VS2010.\n # DelayedExpansion is required for the SetEnv.cmd\n msvc_env_lines.append('Setlocal EnableDelayedExpansion')\n msvc_env_lines.append(win_sdk_cmd)\n # If the WindowsSDKDir environment variable has not been successfully\n # set then try activating VS2010\n msvc_env_lines.append('if not \"%WindowsSDKDir%\" == \"{}\" ( {} )'.format(\n WIN_SDK_71_PATH, build_vcvarsall_cmd(vcvarsall_vs_path)))\n # sdk is not installed. 
Fall back to only trying VS 2010\n except KeyError:\n msvc_env_lines.append(build_vcvarsall_cmd(vcvarsall_vs_path))\n elif version == '9.0':\n # Get the Visual Studio 2008 path (not the Visual C++ for Python path)\n # and get the 'vcvars64.bat' from inside the bin (in the directory above\n # that returned by distutils_find_vcvarsall)\n try:\n VCVARS64_VS9_BAT_PATH = os.path.join(os.path.dirname(distutils_find_vcvarsall(9)),\n 'bin', 'vcvars64.bat')\n # there's an exception if VS or the VC compiler for python are not actually installed.\n except (KeyError, TypeError):\n VCVARS64_VS9_BAT_PATH = None\n\n error1 = 'if errorlevel 1 {}'\n\n # Prefer VS9 proper over Microsoft Visual C++ Compiler for Python 2.7\n msvc_env_lines.append(build_vcvarsall_cmd(vcvarsall_vs_path))\n # The Visual Studio 2008 Express edition does not properly contain\n # the amd64 build files, so we call the vcvars64.bat manually,\n # rather than using the vcvarsall.bat which would try and call the\n # missing bat file.\n if arch_selector == 'amd64' and VCVARS64_VS9_BAT_PATH:\n msvc_env_lines.append(error1.format(\n build_vcvarsall_cmd(VCVARS64_VS9_BAT_PATH)))\n # Otherwise, fall back to icrosoft Visual C++ Compiler for Python 2.7+\n # by using the logic provided by setuptools\n msvc_env_lines.append(error1.format(\n build_vcvarsall_cmd(distutils_find_vcvarsall(9))))\n else:\n # Visual Studio 14 or otherwise\n msvc_env_lines.append(build_vcvarsall_cmd(vcvarsall_vs_path))\n\n return '\\n'.join(msvc_env_lines) + '\\n'\n\n\ndef build(m, bld_bat, config):\n with path_prepended(config.build_prefix):\n env = environ.get_dict(config=config, m=m)\n env[\"CONDA_BUILD_STATE\"] = \"BUILD\"\n\n for name in 'BIN', 'INC', 'LIB':\n path = env['LIBRARY_' + name]\n if not isdir(path):\n os.makedirs(path)\n\n src_dir = config.work_dir\n if os.path.isfile(bld_bat):\n with open(bld_bat) as fi:\n data = fi.read()\n with open(join(src_dir, 'bld.bat'), 'w') as fo:\n # more debuggable with echo on\n fo.write('@echo on\\n')\n for key, value in env.items():\n fo.write('set \"{key}={value}\"\\n'.format(key=key, value=value))\n fo.write(msvc_env_cmd(bits=bits, config=config,\n override=m.get_value('build/msvc_compiler', None)))\n # Reset echo on, because MSVC scripts might have turned it off\n fo.write('@echo on\\n')\n fo.write('set \"INCLUDE={};%INCLUDE%\"\\n'.format(env[\"LIBRARY_INC\"]))\n fo.write('set \"LIB={};%LIB%\"\\n'.format(env[\"LIBRARY_LIB\"]))\n if config.activate:\n fo.write('call \"{conda_root}\\\\activate.bat\" \"{prefix}\"\\n'.format(\n conda_root=root_script_dir,\n prefix=config.build_prefix))\n fo.write(\"REM ===== end generated header =====\\n\")\n fo.write(data)\n\n cmd = ['cmd.exe', '/c', 'bld.bat']\n _check_call(cmd, cwd=src_dir)\n\n fix_staged_scripts(join(config.build_prefix, 'Scripts'))\n", "path": "conda_build/windows.py"}]} | 3,268 | 562 |
gh_patches_debug_36797 | rasdani/github-patches | git_diff | docker__docker-py-1812 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
client.networks.containers() is always empty
```
>>> import docker
>>> d = docker.from_env()
>>> [n.name for n in d.networks.list()]
['pedlintegrationtests_default', 'bridge', 'host', 'none']
>>> [n.containers for n in d.networks.list()]
[[], [], [], []]
```
```
$ docker network inspect pedlintegrationtests_default | jq '.[0].Containers | length'
4
```
```
$ pip freeze | grep docker && python --version && docker version
docker==2.5.1
docker-compose==1.16.1
docker-pycreds==0.2.1
dockerpty==0.4.1
nvidia-docker-compose==0.1.4
Python 3.5.2
Client:
Version: 17.09.0-ce
API version: 1.32
Go version: go1.8.3
Git commit: afdb6d4
Built: Tue Sep 26 22:42:18 2017
OS/Arch: linux/amd64
Server:
Version: 17.09.0-ce
API version: 1.32 (minimum version 1.12)
Go version: go1.8.3
Git commit: afdb6d4
Built: Tue Sep 26 22:40:56 2017
OS/Arch: linux/amd64
Experimental: false
```
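This looks like a symptom of the endpoints involved: the list call (`GET /networks`) only returns network summaries, while the per-network inspect call is what carries the `Containers` map. A workaround sketch (at the cost of one extra API call per network) is to re-fetch each network before reading `.containers`:
```
import docker

client = docker.from_env()
for summary in client.networks.list():
    network = client.networks.get(summary.id)  # inspect includes Containers
    print(network.name, [c.name for c in network.containers])
```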
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docker/models/networks.py`
Content:
```
1 from ..api import APIClient
2 from .containers import Container
3 from .resource import Model, Collection
4
5
6 class Network(Model):
7 """
8 A Docker network.
9 """
10 @property
11 def name(self):
12 """
13 The name of the network.
14 """
15 return self.attrs.get('Name')
16
17 @property
18 def containers(self):
19 """
20 The containers that are connected to the network, as a list of
21 :py:class:`~docker.models.containers.Container` objects.
22 """
23 return [
24 self.client.containers.get(cid) for cid in
25 (self.attrs.get('Containers') or {}).keys()
26 ]
27
28 def connect(self, container, *args, **kwargs):
29 """
30 Connect a container to this network.
31
32 Args:
33 container (str): Container to connect to this network, as either
34 an ID, name, or :py:class:`~docker.models.containers.Container`
35 object.
36 aliases (:py:class:`list`): A list of aliases for this endpoint.
37 Names in that list can be used within the network to reach the
38 container. Defaults to ``None``.
39 links (:py:class:`list`): A list of links for this endpoint.
40 Containers declared in this list will be linkedto this
41 container. Defaults to ``None``.
42 ipv4_address (str): The IP address of this container on the
43 network, using the IPv4 protocol. Defaults to ``None``.
44 ipv6_address (str): The IP address of this container on the
45 network, using the IPv6 protocol. Defaults to ``None``.
46 link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6)
47 addresses.
48
49 Raises:
50 :py:class:`docker.errors.APIError`
51 If the server returns an error.
52 """
53 if isinstance(container, Container):
54 container = container.id
55 return self.client.api.connect_container_to_network(
56 container, self.id, *args, **kwargs
57 )
58
59 def disconnect(self, container, *args, **kwargs):
60 """
61 Disconnect a container from this network.
62
63 Args:
64 container (str): Container to disconnect from this network, as
65 either an ID, name, or
66 :py:class:`~docker.models.containers.Container` object.
67 force (bool): Force the container to disconnect from a network.
68 Default: ``False``
69
70 Raises:
71 :py:class:`docker.errors.APIError`
72 If the server returns an error.
73 """
74 if isinstance(container, Container):
75 container = container.id
76 return self.client.api.disconnect_container_from_network(
77 container, self.id, *args, **kwargs
78 )
79
80 def remove(self):
81 """
82 Remove this network.
83
84 Raises:
85 :py:class:`docker.errors.APIError`
86 If the server returns an error.
87 """
88 return self.client.api.remove_network(self.id)
89
90
91 class NetworkCollection(Collection):
92 """
93 Networks on the Docker server.
94 """
95 model = Network
96
97 def create(self, name, *args, **kwargs):
98 """
99 Create a network. Similar to the ``docker network create``.
100
101 Args:
102 name (str): Name of the network
103 driver (str): Name of the driver used to create the network
104 options (dict): Driver options as a key-value dictionary
105 ipam (IPAMConfig): Optional custom IP scheme for the network.
106 check_duplicate (bool): Request daemon to check for networks with
107 same name. Default: ``None``.
108 internal (bool): Restrict external access to the network. Default
109 ``False``.
110 labels (dict): Map of labels to set on the network. Default
111 ``None``.
112 enable_ipv6 (bool): Enable IPv6 on the network. Default ``False``.
113 attachable (bool): If enabled, and the network is in the global
114 scope, non-service containers on worker nodes will be able to
115 connect to the network.
116 scope (str): Specify the network's scope (``local``, ``global`` or
117 ``swarm``)
118 ingress (bool): If set, create an ingress network which provides
119 the routing-mesh in swarm mode.
120
121 Returns:
122 (:py:class:`Network`): The network that was created.
123
124 Raises:
125 :py:class:`docker.errors.APIError`
126 If the server returns an error.
127
128 Example:
129 A network using the bridge driver:
130
131 >>> client.networks.create("network1", driver="bridge")
132
133 You can also create more advanced networks with custom IPAM
134 configurations. For example, setting the subnet to
135 ``192.168.52.0/24`` and gateway address to ``192.168.52.254``.
136
137 .. code-block:: python
138
139 >>> ipam_pool = docker.types.IPAMPool(
140 subnet='192.168.52.0/24',
141 gateway='192.168.52.254'
142 )
143 >>> ipam_config = docker.types.IPAMConfig(
144 pool_configs=[ipam_pool]
145 )
146 >>> client.networks.create(
147 "network1",
148 driver="bridge",
149 ipam=ipam_config
150 )
151
152 """
153 resp = self.client.api.create_network(name, *args, **kwargs)
154 return self.get(resp['Id'])
155
156 def get(self, network_id):
157 """
158 Get a network by its ID.
159
160 Args:
161 network_id (str): The ID of the network.
162 verbose (bool): Retrieve the service details across the cluster in
163 swarm mode.
164 scope (str): Filter the network by scope (``swarm``, ``global``
165 or ``local``).
166
167 Returns:
168 (:py:class:`Network`) The network.
169
170 Raises:
171 :py:class:`docker.errors.NotFound`
172 If the network does not exist.
173
174 :py:class:`docker.errors.APIError`
175 If the server returns an error.
176
177 """
178 return self.prepare_model(self.client.api.inspect_network(network_id))
179
180 def list(self, *args, **kwargs):
181 """
182 List networks. Similar to the ``docker networks ls`` command.
183
184 Args:
185 names (:py:class:`list`): List of names to filter by.
186 ids (:py:class:`list`): List of ids to filter by.
187
188 Returns:
189 (list of :py:class:`Network`) The networks on the server.
190
191 Raises:
192 :py:class:`docker.errors.APIError`
193 If the server returns an error.
194 """
195 resp = self.client.api.networks(*args, **kwargs)
196 return [self.prepare_model(item) for item in resp]
197
198 def prune(self, filters=None):
199 self.client.api.prune_networks(filters=filters)
200 prune.__doc__ = APIClient.prune_networks.__doc__
201
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docker/models/networks.py b/docker/models/networks.py
--- a/docker/models/networks.py
+++ b/docker/models/networks.py
@@ -1,4 +1,5 @@
from ..api import APIClient
+from ..utils import version_gte
from .containers import Container
from .resource import Model, Collection
@@ -153,7 +154,7 @@
resp = self.client.api.create_network(name, *args, **kwargs)
return self.get(resp['Id'])
- def get(self, network_id):
+ def get(self, network_id, *args, **kwargs):
"""
Get a network by its ID.
@@ -175,7 +176,9 @@
If the server returns an error.
"""
- return self.prepare_model(self.client.api.inspect_network(network_id))
+ return self.prepare_model(
+ self.client.api.inspect_network(network_id, *args, **kwargs)
+ )
def list(self, *args, **kwargs):
"""
@@ -184,6 +187,13 @@
Args:
names (:py:class:`list`): List of names to filter by.
ids (:py:class:`list`): List of ids to filter by.
+ filters (dict): Filters to be processed on the network list.
+ Available filters:
+ - ``driver=[<driver-name>]`` Matches a network's driver.
+ - ``label=[<key>]`` or ``label=[<key>=<value>]``.
+ - ``type=["custom"|"builtin"]`` Filters networks by type.
+ greedy (bool): Fetch more details for each network individually.
+ You might want this to get the containers attached to them.
Returns:
(list of :py:class:`Network`) The networks on the server.
@@ -192,8 +202,13 @@
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
+ greedy = kwargs.pop('greedy', False)
resp = self.client.api.networks(*args, **kwargs)
- return [self.prepare_model(item) for item in resp]
+ networks = [self.prepare_model(item) for item in resp]
+ if greedy and version_gte(self.client.api._version, '1.28'):
+ for net in networks:
+ net.reload()
+ return networks
def prune(self, filters=None):
self.client.api.prune_networks(filters=filters)
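
For reference, a short usage sketch of the `greedy` flag added by this patch (illustrative only — it assumes the patched client and a reachable Docker daemon; `from_env()`, `name`, and `containers` are the APIs shown in the file above):

```python
import docker

client = docker.from_env()

# greedy=True makes list() reload each network on API >= 1.28, so that
# net.containers is populated instead of coming back as an empty list.
for net in client.networks.list(greedy=True):
    print(net.name, [c.name for c in net.containers])
```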
| {"golden_diff": "diff --git a/docker/models/networks.py b/docker/models/networks.py\n--- a/docker/models/networks.py\n+++ b/docker/models/networks.py\n@@ -1,4 +1,5 @@\n from ..api import APIClient\n+from ..utils import version_gte\n from .containers import Container\n from .resource import Model, Collection\n \n@@ -153,7 +154,7 @@\n resp = self.client.api.create_network(name, *args, **kwargs)\n return self.get(resp['Id'])\n \n- def get(self, network_id):\n+ def get(self, network_id, *args, **kwargs):\n \"\"\"\n Get a network by its ID.\n \n@@ -175,7 +176,9 @@\n If the server returns an error.\n \n \"\"\"\n- return self.prepare_model(self.client.api.inspect_network(network_id))\n+ return self.prepare_model(\n+ self.client.api.inspect_network(network_id, *args, **kwargs)\n+ )\n \n def list(self, *args, **kwargs):\n \"\"\"\n@@ -184,6 +187,13 @@\n Args:\n names (:py:class:`list`): List of names to filter by.\n ids (:py:class:`list`): List of ids to filter by.\n+ filters (dict): Filters to be processed on the network list.\n+ Available filters:\n+ - ``driver=[<driver-name>]`` Matches a network's driver.\n+ - ``label=[<key>]`` or ``label=[<key>=<value>]``.\n+ - ``type=[\"custom\"|\"builtin\"]`` Filters networks by type.\n+ greedy (bool): Fetch more details for each network individually.\n+ You might want this to get the containers attached to them.\n \n Returns:\n (list of :py:class:`Network`) The networks on the server.\n@@ -192,8 +202,13 @@\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n+ greedy = kwargs.pop('greedy', False)\n resp = self.client.api.networks(*args, **kwargs)\n- return [self.prepare_model(item) for item in resp]\n+ networks = [self.prepare_model(item) for item in resp]\n+ if greedy and version_gte(self.client.api._version, '1.28'):\n+ for net in networks:\n+ net.reload()\n+ return networks\n \n def prune(self, filters=None):\n self.client.api.prune_networks(filters=filters)\n", "issue": "client.networks.containers() is always empty\n```\r\n>>> import docker\r\n>>> d = docker.from_env()\r\n>>> [n.name for n in d.networks.list()]\r\n['pedlintegrationtests_default', 'bridge', 'host', 'none']\r\n>>> [n.containers for n in d.networks.list()]\r\n[[], [], [], []]\r\n```\r\n\r\n```\r\n$ docker network inspect pedlintegrationtests_default | jq '.[0].Containers | length'\r\n4\r\n```\r\n\r\n```\r\n$ pip freeze | grep docker && python --version && docker version\r\ndocker==2.5.1\r\ndocker-compose==1.16.1\r\ndocker-pycreds==0.2.1\r\ndockerpty==0.4.1\r\nnvidia-docker-compose==0.1.4\r\nPython 3.5.2\r\nClient:\r\n Version: 17.09.0-ce\r\n API version: 1.32\r\n Go version: go1.8.3\r\n Git commit: afdb6d4\r\n Built: Tue Sep 26 22:42:18 2017\r\n OS/Arch: linux/amd64\r\n\r\nServer:\r\n Version: 17.09.0-ce\r\n API version: 1.32 (minimum version 1.12)\r\n Go version: go1.8.3\r\n Git commit: afdb6d4\r\n Built: Tue Sep 26 22:40:56 2017\r\n OS/Arch: linux/amd64\r\n Experimental: false\r\n```\n", "before_files": [{"content": "from ..api import APIClient\nfrom .containers import Container\nfrom .resource import Model, Collection\n\n\nclass Network(Model):\n \"\"\"\n A Docker network.\n \"\"\"\n @property\n def name(self):\n \"\"\"\n The name of the network.\n \"\"\"\n return self.attrs.get('Name')\n\n @property\n def containers(self):\n \"\"\"\n The containers that are connected to the network, as a list of\n :py:class:`~docker.models.containers.Container` objects.\n \"\"\"\n return [\n self.client.containers.get(cid) for cid in\n (self.attrs.get('Containers') or 
{}).keys()\n ]\n\n def connect(self, container, *args, **kwargs):\n \"\"\"\n Connect a container to this network.\n\n Args:\n container (str): Container to connect to this network, as either\n an ID, name, or :py:class:`~docker.models.containers.Container`\n object.\n aliases (:py:class:`list`): A list of aliases for this endpoint.\n Names in that list can be used within the network to reach the\n container. Defaults to ``None``.\n links (:py:class:`list`): A list of links for this endpoint.\n Containers declared in this list will be linkedto this\n container. Defaults to ``None``.\n ipv4_address (str): The IP address of this container on the\n network, using the IPv4 protocol. Defaults to ``None``.\n ipv6_address (str): The IP address of this container on the\n network, using the IPv6 protocol. Defaults to ``None``.\n link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6)\n addresses.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n if isinstance(container, Container):\n container = container.id\n return self.client.api.connect_container_to_network(\n container, self.id, *args, **kwargs\n )\n\n def disconnect(self, container, *args, **kwargs):\n \"\"\"\n Disconnect a container from this network.\n\n Args:\n container (str): Container to disconnect from this network, as\n either an ID, name, or\n :py:class:`~docker.models.containers.Container` object.\n force (bool): Force the container to disconnect from a network.\n Default: ``False``\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n if isinstance(container, Container):\n container = container.id\n return self.client.api.disconnect_container_from_network(\n container, self.id, *args, **kwargs\n )\n\n def remove(self):\n \"\"\"\n Remove this network.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n return self.client.api.remove_network(self.id)\n\n\nclass NetworkCollection(Collection):\n \"\"\"\n Networks on the Docker server.\n \"\"\"\n model = Network\n\n def create(self, name, *args, **kwargs):\n \"\"\"\n Create a network. Similar to the ``docker network create``.\n\n Args:\n name (str): Name of the network\n driver (str): Name of the driver used to create the network\n options (dict): Driver options as a key-value dictionary\n ipam (IPAMConfig): Optional custom IP scheme for the network.\n check_duplicate (bool): Request daemon to check for networks with\n same name. Default: ``None``.\n internal (bool): Restrict external access to the network. Default\n ``False``.\n labels (dict): Map of labels to set on the network. Default\n ``None``.\n enable_ipv6 (bool): Enable IPv6 on the network. Default ``False``.\n attachable (bool): If enabled, and the network is in the global\n scope, non-service containers on worker nodes will be able to\n connect to the network.\n scope (str): Specify the network's scope (``local``, ``global`` or\n ``swarm``)\n ingress (bool): If set, create an ingress network which provides\n the routing-mesh in swarm mode.\n\n Returns:\n (:py:class:`Network`): The network that was created.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n Example:\n A network using the bridge driver:\n\n >>> client.networks.create(\"network1\", driver=\"bridge\")\n\n You can also create more advanced networks with custom IPAM\n configurations. For example, setting the subnet to\n ``192.168.52.0/24`` and gateway address to ``192.168.52.254``.\n\n .. 
code-block:: python\n\n >>> ipam_pool = docker.types.IPAMPool(\n subnet='192.168.52.0/24',\n gateway='192.168.52.254'\n )\n >>> ipam_config = docker.types.IPAMConfig(\n pool_configs=[ipam_pool]\n )\n >>> client.networks.create(\n \"network1\",\n driver=\"bridge\",\n ipam=ipam_config\n )\n\n \"\"\"\n resp = self.client.api.create_network(name, *args, **kwargs)\n return self.get(resp['Id'])\n\n def get(self, network_id):\n \"\"\"\n Get a network by its ID.\n\n Args:\n network_id (str): The ID of the network.\n verbose (bool): Retrieve the service details across the cluster in\n swarm mode.\n scope (str): Filter the network by scope (``swarm``, ``global``\n or ``local``).\n\n Returns:\n (:py:class:`Network`) The network.\n\n Raises:\n :py:class:`docker.errors.NotFound`\n If the network does not exist.\n\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n \"\"\"\n return self.prepare_model(self.client.api.inspect_network(network_id))\n\n def list(self, *args, **kwargs):\n \"\"\"\n List networks. Similar to the ``docker networks ls`` command.\n\n Args:\n names (:py:class:`list`): List of names to filter by.\n ids (:py:class:`list`): List of ids to filter by.\n\n Returns:\n (list of :py:class:`Network`) The networks on the server.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n resp = self.client.api.networks(*args, **kwargs)\n return [self.prepare_model(item) for item in resp]\n\n def prune(self, filters=None):\n self.client.api.prune_networks(filters=filters)\n prune.__doc__ = APIClient.prune_networks.__doc__\n", "path": "docker/models/networks.py"}], "after_files": [{"content": "from ..api import APIClient\nfrom ..utils import version_gte\nfrom .containers import Container\nfrom .resource import Model, Collection\n\n\nclass Network(Model):\n \"\"\"\n A Docker network.\n \"\"\"\n @property\n def name(self):\n \"\"\"\n The name of the network.\n \"\"\"\n return self.attrs.get('Name')\n\n @property\n def containers(self):\n \"\"\"\n The containers that are connected to the network, as a list of\n :py:class:`~docker.models.containers.Container` objects.\n \"\"\"\n return [\n self.client.containers.get(cid) for cid in\n (self.attrs.get('Containers') or {}).keys()\n ]\n\n def connect(self, container, *args, **kwargs):\n \"\"\"\n Connect a container to this network.\n\n Args:\n container (str): Container to connect to this network, as either\n an ID, name, or :py:class:`~docker.models.containers.Container`\n object.\n aliases (:py:class:`list`): A list of aliases for this endpoint.\n Names in that list can be used within the network to reach the\n container. Defaults to ``None``.\n links (:py:class:`list`): A list of links for this endpoint.\n Containers declared in this list will be linkedto this\n container. Defaults to ``None``.\n ipv4_address (str): The IP address of this container on the\n network, using the IPv4 protocol. Defaults to ``None``.\n ipv6_address (str): The IP address of this container on the\n network, using the IPv6 protocol. 
Defaults to ``None``.\n link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6)\n addresses.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n if isinstance(container, Container):\n container = container.id\n return self.client.api.connect_container_to_network(\n container, self.id, *args, **kwargs\n )\n\n def disconnect(self, container, *args, **kwargs):\n \"\"\"\n Disconnect a container from this network.\n\n Args:\n container (str): Container to disconnect from this network, as\n either an ID, name, or\n :py:class:`~docker.models.containers.Container` object.\n force (bool): Force the container to disconnect from a network.\n Default: ``False``\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n if isinstance(container, Container):\n container = container.id\n return self.client.api.disconnect_container_from_network(\n container, self.id, *args, **kwargs\n )\n\n def remove(self):\n \"\"\"\n Remove this network.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n return self.client.api.remove_network(self.id)\n\n\nclass NetworkCollection(Collection):\n \"\"\"\n Networks on the Docker server.\n \"\"\"\n model = Network\n\n def create(self, name, *args, **kwargs):\n \"\"\"\n Create a network. Similar to the ``docker network create``.\n\n Args:\n name (str): Name of the network\n driver (str): Name of the driver used to create the network\n options (dict): Driver options as a key-value dictionary\n ipam (IPAMConfig): Optional custom IP scheme for the network.\n check_duplicate (bool): Request daemon to check for networks with\n same name. Default: ``None``.\n internal (bool): Restrict external access to the network. Default\n ``False``.\n labels (dict): Map of labels to set on the network. Default\n ``None``.\n enable_ipv6 (bool): Enable IPv6 on the network. Default ``False``.\n attachable (bool): If enabled, and the network is in the global\n scope, non-service containers on worker nodes will be able to\n connect to the network.\n scope (str): Specify the network's scope (``local``, ``global`` or\n ``swarm``)\n ingress (bool): If set, create an ingress network which provides\n the routing-mesh in swarm mode.\n\n Returns:\n (:py:class:`Network`): The network that was created.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n Example:\n A network using the bridge driver:\n\n >>> client.networks.create(\"network1\", driver=\"bridge\")\n\n You can also create more advanced networks with custom IPAM\n configurations. For example, setting the subnet to\n ``192.168.52.0/24`` and gateway address to ``192.168.52.254``.\n\n .. 
code-block:: python\n\n >>> ipam_pool = docker.types.IPAMPool(\n subnet='192.168.52.0/24',\n gateway='192.168.52.254'\n )\n >>> ipam_config = docker.types.IPAMConfig(\n pool_configs=[ipam_pool]\n )\n >>> client.networks.create(\n \"network1\",\n driver=\"bridge\",\n ipam=ipam_config\n )\n\n \"\"\"\n resp = self.client.api.create_network(name, *args, **kwargs)\n return self.get(resp['Id'])\n\n def get(self, network_id, *args, **kwargs):\n \"\"\"\n Get a network by its ID.\n\n Args:\n network_id (str): The ID of the network.\n verbose (bool): Retrieve the service details across the cluster in\n swarm mode.\n scope (str): Filter the network by scope (``swarm``, ``global``\n or ``local``).\n\n Returns:\n (:py:class:`Network`) The network.\n\n Raises:\n :py:class:`docker.errors.NotFound`\n If the network does not exist.\n\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n \"\"\"\n return self.prepare_model(\n self.client.api.inspect_network(network_id, *args, **kwargs)\n )\n\n def list(self, *args, **kwargs):\n \"\"\"\n List networks. Similar to the ``docker networks ls`` command.\n\n Args:\n names (:py:class:`list`): List of names to filter by.\n ids (:py:class:`list`): List of ids to filter by.\n filters (dict): Filters to be processed on the network list.\n Available filters:\n - ``driver=[<driver-name>]`` Matches a network's driver.\n - ``label=[<key>]`` or ``label=[<key>=<value>]``.\n - ``type=[\"custom\"|\"builtin\"]`` Filters networks by type.\n greedy (bool): Fetch more details for each network individually.\n You might want this to get the containers attached to them.\n\n Returns:\n (list of :py:class:`Network`) The networks on the server.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n greedy = kwargs.pop('greedy', False)\n resp = self.client.api.networks(*args, **kwargs)\n networks = [self.prepare_model(item) for item in resp]\n if greedy and version_gte(self.client.api._version, '1.28'):\n for net in networks:\n net.reload()\n return networks\n\n def prune(self, filters=None):\n self.client.api.prune_networks(filters=filters)\n prune.__doc__ = APIClient.prune_networks.__doc__\n", "path": "docker/models/networks.py"}]} | 2,586 | 546 |
gh_patches_debug_18388 | rasdani/github-patches | git_diff | pypa__pip-3399 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
RuntimeWarning: Config variable 'Py_DEBUG' is unset
When I run pip (any argument) I get the following message:
```
c:\Program Files\Python34\Scripts>pip --version
c:\progra~1\python34\lib\site-packages\pip\pep425tags.py:89: RuntimeWarning: Config variable 'Py_DEBUG' is unset, Python ABI tag may be incorrect
warn=(impl == 'cp')):
c:\progra~1\python34\lib\site-packages\pip\pep425tags.py:93: RuntimeWarning: Config variable 'WITH_PYMALLOC' is unset, Python ABI tag may be incorrect
warn=(impl == 'cp')):
pip 8.0.0 from c:\progra~1\python34\lib\site-packages (python 3.4)
```
I think something is wrong.
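For context, the warning fires whenever `sysconfig` reports no value for these build-time variables; a minimal check along these lines reproduces the condition (an illustrative sketch, not part of the original report — the printed values depend on how the interpreter was built):
```python
import sysconfig

# On the affected interpreter both variables come back as None, which is
# exactly the condition pep425tags warns about before using its fallback.
for var in ("Py_DEBUG", "WITH_PYMALLOC"):
    print(var, sysconfig.get_config_var(var))
```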
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pip/pep425tags.py`
Content:
```
1 """Generate and work with PEP 425 Compatibility Tags."""
2 from __future__ import absolute_import
3
4 import re
5 import sys
6 import warnings
7 import platform
8
9 try:
10 import sysconfig
11 except ImportError: # pragma nocover
12 # Python < 2.7
13 import distutils.sysconfig as sysconfig
14 import distutils.util
15
16 _osx_arch_pat = re.compile(r'(.+)_(\d+)_(\d+)_(.+)')
17
18
19 def get_config_var(var):
20 try:
21 return sysconfig.get_config_var(var)
22 except IOError as e: # Issue #1074
23 warnings.warn("{0}".format(e), RuntimeWarning)
24 return None
25
26
27 def get_abbr_impl():
28 """Return abbreviated implementation name."""
29 if hasattr(sys, 'pypy_version_info'):
30 pyimpl = 'pp'
31 elif sys.platform.startswith('java'):
32 pyimpl = 'jy'
33 elif sys.platform == 'cli':
34 pyimpl = 'ip'
35 else:
36 pyimpl = 'cp'
37 return pyimpl
38
39
40 def get_impl_ver():
41 """Return implementation version."""
42 impl_ver = get_config_var("py_version_nodot")
43 if not impl_ver or get_abbr_impl() == 'pp':
44 impl_ver = ''.join(map(str, get_impl_version_info()))
45 return impl_ver
46
47
48 def get_impl_version_info():
49 """Return sys.version_info-like tuple for use in decrementing the minor
50 version."""
51 if get_abbr_impl() == 'pp':
52 # as per https://github.com/pypa/pip/issues/2882
53 return (sys.version_info[0], sys.pypy_version_info.major,
54 sys.pypy_version_info.minor)
55 else:
56 return sys.version_info[0], sys.version_info[1]
57
58
59 def get_impl_tag():
60 """
61 Returns the Tag for this specific implementation.
62 """
63 return "{0}{1}".format(get_abbr_impl(), get_impl_ver())
64
65
66 def get_flag(var, fallback, expected=True, warn=True):
67 """Use a fallback method for determining SOABI flags if the needed config
68 var is unset or unavailable."""
69 val = get_config_var(var)
70 if val is None:
71 if warn:
72 warnings.warn("Config variable '{0}' is unset, Python ABI tag may "
73 "be incorrect".format(var), RuntimeWarning, 2)
74 return fallback()
75 return val == expected
76
77
78 def get_abi_tag():
79 """Return the ABI tag based on SOABI (if available) or emulate SOABI
80 (CPython 2, PyPy)."""
81 soabi = get_config_var('SOABI')
82 impl = get_abbr_impl()
83 if not soabi and impl in ('cp', 'pp') and hasattr(sys, 'maxunicode'):
84 d = ''
85 m = ''
86 u = ''
87 if get_flag('Py_DEBUG',
88 lambda: hasattr(sys, 'gettotalrefcount'),
89 warn=(impl == 'cp')):
90 d = 'd'
91 if get_flag('WITH_PYMALLOC',
92 lambda: impl == 'cp',
93 warn=(impl == 'cp')):
94 m = 'm'
95 if get_flag('Py_UNICODE_SIZE',
96 lambda: sys.maxunicode == 0x10ffff,
97 expected=4,
98 warn=(impl == 'cp' and
99 sys.version_info < (3, 3))) \
100 and sys.version_info < (3, 3):
101 u = 'u'
102 abi = '%s%s%s%s%s' % (impl, get_impl_ver(), d, m, u)
103 elif soabi and soabi.startswith('cpython-'):
104 abi = 'cp' + soabi.split('-')[1]
105 elif soabi:
106 abi = soabi.replace('.', '_').replace('-', '_')
107 else:
108 abi = None
109 return abi
110
111
112 def get_platform():
113 """Return our platform name 'win32', 'linux_x86_64'"""
114 if sys.platform == 'darwin':
115 # distutils.util.get_platform() returns the release based on the value
116 # of MACOSX_DEPLOYMENT_TARGET on which Python was built, which may
117 # be signficantly older than the user's current machine.
118 release, _, machine = platform.mac_ver()
119 split_ver = release.split('.')
120 return 'macosx_{0}_{1}_{2}'.format(split_ver[0], split_ver[1], machine)
121 # XXX remove distutils dependency
122 return distutils.util.get_platform().replace('.', '_').replace('-', '_')
123
124
125 def get_supported(versions=None, noarch=False):
126 """Return a list of supported tags for each version specified in
127 `versions`.
128
129 :param versions: a list of string versions, of the form ["33", "32"],
130 or None. The first version will be assumed to support our ABI.
131 """
132 supported = []
133
134 # Versions must be given with respect to the preference
135 if versions is None:
136 versions = []
137 version_info = get_impl_version_info()
138 major = version_info[:-1]
139 # Support all previous minor Python versions.
140 for minor in range(version_info[-1], -1, -1):
141 versions.append(''.join(map(str, major + (minor,))))
142
143 impl = get_abbr_impl()
144
145 abis = []
146
147 abi = get_abi_tag()
148 if abi:
149 abis[0:0] = [abi]
150
151 abi3s = set()
152 import imp
153 for suffix in imp.get_suffixes():
154 if suffix[0].startswith('.abi'):
155 abi3s.add(suffix[0].split('.', 2)[1])
156
157 abis.extend(sorted(list(abi3s)))
158
159 abis.append('none')
160
161 if not noarch:
162 arch = get_platform()
163 if sys.platform == 'darwin':
164 # support macosx-10.6-intel on macosx-10.9-x86_64
165 match = _osx_arch_pat.match(arch)
166 if match:
167 name, major, minor, actual_arch = match.groups()
168 actual_arches = [actual_arch]
169 if actual_arch in ('i386', 'ppc'):
170 actual_arches.append('fat')
171 if actual_arch in ('i386', 'x86_64'):
172 actual_arches.append('intel')
173 if actual_arch in ('ppc64', 'x86_64'):
174 actual_arches.append('fat64')
175 if actual_arch in ('i386', 'ppc', 'x86_64'):
176 actual_arches.append('fat32')
177 if actual_arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'):
178 actual_arches.append('universal')
179 tpl = '{0}_{1}_%i_%s'.format(name, major)
180 arches = []
181 for m in reversed(range(int(minor) + 1)):
182 for a in actual_arches:
183 arches.append(tpl % (m, a))
184 else:
185 # arch pattern didn't match (?!)
186 arches = [arch]
187 else:
188 arches = [arch]
189
190 # Current version, current API (built specifically for our Python):
191 for abi in abis:
192 for arch in arches:
193 supported.append(('%s%s' % (impl, versions[0]), abi, arch))
194
195 # Has binaries, does not use the Python API:
196 supported.append(('py%s' % (versions[0][0]), 'none', arch))
197
198 # No abi / arch, but requires our implementation:
199 for i, version in enumerate(versions):
200 supported.append(('%s%s' % (impl, version), 'none', 'any'))
201 if i == 0:
202 # Tagged specifically as being cross-version compatible
203 # (with just the major version specified)
204 supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any'))
205
206 # No abi / arch, generic Python
207 for i, version in enumerate(versions):
208 supported.append(('py%s' % (version,), 'none', 'any'))
209 if i == 0:
210 supported.append(('py%s' % (version[0]), 'none', 'any'))
211
212 return supported
213
214 supported_tags = get_supported()
215 supported_tags_noarch = get_supported(noarch=True)
216
217 implementation_tag = get_impl_tag()
218
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pip/pep425tags.py b/pip/pep425tags.py
--- a/pip/pep425tags.py
+++ b/pip/pep425tags.py
@@ -5,6 +5,7 @@
import sys
import warnings
import platform
+import logging
try:
import sysconfig
@@ -13,6 +14,10 @@
import distutils.sysconfig as sysconfig
import distutils.util
+
+logger = logging.getLogger(__name__)
+
+
_osx_arch_pat = re.compile(r'(.+)_(\d+)_(\d+)_(.+)')
@@ -69,8 +74,8 @@
val = get_config_var(var)
if val is None:
if warn:
- warnings.warn("Config variable '{0}' is unset, Python ABI tag may "
- "be incorrect".format(var), RuntimeWarning, 2)
+ logger.debug("Config variable '%s' is unset, Python ABI tag may "
+ "be incorrect", var)
return fallback()
return val == expected
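
A standalone sketch of the behavioural difference this patch makes (demo logger name only, not pip's actual logging setup):

```python
import logging
import warnings

logger = logging.getLogger("pep425tags-demo")

def old_style(var):
    # pre-patch: surfaces as a RuntimeWarning on every pip run
    warnings.warn("Config variable '{0}' is unset, Python ABI tag may "
                  "be incorrect".format(var), RuntimeWarning, 2)

def new_style(var):
    # post-patch: silent unless debug logging is enabled,
    # e.g. via logging.basicConfig(level=logging.DEBUG)
    logger.debug("Config variable '%s' is unset, Python ABI tag may "
                 "be incorrect", var)
```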
| {"golden_diff": "diff --git a/pip/pep425tags.py b/pip/pep425tags.py\n--- a/pip/pep425tags.py\n+++ b/pip/pep425tags.py\n@@ -5,6 +5,7 @@\n import sys\n import warnings\n import platform\n+import logging\n \n try:\n import sysconfig\n@@ -13,6 +14,10 @@\n import distutils.sysconfig as sysconfig\n import distutils.util\n \n+\n+logger = logging.getLogger(__name__)\n+\n+\n _osx_arch_pat = re.compile(r'(.+)_(\\d+)_(\\d+)_(.+)')\n \n \n@@ -69,8 +74,8 @@\n val = get_config_var(var)\n if val is None:\n if warn:\n- warnings.warn(\"Config variable '{0}' is unset, Python ABI tag may \"\n- \"be incorrect\".format(var), RuntimeWarning, 2)\n+ logger.debug(\"Config variable '%s' is unset, Python ABI tag may \"\n+ \"be incorrect\", var)\n return fallback()\n return val == expected\n", "issue": "RuntimeWarning: Config variable 'Py_DEBUG' is unset\nWhen I run pip (any argument) I get the following message:\n\n```\nc:\\Program Files\\Python34\\Scripts>pip --version\nc:\\progra~1\\python34\\lib\\site-packages\\pip\\pep425tags.py:89: RuntimeWarning: Config variable 'Py_DEBUG' is unset, Python ABI tag may be incorrect\n warn=(impl == 'cp')):\nc:\\progra~1\\python34\\lib\\site-packages\\pip\\pep425tags.py:93: RuntimeWarning: Config variable 'WITH_PYMALLOC' is unset, Python ABI tag may be incorrect\n warn=(impl == 'cp')):\npip 8.0.0 from c:\\progra~1\\python34\\lib\\site-packages (python 3.4)\n```\n\nI think something is wrong.\n\n", "before_files": [{"content": "\"\"\"Generate and work with PEP 425 Compatibility Tags.\"\"\"\nfrom __future__ import absolute_import\n\nimport re\nimport sys\nimport warnings\nimport platform\n\ntry:\n import sysconfig\nexcept ImportError: # pragma nocover\n # Python < 2.7\n import distutils.sysconfig as sysconfig\nimport distutils.util\n\n_osx_arch_pat = re.compile(r'(.+)_(\\d+)_(\\d+)_(.+)')\n\n\ndef get_config_var(var):\n try:\n return sysconfig.get_config_var(var)\n except IOError as e: # Issue #1074\n warnings.warn(\"{0}\".format(e), RuntimeWarning)\n return None\n\n\ndef get_abbr_impl():\n \"\"\"Return abbreviated implementation name.\"\"\"\n if hasattr(sys, 'pypy_version_info'):\n pyimpl = 'pp'\n elif sys.platform.startswith('java'):\n pyimpl = 'jy'\n elif sys.platform == 'cli':\n pyimpl = 'ip'\n else:\n pyimpl = 'cp'\n return pyimpl\n\n\ndef get_impl_ver():\n \"\"\"Return implementation version.\"\"\"\n impl_ver = get_config_var(\"py_version_nodot\")\n if not impl_ver or get_abbr_impl() == 'pp':\n impl_ver = ''.join(map(str, get_impl_version_info()))\n return impl_ver\n\n\ndef get_impl_version_info():\n \"\"\"Return sys.version_info-like tuple for use in decrementing the minor\n version.\"\"\"\n if get_abbr_impl() == 'pp':\n # as per https://github.com/pypa/pip/issues/2882\n return (sys.version_info[0], sys.pypy_version_info.major,\n sys.pypy_version_info.minor)\n else:\n return sys.version_info[0], sys.version_info[1]\n\n\ndef get_impl_tag():\n \"\"\"\n Returns the Tag for this specific implementation.\n \"\"\"\n return \"{0}{1}\".format(get_abbr_impl(), get_impl_ver())\n\n\ndef get_flag(var, fallback, expected=True, warn=True):\n \"\"\"Use a fallback method for determining SOABI flags if the needed config\n var is unset or unavailable.\"\"\"\n val = get_config_var(var)\n if val is None:\n if warn:\n warnings.warn(\"Config variable '{0}' is unset, Python ABI tag may \"\n \"be incorrect\".format(var), RuntimeWarning, 2)\n return fallback()\n return val == expected\n\n\ndef get_abi_tag():\n \"\"\"Return the ABI tag based on SOABI (if available) or emulate SOABI\n (CPython 2, 
PyPy).\"\"\"\n soabi = get_config_var('SOABI')\n impl = get_abbr_impl()\n if not soabi and impl in ('cp', 'pp') and hasattr(sys, 'maxunicode'):\n d = ''\n m = ''\n u = ''\n if get_flag('Py_DEBUG',\n lambda: hasattr(sys, 'gettotalrefcount'),\n warn=(impl == 'cp')):\n d = 'd'\n if get_flag('WITH_PYMALLOC',\n lambda: impl == 'cp',\n warn=(impl == 'cp')):\n m = 'm'\n if get_flag('Py_UNICODE_SIZE',\n lambda: sys.maxunicode == 0x10ffff,\n expected=4,\n warn=(impl == 'cp' and\n sys.version_info < (3, 3))) \\\n and sys.version_info < (3, 3):\n u = 'u'\n abi = '%s%s%s%s%s' % (impl, get_impl_ver(), d, m, u)\n elif soabi and soabi.startswith('cpython-'):\n abi = 'cp' + soabi.split('-')[1]\n elif soabi:\n abi = soabi.replace('.', '_').replace('-', '_')\n else:\n abi = None\n return abi\n\n\ndef get_platform():\n \"\"\"Return our platform name 'win32', 'linux_x86_64'\"\"\"\n if sys.platform == 'darwin':\n # distutils.util.get_platform() returns the release based on the value\n # of MACOSX_DEPLOYMENT_TARGET on which Python was built, which may\n # be signficantly older than the user's current machine.\n release, _, machine = platform.mac_ver()\n split_ver = release.split('.')\n return 'macosx_{0}_{1}_{2}'.format(split_ver[0], split_ver[1], machine)\n # XXX remove distutils dependency\n return distutils.util.get_platform().replace('.', '_').replace('-', '_')\n\n\ndef get_supported(versions=None, noarch=False):\n \"\"\"Return a list of supported tags for each version specified in\n `versions`.\n\n :param versions: a list of string versions, of the form [\"33\", \"32\"],\n or None. The first version will be assumed to support our ABI.\n \"\"\"\n supported = []\n\n # Versions must be given with respect to the preference\n if versions is None:\n versions = []\n version_info = get_impl_version_info()\n major = version_info[:-1]\n # Support all previous minor Python versions.\n for minor in range(version_info[-1], -1, -1):\n versions.append(''.join(map(str, major + (minor,))))\n\n impl = get_abbr_impl()\n\n abis = []\n\n abi = get_abi_tag()\n if abi:\n abis[0:0] = [abi]\n\n abi3s = set()\n import imp\n for suffix in imp.get_suffixes():\n if suffix[0].startswith('.abi'):\n abi3s.add(suffix[0].split('.', 2)[1])\n\n abis.extend(sorted(list(abi3s)))\n\n abis.append('none')\n\n if not noarch:\n arch = get_platform()\n if sys.platform == 'darwin':\n # support macosx-10.6-intel on macosx-10.9-x86_64\n match = _osx_arch_pat.match(arch)\n if match:\n name, major, minor, actual_arch = match.groups()\n actual_arches = [actual_arch]\n if actual_arch in ('i386', 'ppc'):\n actual_arches.append('fat')\n if actual_arch in ('i386', 'x86_64'):\n actual_arches.append('intel')\n if actual_arch in ('ppc64', 'x86_64'):\n actual_arches.append('fat64')\n if actual_arch in ('i386', 'ppc', 'x86_64'):\n actual_arches.append('fat32')\n if actual_arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'):\n actual_arches.append('universal')\n tpl = '{0}_{1}_%i_%s'.format(name, major)\n arches = []\n for m in reversed(range(int(minor) + 1)):\n for a in actual_arches:\n arches.append(tpl % (m, a))\n else:\n # arch pattern didn't match (?!)\n arches = [arch]\n else:\n arches = [arch]\n\n # Current version, current API (built specifically for our Python):\n for abi in abis:\n for arch in arches:\n supported.append(('%s%s' % (impl, versions[0]), abi, arch))\n\n # Has binaries, does not use the Python API:\n supported.append(('py%s' % (versions[0][0]), 'none', arch))\n\n # No abi / arch, but requires our implementation:\n for i, version in 
enumerate(versions):\n supported.append(('%s%s' % (impl, version), 'none', 'any'))\n if i == 0:\n # Tagged specifically as being cross-version compatible\n # (with just the major version specified)\n supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any'))\n\n # No abi / arch, generic Python\n for i, version in enumerate(versions):\n supported.append(('py%s' % (version,), 'none', 'any'))\n if i == 0:\n supported.append(('py%s' % (version[0]), 'none', 'any'))\n\n return supported\n\nsupported_tags = get_supported()\nsupported_tags_noarch = get_supported(noarch=True)\n\nimplementation_tag = get_impl_tag()\n", "path": "pip/pep425tags.py"}], "after_files": [{"content": "\"\"\"Generate and work with PEP 425 Compatibility Tags.\"\"\"\nfrom __future__ import absolute_import\n\nimport re\nimport sys\nimport warnings\nimport platform\nimport logging\n\ntry:\n import sysconfig\nexcept ImportError: # pragma nocover\n # Python < 2.7\n import distutils.sysconfig as sysconfig\nimport distutils.util\n\n\nlogger = logging.getLogger(__name__)\n\n\n_osx_arch_pat = re.compile(r'(.+)_(\\d+)_(\\d+)_(.+)')\n\n\ndef get_config_var(var):\n try:\n return sysconfig.get_config_var(var)\n except IOError as e: # Issue #1074\n warnings.warn(\"{0}\".format(e), RuntimeWarning)\n return None\n\n\ndef get_abbr_impl():\n \"\"\"Return abbreviated implementation name.\"\"\"\n if hasattr(sys, 'pypy_version_info'):\n pyimpl = 'pp'\n elif sys.platform.startswith('java'):\n pyimpl = 'jy'\n elif sys.platform == 'cli':\n pyimpl = 'ip'\n else:\n pyimpl = 'cp'\n return pyimpl\n\n\ndef get_impl_ver():\n \"\"\"Return implementation version.\"\"\"\n impl_ver = get_config_var(\"py_version_nodot\")\n if not impl_ver or get_abbr_impl() == 'pp':\n impl_ver = ''.join(map(str, get_impl_version_info()))\n return impl_ver\n\n\ndef get_impl_version_info():\n \"\"\"Return sys.version_info-like tuple for use in decrementing the minor\n version.\"\"\"\n if get_abbr_impl() == 'pp':\n # as per https://github.com/pypa/pip/issues/2882\n return (sys.version_info[0], sys.pypy_version_info.major,\n sys.pypy_version_info.minor)\n else:\n return sys.version_info[0], sys.version_info[1]\n\n\ndef get_impl_tag():\n \"\"\"\n Returns the Tag for this specific implementation.\n \"\"\"\n return \"{0}{1}\".format(get_abbr_impl(), get_impl_ver())\n\n\ndef get_flag(var, fallback, expected=True, warn=True):\n \"\"\"Use a fallback method for determining SOABI flags if the needed config\n var is unset or unavailable.\"\"\"\n val = get_config_var(var)\n if val is None:\n if warn:\n logger.debug(\"Config variable '%s' is unset, Python ABI tag may \"\n \"be incorrect\", var)\n return fallback()\n return val == expected\n\n\ndef get_abi_tag():\n \"\"\"Return the ABI tag based on SOABI (if available) or emulate SOABI\n (CPython 2, PyPy).\"\"\"\n soabi = get_config_var('SOABI')\n impl = get_abbr_impl()\n if not soabi and impl in ('cp', 'pp') and hasattr(sys, 'maxunicode'):\n d = ''\n m = ''\n u = ''\n if get_flag('Py_DEBUG',\n lambda: hasattr(sys, 'gettotalrefcount'),\n warn=(impl == 'cp')):\n d = 'd'\n if get_flag('WITH_PYMALLOC',\n lambda: impl == 'cp',\n warn=(impl == 'cp')):\n m = 'm'\n if get_flag('Py_UNICODE_SIZE',\n lambda: sys.maxunicode == 0x10ffff,\n expected=4,\n warn=(impl == 'cp' and\n sys.version_info < (3, 3))) \\\n and sys.version_info < (3, 3):\n u = 'u'\n abi = '%s%s%s%s%s' % (impl, get_impl_ver(), d, m, u)\n elif soabi and soabi.startswith('cpython-'):\n abi = 'cp' + soabi.split('-')[1]\n elif soabi:\n abi = soabi.replace('.', '_').replace('-', 
'_')\n else:\n abi = None\n return abi\n\n\ndef get_platform():\n \"\"\"Return our platform name 'win32', 'linux_x86_64'\"\"\"\n if sys.platform == 'darwin':\n # distutils.util.get_platform() returns the release based on the value\n # of MACOSX_DEPLOYMENT_TARGET on which Python was built, which may\n # be signficantly older than the user's current machine.\n release, _, machine = platform.mac_ver()\n split_ver = release.split('.')\n return 'macosx_{0}_{1}_{2}'.format(split_ver[0], split_ver[1], machine)\n # XXX remove distutils dependency\n return distutils.util.get_platform().replace('.', '_').replace('-', '_')\n\n\ndef get_supported(versions=None, noarch=False):\n \"\"\"Return a list of supported tags for each version specified in\n `versions`.\n\n :param versions: a list of string versions, of the form [\"33\", \"32\"],\n or None. The first version will be assumed to support our ABI.\n \"\"\"\n supported = []\n\n # Versions must be given with respect to the preference\n if versions is None:\n versions = []\n version_info = get_impl_version_info()\n major = version_info[:-1]\n # Support all previous minor Python versions.\n for minor in range(version_info[-1], -1, -1):\n versions.append(''.join(map(str, major + (minor,))))\n\n impl = get_abbr_impl()\n\n abis = []\n\n abi = get_abi_tag()\n if abi:\n abis[0:0] = [abi]\n\n abi3s = set()\n import imp\n for suffix in imp.get_suffixes():\n if suffix[0].startswith('.abi'):\n abi3s.add(suffix[0].split('.', 2)[1])\n\n abis.extend(sorted(list(abi3s)))\n\n abis.append('none')\n\n if not noarch:\n arch = get_platform()\n if sys.platform == 'darwin':\n # support macosx-10.6-intel on macosx-10.9-x86_64\n match = _osx_arch_pat.match(arch)\n if match:\n name, major, minor, actual_arch = match.groups()\n actual_arches = [actual_arch]\n if actual_arch in ('i386', 'ppc'):\n actual_arches.append('fat')\n if actual_arch in ('i386', 'x86_64'):\n actual_arches.append('intel')\n if actual_arch in ('ppc64', 'x86_64'):\n actual_arches.append('fat64')\n if actual_arch in ('i386', 'ppc', 'x86_64'):\n actual_arches.append('fat32')\n if actual_arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'):\n actual_arches.append('universal')\n tpl = '{0}_{1}_%i_%s'.format(name, major)\n arches = []\n for m in reversed(range(int(minor) + 1)):\n for a in actual_arches:\n arches.append(tpl % (m, a))\n else:\n # arch pattern didn't match (?!)\n arches = [arch]\n else:\n arches = [arch]\n\n # Current version, current API (built specifically for our Python):\n for abi in abis:\n for arch in arches:\n supported.append(('%s%s' % (impl, versions[0]), abi, arch))\n\n # Has binaries, does not use the Python API:\n supported.append(('py%s' % (versions[0][0]), 'none', arch))\n\n # No abi / arch, but requires our implementation:\n for i, version in enumerate(versions):\n supported.append(('%s%s' % (impl, version), 'none', 'any'))\n if i == 0:\n # Tagged specifically as being cross-version compatible\n # (with just the major version specified)\n supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any'))\n\n # No abi / arch, generic Python\n for i, version in enumerate(versions):\n supported.append(('py%s' % (version,), 'none', 'any'))\n if i == 0:\n supported.append(('py%s' % (version[0]), 'none', 'any'))\n\n return supported\n\nsupported_tags = get_supported()\nsupported_tags_noarch = get_supported(noarch=True)\n\nimplementation_tag = get_impl_tag()\n", "path": "pip/pep425tags.py"}]} | 2,847 | 244 |
gh_patches_debug_16894 | rasdani/github-patches | git_diff | svthalia__concrexit-2820 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Admin site doesn't show organizers
### Describe the bug
Organizers are not shown in the site admin.
### How to reproduce
Steps to reproduce the behaviour:
1. Go to any event
2. See that the organizers field is empty
### Expected behaviour
There should be at least one organizer.
### Additional context
Multiple organizers broke things again.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/events/emails.py`
Content:
```
1 """The emails defined by the events package."""
2 from django.conf import settings
3 from django.core.mail import EmailMessage
4 from django.template.loader import get_template
5 from django.utils.translation import gettext_lazy as _
6
7
8 def notify_first_waiting(event):
9 """Send an email to the first person on the waiting list when someone cancels their registration.
10
11 :param event: the event
12 """
13 if (
14 event.max_participants is not None
15 and event.eventregistration_set.filter(date_cancelled=None).count()
16 > event.max_participants
17 ):
18 # Prepare email to send to the first person on the waiting list
19 first_waiting = event.eventregistration_set.filter(
20 date_cancelled=None
21 ).order_by("date")[event.max_participants]
22
23 text_template = get_template("events/member_email.txt")
24
25 subject = _("[THALIA] Notification about your registration for '{}'").format(
26 event.title
27 )
28
29 organiser_emails = [
30 organiser.contact_address
31 for organiser in event.organisers.all()
32 if organiser.contact_address is not None
33 ]
34 text_message = text_template.render(
35 {
36 "event": event,
37 "registration": first_waiting,
38 "name": first_waiting.name or first_waiting.member.first_name,
39 "base_url": settings.BASE_URL,
40 "organisers": organiser_emails,
41 }
42 )
43
44 EmailMessage(subject, text_message, to=[first_waiting.email]).send()
45
46
47 def notify_organiser(event, registration):
48 """Send an email to the organiser of the event if someone cancels their registration.
49
50 :param event: the event
51 :param registration: the registration that was cancelled
52 """
53 if not event.organisers.exists():
54 return
55
56 text_template = get_template("events/organiser_email.txt")
57 subject = f"Registration for {event.title} cancelled by member"
58 text_message = text_template.render({"event": event, "registration": registration})
59
60 EmailMessage(
61 subject,
62 text_message,
63 to=[
64 organiser.contact_mailinglist.name + "@" + settings.SITE_DOMAIN
65 for organiser in event.organisers.all()
66 ],
67 ).send()
68
69
70 def notify_waiting(event, registration):
71 text_template = get_template("events/more_places_email.txt")
72 subject = _("[THALIA] Notification about your registration for '{}'").format(
73 event.title
74 )
75 text_message = text_template.render(
76 {
77 "event": event,
78 "registration": registration,
79 "name": registration.name or registration.member.first_name,
80 "base_url": settings.BASE_URL,
81 }
82 )
83 EmailMessage(subject, text_message, to=[registration.email]).send()
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/events/emails.py b/website/events/emails.py
--- a/website/events/emails.py
+++ b/website/events/emails.py
@@ -72,12 +72,21 @@
subject = _("[THALIA] Notification about your registration for '{}'").format(
event.title
)
+
+ organiser_emails = [
+ organiser.contact_address
+ for organiser in event.organisers.all()
+ if organiser.contact_address is not None
+ ]
+
text_message = text_template.render(
{
"event": event,
"registration": registration,
"name": registration.name or registration.member.first_name,
"base_url": settings.BASE_URL,
+ "organisers": organiser_emails,
}
)
+
EmailMessage(subject, text_message, to=[registration.email]).send()
| {"golden_diff": "diff --git a/website/events/emails.py b/website/events/emails.py\n--- a/website/events/emails.py\n+++ b/website/events/emails.py\n@@ -72,12 +72,21 @@\n subject = _(\"[THALIA] Notification about your registration for '{}'\").format(\n event.title\n )\n+\n+ organiser_emails = [\n+ organiser.contact_address\n+ for organiser in event.organisers.all()\n+ if organiser.contact_address is not None\n+ ]\n+\n text_message = text_template.render(\n {\n \"event\": event,\n \"registration\": registration,\n \"name\": registration.name or registration.member.first_name,\n \"base_url\": settings.BASE_URL,\n+ \"organisers\": organiser_emails,\n }\n )\n+\n EmailMessage(subject, text_message, to=[registration.email]).send()\n", "issue": "Admin site doesnt show organizers\n### Describe the bug\r\nOrganizers are not shown in the site admin\r\n\r\n### How to reproduce\r\nSteps to reproduce the behaviour:\r\n1. Go to any event\r\n2. See that the organizers field is empty\r\n\r\n### Expected behaviour\r\nthere should be at least one organizer\r\n\r\n### Additional context\r\nmultiple organizers broke things again\r\n\nAdmin site doesnt show organizers\n### Describe the bug\r\nOrganizers are not shown in the site admin\r\n\r\n### How to reproduce\r\nSteps to reproduce the behaviour:\r\n1. Go to any event\r\n2. See that the organizers field is empty\r\n\r\n### Expected behaviour\r\nthere should be at least one organizer\r\n\r\n### Additional context\r\nmultiple organizers broke things again\r\n\n", "before_files": [{"content": "\"\"\"The emails defined by the events package.\"\"\"\nfrom django.conf import settings\nfrom django.core.mail import EmailMessage\nfrom django.template.loader import get_template\nfrom django.utils.translation import gettext_lazy as _\n\n\ndef notify_first_waiting(event):\n \"\"\"Send an email to the first person on the waiting list when someone cancels their registration.\n\n :param event: the event\n \"\"\"\n if (\n event.max_participants is not None\n and event.eventregistration_set.filter(date_cancelled=None).count()\n > event.max_participants\n ):\n # Prepare email to send to the first person on the waiting list\n first_waiting = event.eventregistration_set.filter(\n date_cancelled=None\n ).order_by(\"date\")[event.max_participants]\n\n text_template = get_template(\"events/member_email.txt\")\n\n subject = _(\"[THALIA] Notification about your registration for '{}'\").format(\n event.title\n )\n\n organiser_emails = [\n organiser.contact_address\n for organiser in event.organisers.all()\n if organiser.contact_address is not None\n ]\n text_message = text_template.render(\n {\n \"event\": event,\n \"registration\": first_waiting,\n \"name\": first_waiting.name or first_waiting.member.first_name,\n \"base_url\": settings.BASE_URL,\n \"organisers\": organiser_emails,\n }\n )\n\n EmailMessage(subject, text_message, to=[first_waiting.email]).send()\n\n\ndef notify_organiser(event, registration):\n \"\"\"Send an email to the organiser of the event if someone cancels their registration.\n\n :param event: the event\n :param registration: the registration that was cancelled\n \"\"\"\n if not event.organisers.exists():\n return\n\n text_template = get_template(\"events/organiser_email.txt\")\n subject = f\"Registration for {event.title} cancelled by member\"\n text_message = text_template.render({\"event\": event, \"registration\": registration})\n\n EmailMessage(\n subject,\n text_message,\n to=[\n organiser.contact_mailinglist.name + \"@\" + settings.SITE_DOMAIN\n for 
organiser in event.organisers.all()\n ],\n ).send()\n\n\ndef notify_waiting(event, registration):\n text_template = get_template(\"events/more_places_email.txt\")\n subject = _(\"[THALIA] Notification about your registration for '{}'\").format(\n event.title\n )\n text_message = text_template.render(\n {\n \"event\": event,\n \"registration\": registration,\n \"name\": registration.name or registration.member.first_name,\n \"base_url\": settings.BASE_URL,\n }\n )\n EmailMessage(subject, text_message, to=[registration.email]).send()\n", "path": "website/events/emails.py"}], "after_files": [{"content": "\"\"\"The emails defined by the events package.\"\"\"\nfrom django.conf import settings\nfrom django.core.mail import EmailMessage\nfrom django.template.loader import get_template\nfrom django.utils.translation import gettext_lazy as _\n\n\ndef notify_first_waiting(event):\n \"\"\"Send an email to the first person on the waiting list when someone cancels their registration.\n\n :param event: the event\n \"\"\"\n if (\n event.max_participants is not None\n and event.eventregistration_set.filter(date_cancelled=None).count()\n > event.max_participants\n ):\n # Prepare email to send to the first person on the waiting list\n first_waiting = event.eventregistration_set.filter(\n date_cancelled=None\n ).order_by(\"date\")[event.max_participants]\n\n text_template = get_template(\"events/member_email.txt\")\n\n subject = _(\"[THALIA] Notification about your registration for '{}'\").format(\n event.title\n )\n\n organiser_emails = [\n organiser.contact_address\n for organiser in event.organisers.all()\n if organiser.contact_address is not None\n ]\n text_message = text_template.render(\n {\n \"event\": event,\n \"registration\": first_waiting,\n \"name\": first_waiting.name or first_waiting.member.first_name,\n \"base_url\": settings.BASE_URL,\n \"organisers\": organiser_emails,\n }\n )\n\n EmailMessage(subject, text_message, to=[first_waiting.email]).send()\n\n\ndef notify_organiser(event, registration):\n \"\"\"Send an email to the organiser of the event if someone cancels their registration.\n\n :param event: the event\n :param registration: the registration that was cancelled\n \"\"\"\n if not event.organisers.exists():\n return\n\n text_template = get_template(\"events/organiser_email.txt\")\n subject = f\"Registration for {event.title} cancelled by member\"\n text_message = text_template.render({\"event\": event, \"registration\": registration})\n\n EmailMessage(\n subject,\n text_message,\n to=[\n organiser.contact_mailinglist.name + \"@\" + settings.SITE_DOMAIN\n for organiser in event.organisers.all()\n ],\n ).send()\n\n\ndef notify_waiting(event, registration):\n text_template = get_template(\"events/more_places_email.txt\")\n subject = _(\"[THALIA] Notification about your registration for '{}'\").format(\n event.title\n )\n\n organiser_emails = [\n organiser.contact_address\n for organiser in event.organisers.all()\n if organiser.contact_address is not None\n ]\n\n text_message = text_template.render(\n {\n \"event\": event,\n \"registration\": registration,\n \"name\": registration.name or registration.member.first_name,\n \"base_url\": settings.BASE_URL,\n \"organisers\": organiser_emails,\n }\n )\n\n EmailMessage(subject, text_message, to=[registration.email]).send()\n", "path": "website/events/emails.py"}]} | 1,133 | 188 |
gh_patches_debug_12514 | rasdani/github-patches | git_diff | flairNLP__flair-587 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
One redundant code line in the `TextClassifier` class:
line 34: `self.document_embeddings = document_embeddings`
line 38: `self.document_embeddings: flair.embeddings.DocumentLSTMEmbeddings = document_embeddings`
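In other words, `__init__` assigns the same attribute twice and only one assignment is needed — a schematic of the intended shape (illustrative only, not the project's actual patch):
```python
class TextClassifierSketch:
    """Schematic only: a single assignment replaces the duplicated lines 34/38."""

    def __init__(self, document_embeddings, label_dictionary, multi_label):
        # keep one assignment; the second one (with the narrower
        # DocumentLSTMEmbeddings annotation) is what the issue flags as redundant
        self.document_embeddings = document_embeddings
        self.label_dictionary = label_dictionary
        self.multi_label = multi_label
```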
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flair/models/text_classification_model.py`
Content:
```
1 import warnings
2 import logging
3 from pathlib import Path
4 from typing import List, Union
5
6 import torch
7 import torch.nn as nn
8
9 import flair.nn
10 import flair.embeddings
11 from flair.data import Dictionary, Sentence, Label
12 from flair.file_utils import cached_path
13 from flair.training_utils import convert_labels_to_one_hot, clear_embeddings
14
15
16 log = logging.getLogger('flair')
17
18
19 class TextClassifier(flair.nn.Model):
20 """
21 Text Classification Model
22 The model takes word embeddings, puts them into an LSTM to obtain a text representation, and puts the
23 text representation in the end into a linear layer to get the actual class label.
24 The model can handle single and multi class data sets.
25 """
26
27 def __init__(self,
28 document_embeddings: flair.embeddings.DocumentEmbeddings,
29 label_dictionary: Dictionary,
30 multi_label: bool):
31
32 super(TextClassifier, self).__init__()
33
34 self.document_embeddings = document_embeddings
35 self.label_dictionary: Dictionary = label_dictionary
36 self.multi_label = multi_label
37
38 self.document_embeddings: flair.embeddings.DocumentLSTMEmbeddings = document_embeddings
39
40 self.decoder = nn.Linear(self.document_embeddings.embedding_length, len(self.label_dictionary))
41
42 self._init_weights()
43
44 if multi_label:
45 self.loss_function = nn.BCELoss()
46 else:
47 self.loss_function = nn.CrossEntropyLoss()
48
49 # auto-spawn on GPU if available
50 self.to(flair.device)
51
52 def _init_weights(self):
53 nn.init.xavier_uniform_(self.decoder.weight)
54
55 def forward(self, sentences) -> List[List[float]]:
56 self.document_embeddings.embed(sentences)
57
58 text_embedding_list = [sentence.get_embedding().unsqueeze(0) for sentence in sentences]
59 text_embedding_tensor = torch.cat(text_embedding_list, 0).to(flair.device)
60
61 label_scores = self.decoder(text_embedding_tensor)
62
63 return label_scores
64
65 def save(self, model_file: Union[str, Path]):
66 """
67 Saves the current model to the provided file.
68 :param model_file: the model file
69 """
70 model_state = {
71 'state_dict': self.state_dict(),
72 'document_embeddings': self.document_embeddings,
73 'label_dictionary': self.label_dictionary,
74 'multi_label': self.multi_label,
75 }
76 torch.save(model_state, str(model_file), pickle_protocol=4)
77
78 def save_checkpoint(self, model_file: Union[str, Path], optimizer_state: dict, scheduler_state: dict, epoch: int, loss: float):
79 """
80 Saves the current model to the provided file.
81 :param model_file: the model file
82 """
83 model_state = {
84 'state_dict': self.state_dict(),
85 'document_embeddings': self.document_embeddings,
86 'label_dictionary': self.label_dictionary,
87 'multi_label': self.multi_label,
88 'optimizer_state_dict': optimizer_state,
89 'scheduler_state_dict': scheduler_state,
90 'epoch': epoch,
91 'loss': loss
92 }
93 torch.save(model_state, str(model_file), pickle_protocol=4)
94
95 @classmethod
96 def load_from_file(cls, model_file: Union[str, Path]):
97 """
98 Loads the model from the given file.
99 :param model_file: the model file
100 :return: the loaded text classifier model
101 """
102 state = TextClassifier._load_state(model_file)
103
104 model = TextClassifier(
105 document_embeddings=state['document_embeddings'],
106 label_dictionary=state['label_dictionary'],
107 multi_label=state['multi_label']
108 )
109 model.load_state_dict(state['state_dict'])
110 model.eval()
111 model.to(flair.device)
112
113 return model
114
115 @classmethod
116 def load_checkpoint(cls, model_file: Union[str, Path]):
117 state = TextClassifier._load_state(model_file)
118 model = TextClassifier.load_from_file(model_file)
119
120 epoch = state['epoch'] if 'epoch' in state else None
121 loss = state['loss'] if 'loss' in state else None
122 optimizer_state_dict = state['optimizer_state_dict'] if 'optimizer_state_dict' in state else None
123 scheduler_state_dict = state['scheduler_state_dict'] if 'scheduler_state_dict' in state else None
124
125 return {
126 'model': model, 'epoch': epoch, 'loss': loss,
127 'optimizer_state_dict': optimizer_state_dict, 'scheduler_state_dict': scheduler_state_dict
128 }
129
130 @classmethod
131 def _load_state(cls, model_file: Union[str, Path]):
132 # ATTENTION: suppressing torch serialization warnings. This needs to be taken out once we sort out recursive
133 # serialization of torch objects
134 # https://docs.python.org/3/library/warnings.html#temporarily-suppressing-warnings
135 with warnings.catch_warnings():
136 warnings.filterwarnings("ignore")
137 # load_big_file is a workaround by https://github.com/highway11git to load models on some Mac/Windows setups
138 # see https://github.com/zalandoresearch/flair/issues/351
139 f = flair.file_utils.load_big_file(str(model_file))
140 state = torch.load(f, map_location=flair.device)
141 return state
142
143 def forward_loss(self, sentences: Union[List[Sentence], Sentence]) -> torch.tensor:
144 scores = self.forward(sentences)
145 return self._calculate_loss(scores, sentences)
146
147 def forward_labels_and_loss(self, sentences: Union[Sentence, List[Sentence]]) -> (List[List[Label]], torch.tensor):
148 scores = self.forward(sentences)
149 labels = self._obtain_labels(scores)
150 loss = self._calculate_loss(scores, sentences)
151 return labels, loss
152
153 def predict(self, sentences: Union[Sentence, List[Sentence]], mini_batch_size: int = 32) -> List[Sentence]:
154 """
155 Predicts the class labels for the given sentences. The labels are directly added to the sentences.
156 :param sentences: list of sentences
157 :param mini_batch_size: mini batch size to use
158 :return: the list of sentences containing the labels
159 """
160 with torch.no_grad():
161 if type(sentences) is Sentence:
162 sentences = [sentences]
163
164 filtered_sentences = self._filter_empty_sentences(sentences)
165
166 batches = [filtered_sentences[x:x + mini_batch_size] for x in range(0, len(filtered_sentences), mini_batch_size)]
167
168 for batch in batches:
169 scores = self.forward(batch)
170 predicted_labels = self._obtain_labels(scores)
171
172 for (sentence, labels) in zip(batch, predicted_labels):
173 sentence.labels = labels
174
175 clear_embeddings(batch)
176
177 return sentences
178
179 @staticmethod
180 def _filter_empty_sentences(sentences: List[Sentence]) -> List[Sentence]:
181 filtered_sentences = [sentence for sentence in sentences if sentence.tokens]
182 if len(sentences) != len(filtered_sentences):
183 log.warning('Ignore {} sentence(s) with no tokens.'.format(len(sentences) - len(filtered_sentences)))
184 return filtered_sentences
185
186 def _calculate_loss(self, scores: List[List[float]], sentences: List[Sentence]) -> float:
187 """
188 Calculates the loss.
189 :param scores: the prediction scores from the model
190 :param sentences: list of sentences
191 :return: loss value
192 """
193 if self.multi_label:
194 return self._calculate_multi_label_loss(scores, sentences)
195
196 return self._calculate_single_label_loss(scores, sentences)
197
198 def _obtain_labels(self, scores: List[List[float]]) -> List[List[Label]]:
199 """
200 Predicts the labels of sentences.
201 :param scores: the prediction scores from the model
202 :return: list of predicted labels
203 """
204
205 if self.multi_label:
206 return [self._get_multi_label(s) for s in scores]
207
208 return [self._get_single_label(s) for s in scores]
209
210 def _get_multi_label(self, label_scores) -> List[Label]:
211 labels = []
212
213 sigmoid = torch.nn.Sigmoid()
214
215 results = list(map(lambda x: sigmoid(x), label_scores))
216 for idx, conf in enumerate(results):
217 if conf > 0.5:
218 label = self.label_dictionary.get_item_for_index(idx)
219 labels.append(Label(label, conf.item()))
220
221 return labels
222
223 def _get_single_label(self, label_scores) -> List[Label]:
224 conf, idx = torch.max(label_scores, 0)
225 label = self.label_dictionary.get_item_for_index(idx.item())
226
227 return [Label(label, conf.item())]
228
229 def _calculate_multi_label_loss(self, label_scores, sentences: List[Sentence]) -> float:
230 sigmoid = nn.Sigmoid()
231 return self.loss_function(sigmoid(label_scores), self._labels_to_one_hot(sentences))
232
233 def _calculate_single_label_loss(self, label_scores, sentences: List[Sentence]) -> float:
234 return self.loss_function(label_scores, self._labels_to_indices(sentences))
235
236 def _labels_to_one_hot(self, sentences: List[Sentence]):
237 label_list = [sentence.get_label_names() for sentence in sentences]
238 one_hot = convert_labels_to_one_hot(label_list, self.label_dictionary)
239 one_hot = [torch.FloatTensor(l).unsqueeze(0) for l in one_hot]
240 one_hot = torch.cat(one_hot, 0).to(flair.device)
241 return one_hot
242
243 def _labels_to_indices(self, sentences: List[Sentence]):
244 indices = [
245 torch.LongTensor([self.label_dictionary.get_idx_for_item(label.value) for label in sentence.labels])
246 for sentence in sentences
247 ]
248
249 vec = torch.cat(indices, 0).to(flair.device)
250
251 return vec
252
253 @staticmethod
254 def load(model: str):
255 model_file = None
256 aws_resource_path = 'https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/models-v0.4'
257 cache_dir = Path('models')
258
259 if model.lower() == 'de-offensive-language':
260 base_path = '/'.join([aws_resource_path, 'TEXT-CLASSIFICATION_germ-eval-2018_task-1',
261 'germ-eval-2018-task-1.pt'])
262 model_file = cached_path(base_path, cache_dir=cache_dir)
263
264 elif model.lower() == 'en-sentiment':
265 base_path = '/'.join([aws_resource_path, 'TEXT-CLASSIFICATION_imdb', 'imdb.pt'])
266 model_file = cached_path(base_path, cache_dir=cache_dir)
267
268 if model_file is not None:
269 return TextClassifier.load_from_file(model_file)
270
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/flair/models/text_classification_model.py b/flair/models/text_classification_model.py
--- a/flair/models/text_classification_model.py
+++ b/flair/models/text_classification_model.py
@@ -31,12 +31,10 @@
super(TextClassifier, self).__init__()
- self.document_embeddings = document_embeddings
+ self.document_embeddings: flair.embeddings.DocumentLSTMEmbeddings = document_embeddings
self.label_dictionary: Dictionary = label_dictionary
self.multi_label = multi_label
- self.document_embeddings: flair.embeddings.DocumentLSTMEmbeddings = document_embeddings
-
self.decoder = nn.Linear(self.document_embeddings.embedding_length, len(self.label_dictionary))
self._init_weights()
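
The change above amounts to keeping a single, annotated assignment of `document_embeddings`. A minimal sketch of the redundancy being removed — the string annotation and the plain-string argument stand in for a real `flair.embeddings.DocumentLSTMEmbeddings` object so the snippet runs without flair installed:

```python
class BeforePatch:
    def __init__(self, document_embeddings):
        self.document_embeddings = document_embeddings  # original line 34: rebound immediately below
        self.document_embeddings: "DocumentLSTMEmbeddings" = document_embeddings  # original line 38

class AfterPatch:
    def __init__(self, document_embeddings):
        # single annotated assignment, as in the diff above
        self.document_embeddings: "DocumentLSTMEmbeddings" = document_embeddings

# Both versions end up with an identical attribute; the first assignment added nothing.
assert BeforePatch("embeddings").document_embeddings == AfterPatch("embeddings").document_embeddings
```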
| {"golden_diff": "diff --git a/flair/models/text_classification_model.py b/flair/models/text_classification_model.py\n--- a/flair/models/text_classification_model.py\n+++ b/flair/models/text_classification_model.py\n@@ -31,12 +31,10 @@\n \n super(TextClassifier, self).__init__()\n \n- self.document_embeddings = document_embeddings\n+ self.document_embeddings: flair.embeddings.DocumentLSTMEmbeddings = document_embeddings\n self.label_dictionary: Dictionary = label_dictionary\n self.multi_label = multi_label\n \n- self.document_embeddings: flair.embeddings.DocumentLSTMEmbeddings = document_embeddings\n-\n self.decoder = nn.Linear(self.document_embeddings.embedding_length, len(self.label_dictionary))\n \n self._init_weights()\n", "issue": "one redundant code line\nin TextClassifier class\r\n`\r\n34 self.document_embeddings = document_embeddings\r\n`\r\n`\r\n38 self.document_embeddings: flair.embeddings.DocumentLSTMEmbeddings = document_embeddings\r\n`\n", "before_files": [{"content": "import warnings\nimport logging\nfrom pathlib import Path\nfrom typing import List, Union\n\nimport torch\nimport torch.nn as nn\n\nimport flair.nn\nimport flair.embeddings\nfrom flair.data import Dictionary, Sentence, Label\nfrom flair.file_utils import cached_path\nfrom flair.training_utils import convert_labels_to_one_hot, clear_embeddings\n\n\nlog = logging.getLogger('flair')\n\n\nclass TextClassifier(flair.nn.Model):\n \"\"\"\n Text Classification Model\n The model takes word embeddings, puts them into an LSTM to obtain a text representation, and puts the\n text representation in the end into a linear layer to get the actual class label.\n The model can handle single and multi class data sets.\n \"\"\"\n\n def __init__(self,\n document_embeddings: flair.embeddings.DocumentEmbeddings,\n label_dictionary: Dictionary,\n multi_label: bool):\n\n super(TextClassifier, self).__init__()\n\n self.document_embeddings = document_embeddings\n self.label_dictionary: Dictionary = label_dictionary\n self.multi_label = multi_label\n\n self.document_embeddings: flair.embeddings.DocumentLSTMEmbeddings = document_embeddings\n\n self.decoder = nn.Linear(self.document_embeddings.embedding_length, len(self.label_dictionary))\n\n self._init_weights()\n\n if multi_label:\n self.loss_function = nn.BCELoss()\n else:\n self.loss_function = nn.CrossEntropyLoss()\n\n # auto-spawn on GPU if available\n self.to(flair.device)\n\n def _init_weights(self):\n nn.init.xavier_uniform_(self.decoder.weight)\n\n def forward(self, sentences) -> List[List[float]]:\n self.document_embeddings.embed(sentences)\n\n text_embedding_list = [sentence.get_embedding().unsqueeze(0) for sentence in sentences]\n text_embedding_tensor = torch.cat(text_embedding_list, 0).to(flair.device)\n\n label_scores = self.decoder(text_embedding_tensor)\n\n return label_scores\n\n def save(self, model_file: Union[str, Path]):\n \"\"\"\n Saves the current model to the provided file.\n :param model_file: the model file\n \"\"\"\n model_state = {\n 'state_dict': self.state_dict(),\n 'document_embeddings': self.document_embeddings,\n 'label_dictionary': self.label_dictionary,\n 'multi_label': self.multi_label,\n }\n torch.save(model_state, str(model_file), pickle_protocol=4)\n\n def save_checkpoint(self, model_file: Union[str, Path], optimizer_state: dict, scheduler_state: dict, epoch: int, loss: float):\n \"\"\"\n Saves the current model to the provided file.\n :param model_file: the model file\n \"\"\"\n model_state = {\n 'state_dict': self.state_dict(),\n 
'document_embeddings': self.document_embeddings,\n 'label_dictionary': self.label_dictionary,\n 'multi_label': self.multi_label,\n 'optimizer_state_dict': optimizer_state,\n 'scheduler_state_dict': scheduler_state,\n 'epoch': epoch,\n 'loss': loss\n }\n torch.save(model_state, str(model_file), pickle_protocol=4)\n\n @classmethod\n def load_from_file(cls, model_file: Union[str, Path]):\n \"\"\"\n Loads the model from the given file.\n :param model_file: the model file\n :return: the loaded text classifier model\n \"\"\"\n state = TextClassifier._load_state(model_file)\n\n model = TextClassifier(\n document_embeddings=state['document_embeddings'],\n label_dictionary=state['label_dictionary'],\n multi_label=state['multi_label']\n )\n model.load_state_dict(state['state_dict'])\n model.eval()\n model.to(flair.device)\n\n return model\n\n @classmethod\n def load_checkpoint(cls, model_file: Union[str, Path]):\n state = TextClassifier._load_state(model_file)\n model = TextClassifier.load_from_file(model_file)\n\n epoch = state['epoch'] if 'epoch' in state else None\n loss = state['loss'] if 'loss' in state else None\n optimizer_state_dict = state['optimizer_state_dict'] if 'optimizer_state_dict' in state else None\n scheduler_state_dict = state['scheduler_state_dict'] if 'scheduler_state_dict' in state else None\n\n return {\n 'model': model, 'epoch': epoch, 'loss': loss,\n 'optimizer_state_dict': optimizer_state_dict, 'scheduler_state_dict': scheduler_state_dict\n }\n\n @classmethod\n def _load_state(cls, model_file: Union[str, Path]):\n # ATTENTION: suppressing torch serialization warnings. This needs to be taken out once we sort out recursive\n # serialization of torch objects\n # https://docs.python.org/3/library/warnings.html#temporarily-suppressing-warnings\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\")\n # load_big_file is a workaround by https://github.com/highway11git to load models on some Mac/Windows setups\n # see https://github.com/zalandoresearch/flair/issues/351\n f = flair.file_utils.load_big_file(str(model_file))\n state = torch.load(f, map_location=flair.device)\n return state\n\n def forward_loss(self, sentences: Union[List[Sentence], Sentence]) -> torch.tensor:\n scores = self.forward(sentences)\n return self._calculate_loss(scores, sentences)\n\n def forward_labels_and_loss(self, sentences: Union[Sentence, List[Sentence]]) -> (List[List[Label]], torch.tensor):\n scores = self.forward(sentences)\n labels = self._obtain_labels(scores)\n loss = self._calculate_loss(scores, sentences)\n return labels, loss\n\n def predict(self, sentences: Union[Sentence, List[Sentence]], mini_batch_size: int = 32) -> List[Sentence]:\n \"\"\"\n Predicts the class labels for the given sentences. 
The labels are directly added to the sentences.\n :param sentences: list of sentences\n :param mini_batch_size: mini batch size to use\n :return: the list of sentences containing the labels\n \"\"\"\n with torch.no_grad():\n if type(sentences) is Sentence:\n sentences = [sentences]\n\n filtered_sentences = self._filter_empty_sentences(sentences)\n\n batches = [filtered_sentences[x:x + mini_batch_size] for x in range(0, len(filtered_sentences), mini_batch_size)]\n\n for batch in batches:\n scores = self.forward(batch)\n predicted_labels = self._obtain_labels(scores)\n\n for (sentence, labels) in zip(batch, predicted_labels):\n sentence.labels = labels\n\n clear_embeddings(batch)\n\n return sentences\n\n @staticmethod\n def _filter_empty_sentences(sentences: List[Sentence]) -> List[Sentence]:\n filtered_sentences = [sentence for sentence in sentences if sentence.tokens]\n if len(sentences) != len(filtered_sentences):\n log.warning('Ignore {} sentence(s) with no tokens.'.format(len(sentences) - len(filtered_sentences)))\n return filtered_sentences\n\n def _calculate_loss(self, scores: List[List[float]], sentences: List[Sentence]) -> float:\n \"\"\"\n Calculates the loss.\n :param scores: the prediction scores from the model\n :param sentences: list of sentences\n :return: loss value\n \"\"\"\n if self.multi_label:\n return self._calculate_multi_label_loss(scores, sentences)\n\n return self._calculate_single_label_loss(scores, sentences)\n\n def _obtain_labels(self, scores: List[List[float]]) -> List[List[Label]]:\n \"\"\"\n Predicts the labels of sentences.\n :param scores: the prediction scores from the model\n :return: list of predicted labels\n \"\"\"\n\n if self.multi_label:\n return [self._get_multi_label(s) for s in scores]\n\n return [self._get_single_label(s) for s in scores]\n\n def _get_multi_label(self, label_scores) -> List[Label]:\n labels = []\n\n sigmoid = torch.nn.Sigmoid()\n\n results = list(map(lambda x: sigmoid(x), label_scores))\n for idx, conf in enumerate(results):\n if conf > 0.5:\n label = self.label_dictionary.get_item_for_index(idx)\n labels.append(Label(label, conf.item()))\n\n return labels\n\n def _get_single_label(self, label_scores) -> List[Label]:\n conf, idx = torch.max(label_scores, 0)\n label = self.label_dictionary.get_item_for_index(idx.item())\n\n return [Label(label, conf.item())]\n\n def _calculate_multi_label_loss(self, label_scores, sentences: List[Sentence]) -> float:\n sigmoid = nn.Sigmoid()\n return self.loss_function(sigmoid(label_scores), self._labels_to_one_hot(sentences))\n\n def _calculate_single_label_loss(self, label_scores, sentences: List[Sentence]) -> float:\n return self.loss_function(label_scores, self._labels_to_indices(sentences))\n\n def _labels_to_one_hot(self, sentences: List[Sentence]):\n label_list = [sentence.get_label_names() for sentence in sentences]\n one_hot = convert_labels_to_one_hot(label_list, self.label_dictionary)\n one_hot = [torch.FloatTensor(l).unsqueeze(0) for l in one_hot]\n one_hot = torch.cat(one_hot, 0).to(flair.device)\n return one_hot\n\n def _labels_to_indices(self, sentences: List[Sentence]):\n indices = [\n torch.LongTensor([self.label_dictionary.get_idx_for_item(label.value) for label in sentence.labels])\n for sentence in sentences\n ]\n\n vec = torch.cat(indices, 0).to(flair.device)\n\n return vec\n\n @staticmethod\n def load(model: str):\n model_file = None\n aws_resource_path = 'https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/models-v0.4'\n cache_dir = Path('models')\n\n if model.lower() == 
'de-offensive-language':\n base_path = '/'.join([aws_resource_path, 'TEXT-CLASSIFICATION_germ-eval-2018_task-1',\n 'germ-eval-2018-task-1.pt'])\n model_file = cached_path(base_path, cache_dir=cache_dir)\n\n elif model.lower() == 'en-sentiment':\n base_path = '/'.join([aws_resource_path, 'TEXT-CLASSIFICATION_imdb', 'imdb.pt'])\n model_file = cached_path(base_path, cache_dir=cache_dir)\n\n if model_file is not None:\n return TextClassifier.load_from_file(model_file)\n", "path": "flair/models/text_classification_model.py"}], "after_files": [{"content": "import warnings\nimport logging\nfrom pathlib import Path\nfrom typing import List, Union\n\nimport torch\nimport torch.nn as nn\n\nimport flair.nn\nimport flair.embeddings\nfrom flair.data import Dictionary, Sentence, Label\nfrom flair.file_utils import cached_path\nfrom flair.training_utils import convert_labels_to_one_hot, clear_embeddings\n\n\nlog = logging.getLogger('flair')\n\n\nclass TextClassifier(flair.nn.Model):\n \"\"\"\n Text Classification Model\n The model takes word embeddings, puts them into an LSTM to obtain a text representation, and puts the\n text representation in the end into a linear layer to get the actual class label.\n The model can handle single and multi class data sets.\n \"\"\"\n\n def __init__(self,\n document_embeddings: flair.embeddings.DocumentEmbeddings,\n label_dictionary: Dictionary,\n multi_label: bool):\n\n super(TextClassifier, self).__init__()\n\n self.document_embeddings: flair.embeddings.DocumentLSTMEmbeddings = document_embeddings\n self.label_dictionary: Dictionary = label_dictionary\n self.multi_label = multi_label\n\n self.decoder = nn.Linear(self.document_embeddings.embedding_length, len(self.label_dictionary))\n\n self._init_weights()\n\n if multi_label:\n self.loss_function = nn.BCELoss()\n else:\n self.loss_function = nn.CrossEntropyLoss()\n\n # auto-spawn on GPU if available\n self.to(flair.device)\n\n def _init_weights(self):\n nn.init.xavier_uniform_(self.decoder.weight)\n\n def forward(self, sentences) -> List[List[float]]:\n self.document_embeddings.embed(sentences)\n\n text_embedding_list = [sentence.get_embedding().unsqueeze(0) for sentence in sentences]\n text_embedding_tensor = torch.cat(text_embedding_list, 0).to(flair.device)\n\n label_scores = self.decoder(text_embedding_tensor)\n\n return label_scores\n\n def save(self, model_file: Union[str, Path]):\n \"\"\"\n Saves the current model to the provided file.\n :param model_file: the model file\n \"\"\"\n model_state = {\n 'state_dict': self.state_dict(),\n 'document_embeddings': self.document_embeddings,\n 'label_dictionary': self.label_dictionary,\n 'multi_label': self.multi_label,\n }\n torch.save(model_state, str(model_file), pickle_protocol=4)\n\n def save_checkpoint(self, model_file: Union[str, Path], optimizer_state: dict, scheduler_state: dict, epoch: int, loss: float):\n \"\"\"\n Saves the current model to the provided file.\n :param model_file: the model file\n \"\"\"\n model_state = {\n 'state_dict': self.state_dict(),\n 'document_embeddings': self.document_embeddings,\n 'label_dictionary': self.label_dictionary,\n 'multi_label': self.multi_label,\n 'optimizer_state_dict': optimizer_state,\n 'scheduler_state_dict': scheduler_state,\n 'epoch': epoch,\n 'loss': loss\n }\n torch.save(model_state, str(model_file), pickle_protocol=4)\n\n @classmethod\n def load_from_file(cls, model_file: Union[str, Path]):\n \"\"\"\n Loads the model from the given file.\n :param model_file: the model file\n :return: the loaded text classifier 
model\n \"\"\"\n state = TextClassifier._load_state(model_file)\n\n model = TextClassifier(\n document_embeddings=state['document_embeddings'],\n label_dictionary=state['label_dictionary'],\n multi_label=state['multi_label']\n )\n model.load_state_dict(state['state_dict'])\n model.eval()\n model.to(flair.device)\n\n return model\n\n @classmethod\n def load_checkpoint(cls, model_file: Union[str, Path]):\n state = TextClassifier._load_state(model_file)\n model = TextClassifier.load_from_file(model_file)\n\n epoch = state['epoch'] if 'epoch' in state else None\n loss = state['loss'] if 'loss' in state else None\n optimizer_state_dict = state['optimizer_state_dict'] if 'optimizer_state_dict' in state else None\n scheduler_state_dict = state['scheduler_state_dict'] if 'scheduler_state_dict' in state else None\n\n return {\n 'model': model, 'epoch': epoch, 'loss': loss,\n 'optimizer_state_dict': optimizer_state_dict, 'scheduler_state_dict': scheduler_state_dict\n }\n\n @classmethod\n def _load_state(cls, model_file: Union[str, Path]):\n # ATTENTION: suppressing torch serialization warnings. This needs to be taken out once we sort out recursive\n # serialization of torch objects\n # https://docs.python.org/3/library/warnings.html#temporarily-suppressing-warnings\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\")\n # load_big_file is a workaround by https://github.com/highway11git to load models on some Mac/Windows setups\n # see https://github.com/zalandoresearch/flair/issues/351\n f = flair.file_utils.load_big_file(str(model_file))\n state = torch.load(f, map_location=flair.device)\n return state\n\n def forward_loss(self, sentences: Union[List[Sentence], Sentence]) -> torch.tensor:\n scores = self.forward(sentences)\n return self._calculate_loss(scores, sentences)\n\n def forward_labels_and_loss(self, sentences: Union[Sentence, List[Sentence]]) -> (List[List[Label]], torch.tensor):\n scores = self.forward(sentences)\n labels = self._obtain_labels(scores)\n loss = self._calculate_loss(scores, sentences)\n return labels, loss\n\n def predict(self, sentences: Union[Sentence, List[Sentence]], mini_batch_size: int = 32) -> List[Sentence]:\n \"\"\"\n Predicts the class labels for the given sentences. 
The labels are directly added to the sentences.\n :param sentences: list of sentences\n :param mini_batch_size: mini batch size to use\n :return: the list of sentences containing the labels\n \"\"\"\n with torch.no_grad():\n if type(sentences) is Sentence:\n sentences = [sentences]\n\n filtered_sentences = self._filter_empty_sentences(sentences)\n\n batches = [filtered_sentences[x:x + mini_batch_size] for x in range(0, len(filtered_sentences), mini_batch_size)]\n\n for batch in batches:\n scores = self.forward(batch)\n predicted_labels = self._obtain_labels(scores)\n\n for (sentence, labels) in zip(batch, predicted_labels):\n sentence.labels = labels\n\n clear_embeddings(batch)\n\n return sentences\n\n @staticmethod\n def _filter_empty_sentences(sentences: List[Sentence]) -> List[Sentence]:\n filtered_sentences = [sentence for sentence in sentences if sentence.tokens]\n if len(sentences) != len(filtered_sentences):\n log.warning('Ignore {} sentence(s) with no tokens.'.format(len(sentences) - len(filtered_sentences)))\n return filtered_sentences\n\n def _calculate_loss(self, scores: List[List[float]], sentences: List[Sentence]) -> float:\n \"\"\"\n Calculates the loss.\n :param scores: the prediction scores from the model\n :param sentences: list of sentences\n :return: loss value\n \"\"\"\n if self.multi_label:\n return self._calculate_multi_label_loss(scores, sentences)\n\n return self._calculate_single_label_loss(scores, sentences)\n\n def _obtain_labels(self, scores: List[List[float]]) -> List[List[Label]]:\n \"\"\"\n Predicts the labels of sentences.\n :param scores: the prediction scores from the model\n :return: list of predicted labels\n \"\"\"\n\n if self.multi_label:\n return [self._get_multi_label(s) for s in scores]\n\n return [self._get_single_label(s) for s in scores]\n\n def _get_multi_label(self, label_scores) -> List[Label]:\n labels = []\n\n sigmoid = torch.nn.Sigmoid()\n\n results = list(map(lambda x: sigmoid(x), label_scores))\n for idx, conf in enumerate(results):\n if conf > 0.5:\n label = self.label_dictionary.get_item_for_index(idx)\n labels.append(Label(label, conf.item()))\n\n return labels\n\n def _get_single_label(self, label_scores) -> List[Label]:\n conf, idx = torch.max(label_scores, 0)\n label = self.label_dictionary.get_item_for_index(idx.item())\n\n return [Label(label, conf.item())]\n\n def _calculate_multi_label_loss(self, label_scores, sentences: List[Sentence]) -> float:\n sigmoid = nn.Sigmoid()\n return self.loss_function(sigmoid(label_scores), self._labels_to_one_hot(sentences))\n\n def _calculate_single_label_loss(self, label_scores, sentences: List[Sentence]) -> float:\n return self.loss_function(label_scores, self._labels_to_indices(sentences))\n\n def _labels_to_one_hot(self, sentences: List[Sentence]):\n label_list = [sentence.get_label_names() for sentence in sentences]\n one_hot = convert_labels_to_one_hot(label_list, self.label_dictionary)\n one_hot = [torch.FloatTensor(l).unsqueeze(0) for l in one_hot]\n one_hot = torch.cat(one_hot, 0).to(flair.device)\n return one_hot\n\n def _labels_to_indices(self, sentences: List[Sentence]):\n indices = [\n torch.LongTensor([self.label_dictionary.get_idx_for_item(label.value) for label in sentence.labels])\n for sentence in sentences\n ]\n\n vec = torch.cat(indices, 0).to(flair.device)\n\n return vec\n\n @staticmethod\n def load(model: str):\n model_file = None\n aws_resource_path = 'https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/models-v0.4'\n cache_dir = Path('models')\n\n if model.lower() == 
'de-offensive-language':\n base_path = '/'.join([aws_resource_path, 'TEXT-CLASSIFICATION_germ-eval-2018_task-1',\n 'germ-eval-2018-task-1.pt'])\n model_file = cached_path(base_path, cache_dir=cache_dir)\n\n elif model.lower() == 'en-sentiment':\n base_path = '/'.join([aws_resource_path, 'TEXT-CLASSIFICATION_imdb', 'imdb.pt'])\n model_file = cached_path(base_path, cache_dir=cache_dir)\n\n if model_file is not None:\n return TextClassifier.load_from_file(model_file)\n", "path": "flair/models/text_classification_model.py"}]} | 3,253 | 153 |
gh_patches_debug_28771 | rasdani/github-patches | git_diff | opsdroid__opsdroid-182 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add ssl to the web server
It should be possible to enable ssl on the web server and pass in paths to the ssl keys in the config.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opsdroid/web.py`
Content:
```
1 """Submodule to handle web requests in opsdroid."""
2
3 import json
4 import logging
5
6 from aiohttp import web
7
8 from opsdroid.const import __version__
9
10
11 _LOGGER = logging.getLogger(__name__)
12
13
14 class Web:
15 """Web server for opsdroid."""
16
17 def __init__(self, opsdroid):
18 """Create web object."""
19 self.opsdroid = opsdroid
20 try:
21 self.config = self.opsdroid.config["web"]
22 except KeyError:
23 self.config = {}
24 self.web_app = web.Application(loop=self.opsdroid.eventloop)
25 self.web_app.router.add_get('/', self.web_index_handler)
26 self.web_app.router.add_get('', self.web_index_handler)
27 self.web_app.router.add_get('/stats', self.web_stats_handler)
28 self.web_app.router.add_get('/stats/', self.web_stats_handler)
29
30 @property
31 def get_port(self):
32 """Return port from config or the default."""
33 try:
34 port = self.config["port"]
35 except KeyError:
36 port = 8080
37 return port
38
39 @property
40 def get_host(self):
41 """Return host from config or the default."""
42 try:
43 host = self.config["host"]
44 except KeyError:
45 host = '127.0.0.1'
46 return host
47
48 def start(self):
49 """Start web servers."""
50 _LOGGER.debug(
51 "Starting web server with host %s and port %s",
52 self.get_host, self.get_port)
53 web.run_app(self.web_app, host=self.get_host,
54 port=self.get_port, print=_LOGGER.info)
55
56 @staticmethod
57 def build_response(status, result):
58 """Build a json response object."""
59 return web.Response(text=json.dumps(result), status=status)
60
61 def web_index_handler(self, request):
62 """Handle root web request."""
63 return self.build_response(200, {
64 "message": "Welcome to the opsdroid API"})
65
66 def web_stats_handler(self, request):
67 """Handle stats request."""
68 stats = self.opsdroid.stats
69 try:
70 stats["average_response_time"] = \
71 stats["total_response_time"] / stats["total_responses"]
72 except ZeroDivisionError:
73 stats["average_response_time"] = 0
74
75 return self.build_response(200, {
76 "version": __version__,
77 "messages": {
78 "total_parsed": stats["messages_parsed"],
79 "webhooks_called": stats["webhooks_called"],
80 "total_response_time": stats["total_response_time"],
81 "total_responses": stats["total_responses"],
82 "average_response_time": stats["average_response_time"]
83 },
84 "modules": {
85 "skills": len(self.opsdroid.skills),
86 "connectors": len(self.opsdroid.connectors),
87 "databases": len(self.opsdroid.memory.databases)
88 }
89 })
90
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opsdroid/web.py b/opsdroid/web.py
--- a/opsdroid/web.py
+++ b/opsdroid/web.py
@@ -2,6 +2,7 @@
import json
import logging
+import ssl
from aiohttp import web
@@ -33,7 +34,10 @@
try:
port = self.config["port"]
except KeyError:
- port = 8080
+ if self.get_ssl_context is not None:
+ port = 8443
+ else:
+ port = 8080
return port
@property
@@ -45,13 +49,28 @@
host = '127.0.0.1'
return host
+ @property
+ def get_ssl_context(self):
+ """Return the ssl context or None."""
+ try:
+ ssl_config = self.config["ssl"]
+ sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+ sslcontext.load_cert_chain(ssl_config["cert"], ssl_config["key"])
+ return sslcontext
+ except FileNotFoundError:
+ _LOGGER.error("Cannot find ssl cert or key.")
+ return None
+ except KeyError:
+ return None
+
def start(self):
"""Start web servers."""
_LOGGER.debug(
"Starting web server with host %s and port %s",
self.get_host, self.get_port)
web.run_app(self.web_app, host=self.get_host,
- port=self.get_port, print=_LOGGER.info)
+ port=self.get_port, print=_LOGGER.info,
+ ssl_context=self.get_ssl_context)
@staticmethod
def build_response(status, result):
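
The patch hangs everything off an optional `ssl` block (with `cert` and `key` paths) under the `web` section of the configuration, and the default port becomes 8443 whenever a context can be built. A standalone sketch of that logic — the config dict and certificate paths below are placeholders for illustration, not opsdroid defaults:

```python
import ssl

web_config = {  # hypothetical "web" section of an opsdroid configuration file
    "host": "0.0.0.0",
    "ssl": {"cert": "/path/to/fullchain.pem", "key": "/path/to/privkey.pem"},
}

sslcontext = None
try:
    ssl_config = web_config["ssl"]
    # Same two calls the patched Web.get_ssl_context makes
    sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    sslcontext.load_cert_chain(ssl_config["cert"], ssl_config["key"])
except FileNotFoundError:
    sslcontext = None  # patch logs "Cannot find ssl cert or key." and serves plain HTTP
except KeyError:
    sslcontext = None  # no "ssl" block configured at all

# Port fallback mirrors the patched get_port property
port = web_config.get("port", 8443 if sslcontext is not None else 8080)
print(port)  # 8080 here, because the placeholder cert files do not exist
```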
| {"golden_diff": "diff --git a/opsdroid/web.py b/opsdroid/web.py\n--- a/opsdroid/web.py\n+++ b/opsdroid/web.py\n@@ -2,6 +2,7 @@\n \n import json\n import logging\n+import ssl\n \n from aiohttp import web\n \n@@ -33,7 +34,10 @@\n try:\n port = self.config[\"port\"]\n except KeyError:\n- port = 8080\n+ if self.get_ssl_context is not None:\n+ port = 8443\n+ else:\n+ port = 8080\n return port\n \n @property\n@@ -45,13 +49,28 @@\n host = '127.0.0.1'\n return host\n \n+ @property\n+ def get_ssl_context(self):\n+ \"\"\"Return the ssl context or None.\"\"\"\n+ try:\n+ ssl_config = self.config[\"ssl\"]\n+ sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)\n+ sslcontext.load_cert_chain(ssl_config[\"cert\"], ssl_config[\"key\"])\n+ return sslcontext\n+ except FileNotFoundError:\n+ _LOGGER.error(\"Cannot find ssl cert or key.\")\n+ return None\n+ except KeyError:\n+ return None\n+\n def start(self):\n \"\"\"Start web servers.\"\"\"\n _LOGGER.debug(\n \"Starting web server with host %s and port %s\",\n self.get_host, self.get_port)\n web.run_app(self.web_app, host=self.get_host,\n- port=self.get_port, print=_LOGGER.info)\n+ port=self.get_port, print=_LOGGER.info,\n+ ssl_context=self.get_ssl_context)\n \n @staticmethod\n def build_response(status, result):\n", "issue": "Add ssl to the web server\nIt should be possible to enable ssl on the web server and pass in paths to the ssl keys in the config.\n", "before_files": [{"content": "\"\"\"Submodule to handle web requests in opsdroid.\"\"\"\n\nimport json\nimport logging\n\nfrom aiohttp import web\n\nfrom opsdroid.const import __version__\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Web:\n \"\"\"Web server for opsdroid.\"\"\"\n\n def __init__(self, opsdroid):\n \"\"\"Create web object.\"\"\"\n self.opsdroid = opsdroid\n try:\n self.config = self.opsdroid.config[\"web\"]\n except KeyError:\n self.config = {}\n self.web_app = web.Application(loop=self.opsdroid.eventloop)\n self.web_app.router.add_get('/', self.web_index_handler)\n self.web_app.router.add_get('', self.web_index_handler)\n self.web_app.router.add_get('/stats', self.web_stats_handler)\n self.web_app.router.add_get('/stats/', self.web_stats_handler)\n\n @property\n def get_port(self):\n \"\"\"Return port from config or the default.\"\"\"\n try:\n port = self.config[\"port\"]\n except KeyError:\n port = 8080\n return port\n\n @property\n def get_host(self):\n \"\"\"Return host from config or the default.\"\"\"\n try:\n host = self.config[\"host\"]\n except KeyError:\n host = '127.0.0.1'\n return host\n\n def start(self):\n \"\"\"Start web servers.\"\"\"\n _LOGGER.debug(\n \"Starting web server with host %s and port %s\",\n self.get_host, self.get_port)\n web.run_app(self.web_app, host=self.get_host,\n port=self.get_port, print=_LOGGER.info)\n\n @staticmethod\n def build_response(status, result):\n \"\"\"Build a json response object.\"\"\"\n return web.Response(text=json.dumps(result), status=status)\n\n def web_index_handler(self, request):\n \"\"\"Handle root web request.\"\"\"\n return self.build_response(200, {\n \"message\": \"Welcome to the opsdroid API\"})\n\n def web_stats_handler(self, request):\n \"\"\"Handle stats request.\"\"\"\n stats = self.opsdroid.stats\n try:\n stats[\"average_response_time\"] = \\\n stats[\"total_response_time\"] / stats[\"total_responses\"]\n except ZeroDivisionError:\n stats[\"average_response_time\"] = 0\n\n return self.build_response(200, {\n \"version\": __version__,\n \"messages\": {\n \"total_parsed\": stats[\"messages_parsed\"],\n 
\"webhooks_called\": stats[\"webhooks_called\"],\n \"total_response_time\": stats[\"total_response_time\"],\n \"total_responses\": stats[\"total_responses\"],\n \"average_response_time\": stats[\"average_response_time\"]\n },\n \"modules\": {\n \"skills\": len(self.opsdroid.skills),\n \"connectors\": len(self.opsdroid.connectors),\n \"databases\": len(self.opsdroid.memory.databases)\n }\n })\n", "path": "opsdroid/web.py"}], "after_files": [{"content": "\"\"\"Submodule to handle web requests in opsdroid.\"\"\"\n\nimport json\nimport logging\nimport ssl\n\nfrom aiohttp import web\n\nfrom opsdroid.const import __version__\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Web:\n \"\"\"Web server for opsdroid.\"\"\"\n\n def __init__(self, opsdroid):\n \"\"\"Create web object.\"\"\"\n self.opsdroid = opsdroid\n try:\n self.config = self.opsdroid.config[\"web\"]\n except KeyError:\n self.config = {}\n self.web_app = web.Application(loop=self.opsdroid.eventloop)\n self.web_app.router.add_get('/', self.web_index_handler)\n self.web_app.router.add_get('', self.web_index_handler)\n self.web_app.router.add_get('/stats', self.web_stats_handler)\n self.web_app.router.add_get('/stats/', self.web_stats_handler)\n\n @property\n def get_port(self):\n \"\"\"Return port from config or the default.\"\"\"\n try:\n port = self.config[\"port\"]\n except KeyError:\n if self.get_ssl_context is not None:\n port = 8443\n else:\n port = 8080\n return port\n\n @property\n def get_host(self):\n \"\"\"Return host from config or the default.\"\"\"\n try:\n host = self.config[\"host\"]\n except KeyError:\n host = '127.0.0.1'\n return host\n\n @property\n def get_ssl_context(self):\n \"\"\"Return the ssl context or None.\"\"\"\n try:\n ssl_config = self.config[\"ssl\"]\n sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)\n sslcontext.load_cert_chain(ssl_config[\"cert\"], ssl_config[\"key\"])\n return sslcontext\n except FileNotFoundError:\n _LOGGER.error(\"Cannot find ssl cert or key.\")\n return None\n except KeyError:\n return None\n\n def start(self):\n \"\"\"Start web servers.\"\"\"\n _LOGGER.debug(\n \"Starting web server with host %s and port %s\",\n self.get_host, self.get_port)\n web.run_app(self.web_app, host=self.get_host,\n port=self.get_port, print=_LOGGER.info,\n ssl_context=self.get_ssl_context)\n\n @staticmethod\n def build_response(status, result):\n \"\"\"Build a json response object.\"\"\"\n return web.Response(text=json.dumps(result), status=status)\n\n def web_index_handler(self, request):\n \"\"\"Handle root web request.\"\"\"\n return self.build_response(200, {\n \"message\": \"Welcome to the opsdroid API\"})\n\n def web_stats_handler(self, request):\n \"\"\"Handle stats request.\"\"\"\n stats = self.opsdroid.stats\n try:\n stats[\"average_response_time\"] = \\\n stats[\"total_response_time\"] / stats[\"total_responses\"]\n except ZeroDivisionError:\n stats[\"average_response_time\"] = 0\n\n return self.build_response(200, {\n \"version\": __version__,\n \"messages\": {\n \"total_parsed\": stats[\"messages_parsed\"],\n \"webhooks_called\": stats[\"webhooks_called\"],\n \"total_response_time\": stats[\"total_response_time\"],\n \"total_responses\": stats[\"total_responses\"],\n \"average_response_time\": stats[\"average_response_time\"]\n },\n \"modules\": {\n \"skills\": len(self.opsdroid.skills),\n \"connectors\": len(self.opsdroid.connectors),\n \"databases\": len(self.opsdroid.memory.databases)\n }\n })\n", "path": "opsdroid/web.py"}]} | 1,080 | 387 |
gh_patches_debug_66421 | rasdani/github-patches | git_diff | Textualize__textual-1837 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[CSS] Descendant type selectors can't have a numeric in their name
Consider the following code:
```python
from textual.app import App, ComposeResult
from textual.containers import Vertical
from textual.widgets import Header, Footer, Label
class LabelH1( Label ):
...
class CSSOddnessApp( App[ None ] ):
CSS = """
Vertical LabelH1 {
background: red;
}
"""
def compose( self ) -> ComposeResult:
yield Header()
yield Vertical(
Label( "Label" ),
LabelH1( "LabelH1" ),
)
yield Footer()
if __name__ == "__main__":
CSSOddnessApp().run()
```
When run we get the following error:
```
Error in stylesheet:
/Users/davep/develop/python/textual-sandbox/css_oddness.py:CSSOddnessApp:1:19
╭────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
│ ❱ 1 │ │
│ 2 │ Vertical LabelH1 { │
│ 3 │ │ background: red; │
╰────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
• Expected one of 'combinator child', 'comment start', 'declaration set start', 'new selector', 'pseudo class', 'selector', 'selector class', 'selector id',
'selector universal', or 'whitespace'.
• Did you forget a semicolon at the end of a line?
```
The same thing happens with `Vertical > LabelH1`. On the other hand, if I remove the number from the inherited label widget:
```python
from textual.app import App, ComposeResult
from textual.containers import Vertical
from textual.widgets import Header, Footer, Label
class LabelHOne( Label ):
...
class CSSOddnessApp( App[ None ] ):
CSS = """
Vertical LabelHOne {
background: red;
}
"""
def compose( self ) -> ComposeResult:
yield Header()
yield Vertical(
Label( "Label" ),
LabelHOne( "LabelHOne" ),
)
yield Footer()
if __name__ == "__main__":
CSSOddnessApp().run()
```
this works fine. Likewise, if I retain the name but *don't* use combination:
```python
from textual.app import App, ComposeResult
from textual.containers import Vertical
from textual.widgets import Header, Footer, Label
class LabelH1( Label ):
...
class CSSOddnessApp( App[ None ] ):
CSS = """
LabelH1 {
background: red;
}
"""
def compose( self ) -> ComposeResult:
yield Header()
yield Vertical(
Label( "Label" ),
LabelH1( "LabelH1" ),
)
yield Footer()
if __name__ == "__main__":
CSSOddnessApp().run()
```
that also works fine.
I would suspect a variation on #1253.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/textual/css/tokenize.py`
Content:
```
1 from __future__ import annotations
2
3 import re
4 from pathlib import PurePath
5 from typing import Iterable
6
7 from textual.css.tokenizer import Expect, Token, Tokenizer
8
9 PERCENT = r"-?\d+\.?\d*%"
10 DECIMAL = r"-?\d+\.?\d*"
11 COMMA = r"\s*,\s*"
12 OPEN_BRACE = r"\(\s*"
13 CLOSE_BRACE = r"\s*\)"
14
15 HEX_COLOR = r"\#[0-9a-fA-F]{8}|\#[0-9a-fA-F]{6}|\#[0-9a-fA-F]{4}|\#[0-9a-fA-F]{3}"
16 RGB_COLOR = rf"rgb{OPEN_BRACE}{DECIMAL}{COMMA}{DECIMAL}{COMMA}{DECIMAL}{CLOSE_BRACE}|rgba{OPEN_BRACE}{DECIMAL}{COMMA}{DECIMAL}{COMMA}{DECIMAL}{COMMA}{DECIMAL}{CLOSE_BRACE}"
17 HSL_COLOR = rf"hsl{OPEN_BRACE}{DECIMAL}{COMMA}{PERCENT}{COMMA}{PERCENT}{CLOSE_BRACE}|hsla{OPEN_BRACE}{DECIMAL}{COMMA}{PERCENT}{COMMA}{PERCENT}{COMMA}{DECIMAL}{CLOSE_BRACE}"
18
19 COMMENT_START = r"\/\*"
20 SCALAR = rf"{DECIMAL}(?:fr|%|w|h|vw|vh)"
21 DURATION = r"\d+\.?\d*(?:ms|s)"
22 NUMBER = r"\-?\d+\.?\d*"
23 COLOR = rf"{HEX_COLOR}|{RGB_COLOR}|{HSL_COLOR}"
24 KEY_VALUE = r"[a-zA-Z_-][a-zA-Z0-9_-]*=[0-9a-zA-Z_\-\/]+"
25 TOKEN = "[a-zA-Z][a-zA-Z0-9_-]*"
26 STRING = r"\".*?\""
27 VARIABLE_REF = r"\$[a-zA-Z0-9_\-]+"
28
29 IDENTIFIER = r"[a-zA-Z_\-][a-zA-Z0-9_\-]*"
30
31 # Values permitted in variable and rule declarations.
32 DECLARATION_VALUES = {
33 "scalar": SCALAR,
34 "duration": DURATION,
35 "number": NUMBER,
36 "color": COLOR,
37 "key_value": KEY_VALUE,
38 "token": TOKEN,
39 "string": STRING,
40 "variable_ref": VARIABLE_REF,
41 }
42
43 # The tokenizers "expectation" while at the root/highest level of scope
44 # in the CSS file. At this level we might expect to see selectors, comments,
45 # variable definitions etc.
46 expect_root_scope = Expect(
47 whitespace=r"\s+",
48 comment_start=COMMENT_START,
49 selector_start_id=r"\#" + IDENTIFIER,
50 selector_start_class=r"\." + IDENTIFIER,
51 selector_start_universal=r"\*",
52 selector_start=IDENTIFIER,
53 variable_name=rf"{VARIABLE_REF}:",
54 ).expect_eof(True)
55
56 # After a variable declaration e.g. "$warning-text: TOKENS;"
57 # for tokenizing variable value ------^~~~~~~^
58 expect_variable_name_continue = Expect(
59 variable_value_end=r"\n|;",
60 whitespace=r"\s+",
61 comment_start=COMMENT_START,
62 **DECLARATION_VALUES,
63 ).expect_eof(True)
64
65 expect_comment_end = Expect(
66 comment_end=re.escape("*/"),
67 )
68
69 # After we come across a selector in CSS e.g. ".my-class", we may
70 # find other selectors, pseudo-classes... e.g. ".my-class :hover"
71 expect_selector_continue = Expect(
72 whitespace=r"\s+",
73 comment_start=COMMENT_START,
74 pseudo_class=r"\:[a-zA-Z_-]+",
75 selector_id=r"\#[a-zA-Z_\-][a-zA-Z0-9_\-]*",
76 selector_class=r"\.[a-zA-Z_\-][a-zA-Z0-9_\-]*",
77 selector_universal=r"\*",
78 selector=r"[a-zA-Z_\-]+",
79 combinator_child=">",
80 new_selector=r",",
81 declaration_set_start=r"\{",
82 )
83
84 # A rule declaration e.g. "text: red;"
85 # ^---^
86 expect_declaration = Expect(
87 whitespace=r"\s+",
88 comment_start=COMMENT_START,
89 declaration_name=r"[a-zA-Z_\-]+\:",
90 declaration_set_end=r"\}",
91 )
92
93 expect_declaration_solo = Expect(
94 whitespace=r"\s+",
95 comment_start=COMMENT_START,
96 declaration_name=r"[a-zA-Z_\-]+\:",
97 declaration_set_end=r"\}",
98 ).expect_eof(True)
99
100 # The value(s)/content from a rule declaration e.g. "text: red;"
101 # ^---^
102 expect_declaration_content = Expect(
103 declaration_end=r";",
104 whitespace=r"\s+",
105 comment_start=COMMENT_START,
106 **DECLARATION_VALUES,
107 important=r"\!important",
108 comma=",",
109 declaration_set_end=r"\}",
110 )
111
112 expect_declaration_content_solo = Expect(
113 declaration_end=r";",
114 whitespace=r"\s+",
115 comment_start=COMMENT_START,
116 **DECLARATION_VALUES,
117 important=r"\!important",
118 comma=",",
119 declaration_set_end=r"\}",
120 ).expect_eof(True)
121
122
123 class TokenizerState:
124 """State machine for the tokenizer.
125
126 Attributes:
127 EXPECT: The initial expectation of the tokenizer. Since we start tokenizing
128 at the root scope, we might expect to see either a variable or selector, for example.
129 STATE_MAP: Maps token names to Expects, defines the sets of valid tokens
130 that we'd expect to see next, given the current token. For example, if
131 we've just processed a variable declaration name, we next expect to see
132 the value of that variable.
133 """
134
135 EXPECT = expect_root_scope
136 STATE_MAP = {
137 "variable_name": expect_variable_name_continue,
138 "variable_value_end": expect_root_scope,
139 "selector_start": expect_selector_continue,
140 "selector_start_id": expect_selector_continue,
141 "selector_start_class": expect_selector_continue,
142 "selector_start_universal": expect_selector_continue,
143 "selector_id": expect_selector_continue,
144 "selector_class": expect_selector_continue,
145 "selector_universal": expect_selector_continue,
146 "declaration_set_start": expect_declaration,
147 "declaration_name": expect_declaration_content,
148 "declaration_end": expect_declaration,
149 "declaration_set_end": expect_root_scope,
150 }
151
152 def __call__(self, code: str, path: str | PurePath) -> Iterable[Token]:
153 tokenizer = Tokenizer(code, path=path)
154 expect = self.EXPECT
155 get_token = tokenizer.get_token
156 get_state = self.STATE_MAP.get
157 while True:
158 token = get_token(expect)
159 name = token.name
160 if name == "comment_start":
161 tokenizer.skip_to(expect_comment_end)
162 continue
163 elif name == "eof":
164 break
165 expect = get_state(name, expect)
166 yield token
167
168
169 class DeclarationTokenizerState(TokenizerState):
170 EXPECT = expect_declaration_solo
171 STATE_MAP = {
172 "declaration_name": expect_declaration_content,
173 "declaration_end": expect_declaration_solo,
174 }
175
176
177 class ValueTokenizerState(TokenizerState):
178 EXPECT = expect_declaration_content_solo
179
180
181 tokenize = TokenizerState()
182 tokenize_declarations = DeclarationTokenizerState()
183 tokenize_value = ValueTokenizerState()
184
185
186 def tokenize_values(values: dict[str, str]) -> dict[str, list[Token]]:
187 """Tokens the values in a dict of strings.
188
189 Args:
190 values: A mapping of CSS variable name on to a value, to be
191 added to the CSS context.
192
193 Returns:
194 A mapping of name on to a list of tokens,
195 """
196 value_tokens = {
197 name: list(tokenize_value(value, "__name__")) for name, value in values.items()
198 }
199 return value_tokens
200
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/textual/css/tokenize.py b/src/textual/css/tokenize.py
--- a/src/textual/css/tokenize.py
+++ b/src/textual/css/tokenize.py
@@ -75,7 +75,7 @@
selector_id=r"\#[a-zA-Z_\-][a-zA-Z0-9_\-]*",
selector_class=r"\.[a-zA-Z_\-][a-zA-Z0-9_\-]*",
selector_universal=r"\*",
- selector=r"[a-zA-Z_\-]+",
+ selector=IDENTIFIER,
combinator_child=">",
new_selector=r",",
declaration_set_start=r"\{",
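
The fix itself is a single pattern swap: the old `selector` regex `[a-zA-Z_\-]+` contains no digits, while the shared `IDENTIFIER` pattern permits them after the first character. A quick check using only the two regexes quoted from `tokenize.py` above (nothing textual-specific is imported) shows why `LabelH1` broke descendant selectors:

```python
import re

OLD_SELECTOR = r"[a-zA-Z_\-]+"              # `selector` pattern before the patch
IDENTIFIER = r"[a-zA-Z_\-][a-zA-Z0-9_\-]*"  # pattern the patch switches to

# The old pattern stops at the first digit, so "LabelH1" tokenizes as "LabelH";
# the leftover "1" matches none of the expected follow-on tokens, which is the
# "Expected one of 'combinator child', ..." error reported in the issue.
print(re.match(OLD_SELECTOR, "LabelH1").group())  # LabelH
print(re.match(IDENTIFIER, "LabelH1").group())    # LabelH1
```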
| {"golden_diff": "diff --git a/src/textual/css/tokenize.py b/src/textual/css/tokenize.py\n--- a/src/textual/css/tokenize.py\n+++ b/src/textual/css/tokenize.py\n@@ -75,7 +75,7 @@\n selector_id=r\"\\#[a-zA-Z_\\-][a-zA-Z0-9_\\-]*\",\n selector_class=r\"\\.[a-zA-Z_\\-][a-zA-Z0-9_\\-]*\",\n selector_universal=r\"\\*\",\n- selector=r\"[a-zA-Z_\\-]+\",\n+ selector=IDENTIFIER,\n combinator_child=\">\",\n new_selector=r\",\",\n declaration_set_start=r\"\\{\",\n", "issue": "[CSS] Descendant type selectors can't have a numeric in their name\nConsider the following code:\r\n\r\n```python\r\nfrom textual.app import App, ComposeResult\r\nfrom textual.containers import Vertical\r\nfrom textual.widgets import Header, Footer, Label\r\n\r\nclass LabelH1( Label ):\r\n ...\r\n\r\nclass CSSOddnessApp( App[ None ] ):\r\n\r\n CSS = \"\"\"\r\n Vertical LabelH1 {\r\n background: red;\r\n }\r\n \"\"\"\r\n\r\n def compose( self ) -> ComposeResult:\r\n yield Header()\r\n yield Vertical(\r\n Label( \"Label\" ),\r\n LabelH1( \"LabelH1\" ),\r\n )\r\n yield Footer()\r\n\r\nif __name__ == \"__main__\":\r\n CSSOddnessApp().run()\r\n```\r\n\r\nWhen run we get the following error:\r\n\r\n```\r\n Error in stylesheet:\r\n /Users/davep/develop/python/textual-sandbox/css_oddness.py:CSSOddnessApp:1:19\r\n\u256d\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256e\r\n\u2502 \u2771 1 \u2502 \u2502\r\n\u2502 2 \u2502 Vertical LabelH1 { \u2502\r\n\u2502 3 \u2502 \u2502 background: red; \u2502\r\n\u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256f\r\n \u2022 Expected one of 'combinator child', 'comment start', 'declaration set start', 'new selector', 'pseudo class', 'selector', 'selector class', 'selector id',\r\n 'selector universal', or 'whitespace'.\r\n \u2022 Did you forget a 
semicolon at the end of a line?\r\n```\r\n\r\nThe same thing happens with `Vertical LabelH1`. On the other hand, if I remove the number from the inherited label widget:\r\n\r\n```python\r\nfrom textual.app import App, ComposeResult\r\nfrom textual.containers import Vertical\r\nfrom textual.widgets import Header, Footer, Label\r\n\r\nclass LabelHOne( Label ):\r\n ...\r\n\r\nclass CSSOddnessApp( App[ None ] ):\r\n\r\n CSS = \"\"\"\r\n Vertical LabelHOne {\r\n background: red;\r\n }\r\n \"\"\"\r\n\r\n def compose( self ) -> ComposeResult:\r\n yield Header()\r\n yield Vertical(\r\n Label( \"Label\" ),\r\n LabelHOne( \"LabelHOne\" ),\r\n )\r\n yield Footer()\r\n\r\nif __name__ == \"__main__\":\r\n CSSOddnessApp().run()\r\n```\r\n\r\nthis works fine. Likewise, if I retain the name but *don't* use combination:\r\n\r\n```python\r\nfrom textual.app import App, ComposeResult\r\nfrom textual.containers import Vertical\r\nfrom textual.widgets import Header, Footer, Label\r\n\r\nclass LabelH1( Label ):\r\n ...\r\n\r\nclass CSSOddnessApp( App[ None ] ):\r\n\r\n CSS = \"\"\"\r\n LabelH1 {\r\n background: red;\r\n }\r\n \"\"\"\r\n\r\n def compose( self ) -> ComposeResult:\r\n yield Header()\r\n yield Vertical(\r\n Label( \"Label\" ),\r\n LabelH1( \"LabelH1\" ),\r\n )\r\n yield Footer()\r\n\r\nif __name__ == \"__main__\":\r\n CSSOddnessApp().run()\r\n```\r\n\r\nthat also works fine.\r\n\r\nI would suspect a variation on #1253.\n[CSS] Descendant type selectors can't have a numeric in their name\nConsider the following code:\r\n\r\n```python\r\nfrom textual.app import App, ComposeResult\r\nfrom textual.containers import Vertical\r\nfrom textual.widgets import Header, Footer, Label\r\n\r\nclass LabelH1( Label ):\r\n ...\r\n\r\nclass CSSOddnessApp( App[ None ] ):\r\n\r\n CSS = \"\"\"\r\n Vertical LabelH1 {\r\n background: red;\r\n }\r\n \"\"\"\r\n\r\n def compose( self ) -> ComposeResult:\r\n yield Header()\r\n yield Vertical(\r\n Label( \"Label\" ),\r\n LabelH1( \"LabelH1\" ),\r\n )\r\n yield Footer()\r\n\r\nif __name__ == \"__main__\":\r\n CSSOddnessApp().run()\r\n```\r\n\r\nWhen run we get the following error:\r\n\r\n```\r\n Error in stylesheet:\r\n /Users/davep/develop/python/textual-sandbox/css_oddness.py:CSSOddnessApp:1:19\r\n\u256d\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256e\r\n\u2502 \u2771 1 \u2502 \u2502\r\n\u2502 2 \u2502 Vertical LabelH1 { \u2502\r\n\u2502 3 \u2502 \u2502 background: red; 
\u2502\r\n\u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256f\r\n \u2022 Expected one of 'combinator child', 'comment start', 'declaration set start', 'new selector', 'pseudo class', 'selector', 'selector class', 'selector id',\r\n 'selector universal', or 'whitespace'.\r\n \u2022 Did you forget a semicolon at the end of a line?\r\n```\r\n\r\nThe same thing happens with `Vertical LabelH1`. On the other hand, if I remove the number from the inherited label widget:\r\n\r\n```python\r\nfrom textual.app import App, ComposeResult\r\nfrom textual.containers import Vertical\r\nfrom textual.widgets import Header, Footer, Label\r\n\r\nclass LabelHOne( Label ):\r\n ...\r\n\r\nclass CSSOddnessApp( App[ None ] ):\r\n\r\n CSS = \"\"\"\r\n Vertical LabelHOne {\r\n background: red;\r\n }\r\n \"\"\"\r\n\r\n def compose( self ) -> ComposeResult:\r\n yield Header()\r\n yield Vertical(\r\n Label( \"Label\" ),\r\n LabelHOne( \"LabelHOne\" ),\r\n )\r\n yield Footer()\r\n\r\nif __name__ == \"__main__\":\r\n CSSOddnessApp().run()\r\n```\r\n\r\nthis works fine. 
Likewise, if I retain the name but *don't* use combination:\r\n\r\n```python\r\nfrom textual.app import App, ComposeResult\r\nfrom textual.containers import Vertical\r\nfrom textual.widgets import Header, Footer, Label\r\n\r\nclass LabelH1( Label ):\r\n ...\r\n\r\nclass CSSOddnessApp( App[ None ] ):\r\n\r\n CSS = \"\"\"\r\n LabelH1 {\r\n background: red;\r\n }\r\n \"\"\"\r\n\r\n def compose( self ) -> ComposeResult:\r\n yield Header()\r\n yield Vertical(\r\n Label( \"Label\" ),\r\n LabelH1( \"LabelH1\" ),\r\n )\r\n yield Footer()\r\n\r\nif __name__ == \"__main__\":\r\n CSSOddnessApp().run()\r\n```\r\n\r\nthat also works fine.\r\n\r\nI would suspect a variation on #1253.\n", "before_files": [{"content": "from __future__ import annotations\n\nimport re\nfrom pathlib import PurePath\nfrom typing import Iterable\n\nfrom textual.css.tokenizer import Expect, Token, Tokenizer\n\nPERCENT = r\"-?\\d+\\.?\\d*%\"\nDECIMAL = r\"-?\\d+\\.?\\d*\"\nCOMMA = r\"\\s*,\\s*\"\nOPEN_BRACE = r\"\\(\\s*\"\nCLOSE_BRACE = r\"\\s*\\)\"\n\nHEX_COLOR = r\"\\#[0-9a-fA-F]{8}|\\#[0-9a-fA-F]{6}|\\#[0-9a-fA-F]{4}|\\#[0-9a-fA-F]{3}\"\nRGB_COLOR = rf\"rgb{OPEN_BRACE}{DECIMAL}{COMMA}{DECIMAL}{COMMA}{DECIMAL}{CLOSE_BRACE}|rgba{OPEN_BRACE}{DECIMAL}{COMMA}{DECIMAL}{COMMA}{DECIMAL}{COMMA}{DECIMAL}{CLOSE_BRACE}\"\nHSL_COLOR = rf\"hsl{OPEN_BRACE}{DECIMAL}{COMMA}{PERCENT}{COMMA}{PERCENT}{CLOSE_BRACE}|hsla{OPEN_BRACE}{DECIMAL}{COMMA}{PERCENT}{COMMA}{PERCENT}{COMMA}{DECIMAL}{CLOSE_BRACE}\"\n\nCOMMENT_START = r\"\\/\\*\"\nSCALAR = rf\"{DECIMAL}(?:fr|%|w|h|vw|vh)\"\nDURATION = r\"\\d+\\.?\\d*(?:ms|s)\"\nNUMBER = r\"\\-?\\d+\\.?\\d*\"\nCOLOR = rf\"{HEX_COLOR}|{RGB_COLOR}|{HSL_COLOR}\"\nKEY_VALUE = r\"[a-zA-Z_-][a-zA-Z0-9_-]*=[0-9a-zA-Z_\\-\\/]+\"\nTOKEN = \"[a-zA-Z][a-zA-Z0-9_-]*\"\nSTRING = r\"\\\".*?\\\"\"\nVARIABLE_REF = r\"\\$[a-zA-Z0-9_\\-]+\"\n\nIDENTIFIER = r\"[a-zA-Z_\\-][a-zA-Z0-9_\\-]*\"\n\n# Values permitted in variable and rule declarations.\nDECLARATION_VALUES = {\n \"scalar\": SCALAR,\n \"duration\": DURATION,\n \"number\": NUMBER,\n \"color\": COLOR,\n \"key_value\": KEY_VALUE,\n \"token\": TOKEN,\n \"string\": STRING,\n \"variable_ref\": VARIABLE_REF,\n}\n\n# The tokenizers \"expectation\" while at the root/highest level of scope\n# in the CSS file. At this level we might expect to see selectors, comments,\n# variable definitions etc.\nexpect_root_scope = Expect(\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n selector_start_id=r\"\\#\" + IDENTIFIER,\n selector_start_class=r\"\\.\" + IDENTIFIER,\n selector_start_universal=r\"\\*\",\n selector_start=IDENTIFIER,\n variable_name=rf\"{VARIABLE_REF}:\",\n).expect_eof(True)\n\n# After a variable declaration e.g. \"$warning-text: TOKENS;\"\n# for tokenizing variable value ------^~~~~~~^\nexpect_variable_name_continue = Expect(\n variable_value_end=r\"\\n|;\",\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n **DECLARATION_VALUES,\n).expect_eof(True)\n\nexpect_comment_end = Expect(\n comment_end=re.escape(\"*/\"),\n)\n\n# After we come across a selector in CSS e.g. \".my-class\", we may\n# find other selectors, pseudo-classes... e.g. \".my-class :hover\"\nexpect_selector_continue = Expect(\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n pseudo_class=r\"\\:[a-zA-Z_-]+\",\n selector_id=r\"\\#[a-zA-Z_\\-][a-zA-Z0-9_\\-]*\",\n selector_class=r\"\\.[a-zA-Z_\\-][a-zA-Z0-9_\\-]*\",\n selector_universal=r\"\\*\",\n selector=r\"[a-zA-Z_\\-]+\",\n combinator_child=\">\",\n new_selector=r\",\",\n declaration_set_start=r\"\\{\",\n)\n\n# A rule declaration e.g. 
\"text: red;\"\n# ^---^\nexpect_declaration = Expect(\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n declaration_name=r\"[a-zA-Z_\\-]+\\:\",\n declaration_set_end=r\"\\}\",\n)\n\nexpect_declaration_solo = Expect(\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n declaration_name=r\"[a-zA-Z_\\-]+\\:\",\n declaration_set_end=r\"\\}\",\n).expect_eof(True)\n\n# The value(s)/content from a rule declaration e.g. \"text: red;\"\n# ^---^\nexpect_declaration_content = Expect(\n declaration_end=r\";\",\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n **DECLARATION_VALUES,\n important=r\"\\!important\",\n comma=\",\",\n declaration_set_end=r\"\\}\",\n)\n\nexpect_declaration_content_solo = Expect(\n declaration_end=r\";\",\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n **DECLARATION_VALUES,\n important=r\"\\!important\",\n comma=\",\",\n declaration_set_end=r\"\\}\",\n).expect_eof(True)\n\n\nclass TokenizerState:\n \"\"\"State machine for the tokenizer.\n\n Attributes:\n EXPECT: The initial expectation of the tokenizer. Since we start tokenizing\n at the root scope, we might expect to see either a variable or selector, for example.\n STATE_MAP: Maps token names to Expects, defines the sets of valid tokens\n that we'd expect to see next, given the current token. For example, if\n we've just processed a variable declaration name, we next expect to see\n the value of that variable.\n \"\"\"\n\n EXPECT = expect_root_scope\n STATE_MAP = {\n \"variable_name\": expect_variable_name_continue,\n \"variable_value_end\": expect_root_scope,\n \"selector_start\": expect_selector_continue,\n \"selector_start_id\": expect_selector_continue,\n \"selector_start_class\": expect_selector_continue,\n \"selector_start_universal\": expect_selector_continue,\n \"selector_id\": expect_selector_continue,\n \"selector_class\": expect_selector_continue,\n \"selector_universal\": expect_selector_continue,\n \"declaration_set_start\": expect_declaration,\n \"declaration_name\": expect_declaration_content,\n \"declaration_end\": expect_declaration,\n \"declaration_set_end\": expect_root_scope,\n }\n\n def __call__(self, code: str, path: str | PurePath) -> Iterable[Token]:\n tokenizer = Tokenizer(code, path=path)\n expect = self.EXPECT\n get_token = tokenizer.get_token\n get_state = self.STATE_MAP.get\n while True:\n token = get_token(expect)\n name = token.name\n if name == \"comment_start\":\n tokenizer.skip_to(expect_comment_end)\n continue\n elif name == \"eof\":\n break\n expect = get_state(name, expect)\n yield token\n\n\nclass DeclarationTokenizerState(TokenizerState):\n EXPECT = expect_declaration_solo\n STATE_MAP = {\n \"declaration_name\": expect_declaration_content,\n \"declaration_end\": expect_declaration_solo,\n }\n\n\nclass ValueTokenizerState(TokenizerState):\n EXPECT = expect_declaration_content_solo\n\n\ntokenize = TokenizerState()\ntokenize_declarations = DeclarationTokenizerState()\ntokenize_value = ValueTokenizerState()\n\n\ndef tokenize_values(values: dict[str, str]) -> dict[str, list[Token]]:\n \"\"\"Tokens the values in a dict of strings.\n\n Args:\n values: A mapping of CSS variable name on to a value, to be\n added to the CSS context.\n\n Returns:\n A mapping of name on to a list of tokens,\n \"\"\"\n value_tokens = {\n name: list(tokenize_value(value, \"__name__\")) for name, value in values.items()\n }\n return value_tokens\n", "path": "src/textual/css/tokenize.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport re\nfrom pathlib import 
PurePath\nfrom typing import Iterable\n\nfrom textual.css.tokenizer import Expect, Token, Tokenizer\n\nPERCENT = r\"-?\\d+\\.?\\d*%\"\nDECIMAL = r\"-?\\d+\\.?\\d*\"\nCOMMA = r\"\\s*,\\s*\"\nOPEN_BRACE = r\"\\(\\s*\"\nCLOSE_BRACE = r\"\\s*\\)\"\n\nHEX_COLOR = r\"\\#[0-9a-fA-F]{8}|\\#[0-9a-fA-F]{6}|\\#[0-9a-fA-F]{4}|\\#[0-9a-fA-F]{3}\"\nRGB_COLOR = rf\"rgb{OPEN_BRACE}{DECIMAL}{COMMA}{DECIMAL}{COMMA}{DECIMAL}{CLOSE_BRACE}|rgba{OPEN_BRACE}{DECIMAL}{COMMA}{DECIMAL}{COMMA}{DECIMAL}{COMMA}{DECIMAL}{CLOSE_BRACE}\"\nHSL_COLOR = rf\"hsl{OPEN_BRACE}{DECIMAL}{COMMA}{PERCENT}{COMMA}{PERCENT}{CLOSE_BRACE}|hsla{OPEN_BRACE}{DECIMAL}{COMMA}{PERCENT}{COMMA}{PERCENT}{COMMA}{DECIMAL}{CLOSE_BRACE}\"\n\nCOMMENT_START = r\"\\/\\*\"\nSCALAR = rf\"{DECIMAL}(?:fr|%|w|h|vw|vh)\"\nDURATION = r\"\\d+\\.?\\d*(?:ms|s)\"\nNUMBER = r\"\\-?\\d+\\.?\\d*\"\nCOLOR = rf\"{HEX_COLOR}|{RGB_COLOR}|{HSL_COLOR}\"\nKEY_VALUE = r\"[a-zA-Z_-][a-zA-Z0-9_-]*=[0-9a-zA-Z_\\-\\/]+\"\nTOKEN = \"[a-zA-Z][a-zA-Z0-9_-]*\"\nSTRING = r\"\\\".*?\\\"\"\nVARIABLE_REF = r\"\\$[a-zA-Z0-9_\\-]+\"\n\nIDENTIFIER = r\"[a-zA-Z_\\-][a-zA-Z0-9_\\-]*\"\n\n# Values permitted in variable and rule declarations.\nDECLARATION_VALUES = {\n \"scalar\": SCALAR,\n \"duration\": DURATION,\n \"number\": NUMBER,\n \"color\": COLOR,\n \"key_value\": KEY_VALUE,\n \"token\": TOKEN,\n \"string\": STRING,\n \"variable_ref\": VARIABLE_REF,\n}\n\n# The tokenizers \"expectation\" while at the root/highest level of scope\n# in the CSS file. At this level we might expect to see selectors, comments,\n# variable definitions etc.\nexpect_root_scope = Expect(\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n selector_start_id=r\"\\#\" + IDENTIFIER,\n selector_start_class=r\"\\.\" + IDENTIFIER,\n selector_start_universal=r\"\\*\",\n selector_start=IDENTIFIER,\n variable_name=rf\"{VARIABLE_REF}:\",\n).expect_eof(True)\n\n# After a variable declaration e.g. \"$warning-text: TOKENS;\"\n# for tokenizing variable value ------^~~~~~~^\nexpect_variable_name_continue = Expect(\n variable_value_end=r\"\\n|;\",\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n **DECLARATION_VALUES,\n).expect_eof(True)\n\nexpect_comment_end = Expect(\n comment_end=re.escape(\"*/\"),\n)\n\n# After we come across a selector in CSS e.g. \".my-class\", we may\n# find other selectors, pseudo-classes... e.g. \".my-class :hover\"\nexpect_selector_continue = Expect(\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n pseudo_class=r\"\\:[a-zA-Z_-]+\",\n selector_id=r\"\\#[a-zA-Z_\\-][a-zA-Z0-9_\\-]*\",\n selector_class=r\"\\.[a-zA-Z_\\-][a-zA-Z0-9_\\-]*\",\n selector_universal=r\"\\*\",\n selector=IDENTIFIER,\n combinator_child=\">\",\n new_selector=r\",\",\n declaration_set_start=r\"\\{\",\n)\n\n# A rule declaration e.g. \"text: red;\"\n# ^---^\nexpect_declaration = Expect(\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n declaration_name=r\"[a-zA-Z_\\-]+\\:\",\n declaration_set_end=r\"\\}\",\n)\n\nexpect_declaration_solo = Expect(\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n declaration_name=r\"[a-zA-Z_\\-]+\\:\",\n declaration_set_end=r\"\\}\",\n).expect_eof(True)\n\n# The value(s)/content from a rule declaration e.g. 
\"text: red;\"\n# ^---^\nexpect_declaration_content = Expect(\n declaration_end=r\";\",\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n **DECLARATION_VALUES,\n important=r\"\\!important\",\n comma=\",\",\n declaration_set_end=r\"\\}\",\n)\n\nexpect_declaration_content_solo = Expect(\n declaration_end=r\";\",\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n **DECLARATION_VALUES,\n important=r\"\\!important\",\n comma=\",\",\n declaration_set_end=r\"\\}\",\n).expect_eof(True)\n\n\nclass TokenizerState:\n \"\"\"State machine for the tokenizer.\n\n Attributes:\n EXPECT: The initial expectation of the tokenizer. Since we start tokenizing\n at the root scope, we might expect to see either a variable or selector, for example.\n STATE_MAP: Maps token names to Expects, defines the sets of valid tokens\n that we'd expect to see next, given the current token. For example, if\n we've just processed a variable declaration name, we next expect to see\n the value of that variable.\n \"\"\"\n\n EXPECT = expect_root_scope\n STATE_MAP = {\n \"variable_name\": expect_variable_name_continue,\n \"variable_value_end\": expect_root_scope,\n \"selector_start\": expect_selector_continue,\n \"selector_start_id\": expect_selector_continue,\n \"selector_start_class\": expect_selector_continue,\n \"selector_start_universal\": expect_selector_continue,\n \"selector_id\": expect_selector_continue,\n \"selector_class\": expect_selector_continue,\n \"selector_universal\": expect_selector_continue,\n \"declaration_set_start\": expect_declaration,\n \"declaration_name\": expect_declaration_content,\n \"declaration_end\": expect_declaration,\n \"declaration_set_end\": expect_root_scope,\n }\n\n def __call__(self, code: str, path: str | PurePath) -> Iterable[Token]:\n tokenizer = Tokenizer(code, path=path)\n expect = self.EXPECT\n get_token = tokenizer.get_token\n get_state = self.STATE_MAP.get\n while True:\n token = get_token(expect)\n name = token.name\n if name == \"comment_start\":\n tokenizer.skip_to(expect_comment_end)\n continue\n elif name == \"eof\":\n break\n expect = get_state(name, expect)\n yield token\n\n\nclass DeclarationTokenizerState(TokenizerState):\n EXPECT = expect_declaration_solo\n STATE_MAP = {\n \"declaration_name\": expect_declaration_content,\n \"declaration_end\": expect_declaration_solo,\n }\n\n\nclass ValueTokenizerState(TokenizerState):\n EXPECT = expect_declaration_content_solo\n\n\ntokenize = TokenizerState()\ntokenize_declarations = DeclarationTokenizerState()\ntokenize_value = ValueTokenizerState()\n\n\ndef tokenize_values(values: dict[str, str]) -> dict[str, list[Token]]:\n \"\"\"Tokens the values in a dict of strings.\n\n Args:\n values: A mapping of CSS variable name on to a value, to be\n added to the CSS context.\n\n Returns:\n A mapping of name on to a list of tokens,\n \"\"\"\n value_tokens = {\n name: list(tokenize_value(value, \"__name__\")) for name, value in values.items()\n }\n return value_tokens\n", "path": "src/textual/css/tokenize.py"}]} | 3,789 | 143 |
gh_patches_debug_15963 | rasdani/github-patches | git_diff | marshmallow-code__webargs-557 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add support for headers to DjangoParser
```
NotImplementedError: Header parsing not supported by DjangoParser
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/webargs/djangoparser.py`
Content:
```
1 """Django request argument parsing.
2
3 Example usage: ::
4
5 from django.views.generic import View
6 from django.http import HttpResponse
7 from marshmallow import fields
8 from webargs.djangoparser import use_args
9
10 hello_args = {
11 'name': fields.Str(missing='World')
12 }
13
14 class MyView(View):
15
16 @use_args(hello_args)
17 def get(self, args, request):
18 return HttpResponse('Hello ' + args['name'])
19 """
20 from webargs import core
21 from webargs.multidictproxy import MultiDictProxy
22
23
24 def is_json_request(req):
25 return core.is_json(req.content_type)
26
27
28 class DjangoParser(core.Parser):
29 """Django request argument parser.
30
31 .. warning::
32
33 :class:`DjangoParser` does not override
34 :meth:`handle_error <webargs.core.Parser.handle_error>`, so your Django
35 views are responsible for catching any :exc:`ValidationErrors` raised by
36 the parser and returning the appropriate `HTTPResponse`.
37 """
38
39 def _raw_load_json(self, req):
40 """Read a json payload from the request for the core parser's load_json
41
42 Checks the input mimetype and may return 'missing' if the mimetype is
43 non-json, even if the request body is parseable as json."""
44 if not is_json_request(req):
45 return core.missing
46
47 return core.parse_json(req.body)
48
49 def load_querystring(self, req, schema):
50 """Return query params from the request as a MultiDictProxy."""
51 return MultiDictProxy(req.GET, schema)
52
53 def load_form(self, req, schema):
54 """Return form values from the request as a MultiDictProxy."""
55 return MultiDictProxy(req.POST, schema)
56
57 def load_cookies(self, req, schema):
58 """Return cookies from the request."""
59 return req.COOKIES
60
61 def load_headers(self, req, schema):
62 raise NotImplementedError(
63 f"Header parsing not supported by {self.__class__.__name__}"
64 )
65
66 def load_files(self, req, schema):
67 """Return files from the request as a MultiDictProxy."""
68 return MultiDictProxy(req.FILES, schema)
69
70 def get_request_from_view_args(self, view, args, kwargs):
71 # The first argument is either `self` or `request`
72 try: # self.request
73 return args[0].request
74 except AttributeError: # first arg is request
75 return args[0]
76
77
78 parser = DjangoParser()
79 use_args = parser.use_args
80 use_kwargs = parser.use_kwargs
81
```
Path: `setup.py`
Content:
```
1 import re
2 from setuptools import setup, find_packages
3
4 FRAMEWORKS = [
5 "Flask>=0.12.2",
6 "Django>=1.11.16",
7 "bottle>=0.12.13",
8 "tornado>=4.5.2",
9 "pyramid>=1.9.1",
10 "webapp2>=3.0.0b1",
11 "falcon>=2.0.0",
12 "aiohttp>=3.0.0",
13 ]
14 EXTRAS_REQUIRE = {
15 "frameworks": FRAMEWORKS,
16 "tests": [
17 "pytest",
18 "webtest==2.0.35",
19 "webtest-aiohttp==2.0.0",
20 "pytest-aiohttp>=0.3.0",
21 ]
22 + FRAMEWORKS,
23 "lint": [
24 "mypy==0.790",
25 "flake8==3.8.4",
26 "flake8-bugbear==20.1.4",
27 "pre-commit~=2.4",
28 ],
29 "docs": ["Sphinx==3.3.0", "sphinx-issues==1.2.0", "sphinx-typlog-theme==0.8.0"]
30 + FRAMEWORKS,
31 }
32 EXTRAS_REQUIRE["dev"] = EXTRAS_REQUIRE["tests"] + EXTRAS_REQUIRE["lint"] + ["tox"]
33
34
35 def find_version(fname):
36 """Attempts to find the version number in the file names fname.
37 Raises RuntimeError if not found.
38 """
39 version = ""
40 with open(fname) as fp:
41 reg = re.compile(r'__version__ = [\'"]([^\'"]*)[\'"]')
42 for line in fp:
43 m = reg.match(line)
44 if m:
45 version = m.group(1)
46 break
47 if not version:
48 raise RuntimeError("Cannot find version information")
49 return version
50
51
52 def read(fname):
53 with open(fname) as fp:
54 content = fp.read()
55 return content
56
57
58 setup(
59 name="webargs",
60 version=find_version("src/webargs/__init__.py"),
61 description=(
62 "Declarative parsing and validation of HTTP request objects, "
63 "with built-in support for popular web frameworks, including "
64 "Flask, Django, Bottle, Tornado, Pyramid, webapp2, Falcon, and aiohttp."
65 ),
66 long_description=read("README.rst"),
67 author="Steven Loria",
68 author_email="[email protected]",
69 url="https://github.com/marshmallow-code/webargs",
70 packages=find_packages("src"),
71 package_dir={"": "src"},
72 install_requires=["marshmallow>=3.0.0"],
73 extras_require=EXTRAS_REQUIRE,
74 license="MIT",
75 zip_safe=False,
76 keywords=(
77 "webargs",
78 "http",
79 "flask",
80 "django",
81 "bottle",
82 "tornado",
83 "aiohttp",
84 "webapp2",
85 "request",
86 "arguments",
87 "validation",
88 "parameters",
89 "rest",
90 "api",
91 "marshmallow",
92 ),
93 python_requires=">=3.6",
94 classifiers=[
95 "Development Status :: 5 - Production/Stable",
96 "Intended Audience :: Developers",
97 "License :: OSI Approved :: MIT License",
98 "Natural Language :: English",
99 "Programming Language :: Python :: 3",
100 "Programming Language :: Python :: 3.6",
101 "Programming Language :: Python :: 3.7",
102 "Programming Language :: Python :: 3.8",
103 "Programming Language :: Python :: 3.9",
104 "Programming Language :: Python :: 3 :: Only",
105 "Topic :: Internet :: WWW/HTTP :: Dynamic Content",
106 "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
107 ],
108 test_suite="tests",
109 project_urls={
110 "Changelog": "https://webargs.readthedocs.io/en/latest/changelog.html",
111 "Issues": "https://github.com/marshmallow-code/webargs/issues",
112 "Funding": "https://opencollective.com/marshmallow",
113 "Tidelift": "https://tidelift.com/subscription/pkg/pypi-webargs?utm_source=pypi-marshmallow&utm_medium=pypi", # noqa
114 },
115 )
116
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -3,7 +3,7 @@
FRAMEWORKS = [
"Flask>=0.12.2",
- "Django>=1.11.16",
+ "Django>=2.2.0",
"bottle>=0.12.13",
"tornado>=4.5.2",
"pyramid>=1.9.1",
diff --git a/src/webargs/djangoparser.py b/src/webargs/djangoparser.py
--- a/src/webargs/djangoparser.py
+++ b/src/webargs/djangoparser.py
@@ -59,9 +59,10 @@
return req.COOKIES
def load_headers(self, req, schema):
- raise NotImplementedError(
- f"Header parsing not supported by {self.__class__.__name__}"
- )
+ """Return headers from the request."""
+ # Django's HttpRequest.headers is a case-insensitive dict type, but it
+ # isn't a multidict, so this is not proxied
+ return req.headers
def load_files(self, req, schema):
"""Return files from the request as a MultiDictProxy."""
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -3,7 +3,7 @@\n \n FRAMEWORKS = [\n \"Flask>=0.12.2\",\n- \"Django>=1.11.16\",\n+ \"Django>=2.2.0\",\n \"bottle>=0.12.13\",\n \"tornado>=4.5.2\",\n \"pyramid>=1.9.1\",\ndiff --git a/src/webargs/djangoparser.py b/src/webargs/djangoparser.py\n--- a/src/webargs/djangoparser.py\n+++ b/src/webargs/djangoparser.py\n@@ -59,9 +59,10 @@\n return req.COOKIES\n \n def load_headers(self, req, schema):\n- raise NotImplementedError(\n- f\"Header parsing not supported by {self.__class__.__name__}\"\n- )\n+ \"\"\"Return headers from the request.\"\"\"\n+ # Django's HttpRequest.headers is a case-insensitive dict type, but it\n+ # isn't a multidict, so this is not proxied\n+ return req.headers\n \n def load_files(self, req, schema):\n \"\"\"Return files from the request as a MultiDictProxy.\"\"\"\n", "issue": "Add support for headers to DjangoParser\n```\r\nNotImplementedError: Header parsing not supported by DjangoParser\r\n```\n", "before_files": [{"content": "\"\"\"Django request argument parsing.\n\nExample usage: ::\n\n from django.views.generic import View\n from django.http import HttpResponse\n from marshmallow import fields\n from webargs.djangoparser import use_args\n\n hello_args = {\n 'name': fields.Str(missing='World')\n }\n\n class MyView(View):\n\n @use_args(hello_args)\n def get(self, args, request):\n return HttpResponse('Hello ' + args['name'])\n\"\"\"\nfrom webargs import core\nfrom webargs.multidictproxy import MultiDictProxy\n\n\ndef is_json_request(req):\n return core.is_json(req.content_type)\n\n\nclass DjangoParser(core.Parser):\n \"\"\"Django request argument parser.\n\n .. warning::\n\n :class:`DjangoParser` does not override\n :meth:`handle_error <webargs.core.Parser.handle_error>`, so your Django\n views are responsible for catching any :exc:`ValidationErrors` raised by\n the parser and returning the appropriate `HTTPResponse`.\n \"\"\"\n\n def _raw_load_json(self, req):\n \"\"\"Read a json payload from the request for the core parser's load_json\n\n Checks the input mimetype and may return 'missing' if the mimetype is\n non-json, even if the request body is parseable as json.\"\"\"\n if not is_json_request(req):\n return core.missing\n\n return core.parse_json(req.body)\n\n def load_querystring(self, req, schema):\n \"\"\"Return query params from the request as a MultiDictProxy.\"\"\"\n return MultiDictProxy(req.GET, schema)\n\n def load_form(self, req, schema):\n \"\"\"Return form values from the request as a MultiDictProxy.\"\"\"\n return MultiDictProxy(req.POST, schema)\n\n def load_cookies(self, req, schema):\n \"\"\"Return cookies from the request.\"\"\"\n return req.COOKIES\n\n def load_headers(self, req, schema):\n raise NotImplementedError(\n f\"Header parsing not supported by {self.__class__.__name__}\"\n )\n\n def load_files(self, req, schema):\n \"\"\"Return files from the request as a MultiDictProxy.\"\"\"\n return MultiDictProxy(req.FILES, schema)\n\n def get_request_from_view_args(self, view, args, kwargs):\n # The first argument is either `self` or `request`\n try: # self.request\n return args[0].request\n except AttributeError: # first arg is request\n return args[0]\n\n\nparser = DjangoParser()\nuse_args = parser.use_args\nuse_kwargs = parser.use_kwargs\n", "path": "src/webargs/djangoparser.py"}, {"content": "import re\nfrom setuptools import setup, find_packages\n\nFRAMEWORKS = [\n \"Flask>=0.12.2\",\n \"Django>=1.11.16\",\n \"bottle>=0.12.13\",\n \"tornado>=4.5.2\",\n 
\"pyramid>=1.9.1\",\n \"webapp2>=3.0.0b1\",\n \"falcon>=2.0.0\",\n \"aiohttp>=3.0.0\",\n]\nEXTRAS_REQUIRE = {\n \"frameworks\": FRAMEWORKS,\n \"tests\": [\n \"pytest\",\n \"webtest==2.0.35\",\n \"webtest-aiohttp==2.0.0\",\n \"pytest-aiohttp>=0.3.0\",\n ]\n + FRAMEWORKS,\n \"lint\": [\n \"mypy==0.790\",\n \"flake8==3.8.4\",\n \"flake8-bugbear==20.1.4\",\n \"pre-commit~=2.4\",\n ],\n \"docs\": [\"Sphinx==3.3.0\", \"sphinx-issues==1.2.0\", \"sphinx-typlog-theme==0.8.0\"]\n + FRAMEWORKS,\n}\nEXTRAS_REQUIRE[\"dev\"] = EXTRAS_REQUIRE[\"tests\"] + EXTRAS_REQUIRE[\"lint\"] + [\"tox\"]\n\n\ndef find_version(fname):\n \"\"\"Attempts to find the version number in the file names fname.\n Raises RuntimeError if not found.\n \"\"\"\n version = \"\"\n with open(fname) as fp:\n reg = re.compile(r'__version__ = [\\'\"]([^\\'\"]*)[\\'\"]')\n for line in fp:\n m = reg.match(line)\n if m:\n version = m.group(1)\n break\n if not version:\n raise RuntimeError(\"Cannot find version information\")\n return version\n\n\ndef read(fname):\n with open(fname) as fp:\n content = fp.read()\n return content\n\n\nsetup(\n name=\"webargs\",\n version=find_version(\"src/webargs/__init__.py\"),\n description=(\n \"Declarative parsing and validation of HTTP request objects, \"\n \"with built-in support for popular web frameworks, including \"\n \"Flask, Django, Bottle, Tornado, Pyramid, webapp2, Falcon, and aiohttp.\"\n ),\n long_description=read(\"README.rst\"),\n author=\"Steven Loria\",\n author_email=\"[email protected]\",\n url=\"https://github.com/marshmallow-code/webargs\",\n packages=find_packages(\"src\"),\n package_dir={\"\": \"src\"},\n install_requires=[\"marshmallow>=3.0.0\"],\n extras_require=EXTRAS_REQUIRE,\n license=\"MIT\",\n zip_safe=False,\n keywords=(\n \"webargs\",\n \"http\",\n \"flask\",\n \"django\",\n \"bottle\",\n \"tornado\",\n \"aiohttp\",\n \"webapp2\",\n \"request\",\n \"arguments\",\n \"validation\",\n \"parameters\",\n \"rest\",\n \"api\",\n \"marshmallow\",\n ),\n python_requires=\">=3.6\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural Language :: English\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Topic :: Internet :: WWW/HTTP :: Dynamic Content\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\",\n ],\n test_suite=\"tests\",\n project_urls={\n \"Changelog\": \"https://webargs.readthedocs.io/en/latest/changelog.html\",\n \"Issues\": \"https://github.com/marshmallow-code/webargs/issues\",\n \"Funding\": \"https://opencollective.com/marshmallow\",\n \"Tidelift\": \"https://tidelift.com/subscription/pkg/pypi-webargs?utm_source=pypi-marshmallow&utm_medium=pypi\", # noqa\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "\"\"\"Django request argument parsing.\n\nExample usage: ::\n\n from django.views.generic import View\n from django.http import HttpResponse\n from marshmallow import fields\n from webargs.djangoparser import use_args\n\n hello_args = {\n 'name': fields.Str(missing='World')\n }\n\n class MyView(View):\n\n @use_args(hello_args)\n def get(self, args, request):\n return HttpResponse('Hello ' + args['name'])\n\"\"\"\nfrom webargs import core\nfrom webargs.multidictproxy import MultiDictProxy\n\n\ndef 
is_json_request(req):\n return core.is_json(req.content_type)\n\n\nclass DjangoParser(core.Parser):\n \"\"\"Django request argument parser.\n\n .. warning::\n\n :class:`DjangoParser` does not override\n :meth:`handle_error <webargs.core.Parser.handle_error>`, so your Django\n views are responsible for catching any :exc:`ValidationErrors` raised by\n the parser and returning the appropriate `HTTPResponse`.\n \"\"\"\n\n def _raw_load_json(self, req):\n \"\"\"Read a json payload from the request for the core parser's load_json\n\n Checks the input mimetype and may return 'missing' if the mimetype is\n non-json, even if the request body is parseable as json.\"\"\"\n if not is_json_request(req):\n return core.missing\n\n return core.parse_json(req.body)\n\n def load_querystring(self, req, schema):\n \"\"\"Return query params from the request as a MultiDictProxy.\"\"\"\n return MultiDictProxy(req.GET, schema)\n\n def load_form(self, req, schema):\n \"\"\"Return form values from the request as a MultiDictProxy.\"\"\"\n return MultiDictProxy(req.POST, schema)\n\n def load_cookies(self, req, schema):\n \"\"\"Return cookies from the request.\"\"\"\n return req.COOKIES\n\n def load_headers(self, req, schema):\n \"\"\"Return headers from the request.\"\"\"\n # Django's HttpRequest.headers is a case-insensitive dict type, but it\n # isn't a multidict, so this is not proxied\n return req.headers\n\n def load_files(self, req, schema):\n \"\"\"Return files from the request as a MultiDictProxy.\"\"\"\n return MultiDictProxy(req.FILES, schema)\n\n def get_request_from_view_args(self, view, args, kwargs):\n # The first argument is either `self` or `request`\n try: # self.request\n return args[0].request\n except AttributeError: # first arg is request\n return args[0]\n\n\nparser = DjangoParser()\nuse_args = parser.use_args\nuse_kwargs = parser.use_kwargs\n", "path": "src/webargs/djangoparser.py"}, {"content": "import re\nfrom setuptools import setup, find_packages\n\nFRAMEWORKS = [\n \"Flask>=0.12.2\",\n \"Django>=2.2.0\",\n \"bottle>=0.12.13\",\n \"tornado>=4.5.2\",\n \"pyramid>=1.9.1\",\n \"webapp2>=3.0.0b1\",\n \"falcon>=2.0.0\",\n \"aiohttp>=3.0.0\",\n]\nEXTRAS_REQUIRE = {\n \"frameworks\": FRAMEWORKS,\n \"tests\": [\n \"pytest\",\n \"webtest==2.0.35\",\n \"webtest-aiohttp==2.0.0\",\n \"pytest-aiohttp>=0.3.0\",\n ]\n + FRAMEWORKS,\n \"lint\": [\n \"mypy==0.790\",\n \"flake8==3.8.4\",\n \"flake8-bugbear==20.1.4\",\n \"pre-commit~=2.4\",\n ],\n \"docs\": [\"Sphinx==3.3.0\", \"sphinx-issues==1.2.0\", \"sphinx-typlog-theme==0.8.0\"]\n + FRAMEWORKS,\n}\nEXTRAS_REQUIRE[\"dev\"] = EXTRAS_REQUIRE[\"tests\"] + EXTRAS_REQUIRE[\"lint\"] + [\"tox\"]\n\n\ndef find_version(fname):\n \"\"\"Attempts to find the version number in the file names fname.\n Raises RuntimeError if not found.\n \"\"\"\n version = \"\"\n with open(fname) as fp:\n reg = re.compile(r'__version__ = [\\'\"]([^\\'\"]*)[\\'\"]')\n for line in fp:\n m = reg.match(line)\n if m:\n version = m.group(1)\n break\n if not version:\n raise RuntimeError(\"Cannot find version information\")\n return version\n\n\ndef read(fname):\n with open(fname) as fp:\n content = fp.read()\n return content\n\n\nsetup(\n name=\"webargs\",\n version=find_version(\"src/webargs/__init__.py\"),\n description=(\n \"Declarative parsing and validation of HTTP request objects, \"\n \"with built-in support for popular web frameworks, including \"\n \"Flask, Django, Bottle, Tornado, Pyramid, webapp2, Falcon, and aiohttp.\"\n ),\n long_description=read(\"README.rst\"),\n 
author=\"Steven Loria\",\n author_email=\"[email protected]\",\n url=\"https://github.com/marshmallow-code/webargs\",\n packages=find_packages(\"src\"),\n package_dir={\"\": \"src\"},\n install_requires=[\"marshmallow>=3.0.0\"],\n extras_require=EXTRAS_REQUIRE,\n license=\"MIT\",\n zip_safe=False,\n keywords=(\n \"webargs\",\n \"http\",\n \"flask\",\n \"django\",\n \"bottle\",\n \"tornado\",\n \"aiohttp\",\n \"webapp2\",\n \"request\",\n \"arguments\",\n \"validation\",\n \"parameters\",\n \"rest\",\n \"api\",\n \"marshmallow\",\n ),\n python_requires=\">=3.6\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural Language :: English\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Topic :: Internet :: WWW/HTTP :: Dynamic Content\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\",\n ],\n test_suite=\"tests\",\n project_urls={\n \"Changelog\": \"https://webargs.readthedocs.io/en/latest/changelog.html\",\n \"Issues\": \"https://github.com/marshmallow-code/webargs/issues\",\n \"Funding\": \"https://opencollective.com/marshmallow\",\n \"Tidelift\": \"https://tidelift.com/subscription/pkg/pypi-webargs?utm_source=pypi-marshmallow&utm_medium=pypi\", # noqa\n },\n)\n", "path": "setup.py"}]} | 2,187 | 280 |
gh_patches_debug_16212 | rasdani/github-patches | git_diff | getsentry__sentry-46418 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Page not found when configuring DSN keys for project name "onboarding"
### Environment
SaaS (https://sentry.io/)
### Version
_No response_
### Link
_No response_
### DSN
_No response_
### Steps to Reproduce
1. Create or rename a project to "onboarding"
2. Go to Client Keys -> Configure
### Expected Result
Open the Client key configuration page
### Actual Result
Page not found error, as in the screenshot below.
Current workaround: rename the project.
URL: https://dev-curumas.sentry.io/settings/projects/onboarding/a9217e8f8e01407a9b6df81101793546/

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sentry/api/utils.py`
Content:
```
1 from __future__ import annotations
2
3 import datetime
4 import logging
5 import re
6 from datetime import timedelta
7 from typing import Any, List, Literal, Tuple, overload
8 from urllib.parse import urlparse
9
10 from django.http import HttpResponseNotAllowed
11 from django.utils import timezone
12 from django.views.decorators.csrf import csrf_exempt
13 from rest_framework.request import Request
14
15 from sentry import options
16
17 # Unfortunately, this function is imported as an export of this module in several places, keep it.
18 from sentry.auth.access import get_cached_organization_member # noqa
19 from sentry.auth.superuser import is_active_superuser
20 from sentry.models.organization import Organization
21 from sentry.search.utils import InvalidQuery, parse_datetime_string
22 from sentry.services.hybrid_cloud import extract_id_from
23 from sentry.services.hybrid_cloud.organization import (
24 RpcOrganization,
25 RpcOrganizationMember,
26 RpcUserOrganizationContext,
27 organization_service,
28 )
29 from sentry.utils.dates import parse_stats_period
30
31 logger = logging.getLogger(__name__)
32
33 MAX_STATS_PERIOD = timedelta(days=90)
34
35
36 class InvalidParams(Exception):
37 pass
38
39
40 def get_datetime_from_stats_period(
41 stats_period: str, now: datetime.datetime | None = None
42 ) -> datetime.datetime:
43 if now is None:
44 now = timezone.now()
45 parsed_stats_period = parse_stats_period(stats_period)
46 if parsed_stats_period is None:
47 raise InvalidParams(f"Invalid statsPeriod: {stats_period!r}")
48 try:
49 return now - parsed_stats_period
50 except OverflowError:
51 raise InvalidParams(f"Invalid statsPeriod: {stats_period!r}")
52
53
54 def default_start_end_dates(
55 now: datetime.datetime | None = None,
56 default_stats_period: datetime.timedelta = MAX_STATS_PERIOD,
57 ) -> tuple[datetime.datetime, datetime.datetime]:
58 if now is None:
59 now = timezone.now()
60 return now - default_stats_period, now
61
62
63 @overload
64 def get_date_range_from_params(
65 params: dict[str, Any],
66 optional: Literal[False] = ...,
67 default_stats_period: datetime.timedelta = ...,
68 ) -> tuple[datetime.datetime, datetime.datetime]:
69 ...
70
71
72 @overload
73 def get_date_range_from_params(
74 params: dict[str, Any],
75 optional: bool = ...,
76 default_stats_period: datetime.timedelta = ...,
77 ) -> tuple[None, None] | tuple[datetime.datetime, datetime.datetime]:
78 ...
79
80
81 def get_date_range_from_params(
82 params: dict[str, Any],
83 optional: bool = False,
84 default_stats_period: datetime.timedelta = MAX_STATS_PERIOD,
85 ) -> tuple[None, None] | tuple[datetime.datetime, datetime.datetime]:
86 """
87 A wrapper function for `get_date_range_from_stats_period` that allows us
88 to alias `statsPeriod` to ensure backward compatibility.
89
90 If `timeframe` is passed then convert to a time delta and make sure it
91 fits within our min/max period length. Values are in the format
92 <number><period_type>, where period type is one of `s` (seconds),
93 `m` (minutes), `h` (hours) or `d` (days).
94
95 Similarly, `timeframeStart` and `timeframeEnd` allow for selecting a
96 relative range, for example: 15 days ago through 8 days ago. This uses the same
97 format as `statsPeriod`.
98
99 :param params:
100 If `start` end `end` are passed, validate them, convert to `datetime` and
101 returns them if valid.
102 :param optional: When True, if no params passed then return `(None, None)`.
103 :param default_stats_period: When set, this becomes the interval upon which default start
104 and end dates are defined
105 :return: A length 2 tuple containing start/end or raises an `InvalidParams`
106 exception
107 """
108 timeframe = params.get("timeframe")
109 timeframe_start = params.get("timeframeStart")
110 timeframe_end = params.get("timeframeEnd")
111
112 if timeframe is not None:
113 params["statsPeriod"] = timeframe
114
115 elif timeframe_start or timeframe_end:
116 if not all([timeframe_start, timeframe_end]):
117 raise InvalidParams("timeframeStart and timeframeEnd are both required")
118 else:
119 params["statsPeriodStart"] = timeframe_start
120 params["statsPeriodEnd"] = timeframe_end
121
122 return get_date_range_from_stats_period(
123 params, optional=optional, default_stats_period=default_stats_period
124 )
125
126
127 @overload
128 def get_date_range_from_stats_period(
129 params: dict[str, Any],
130 optional: Literal[False] = ...,
131 default_stats_period: datetime.timedelta = ...,
132 ) -> tuple[datetime.datetime, datetime.datetime]:
133 ...
134
135
136 @overload
137 def get_date_range_from_stats_period(
138 params: dict[str, Any],
139 optional: bool = ...,
140 default_stats_period: datetime.timedelta = ...,
141 ) -> tuple[None, None] | tuple[datetime.datetime, datetime.datetime]:
142 ...
143
144
145 def get_date_range_from_stats_period(
146 params: dict[str, Any],
147 optional: bool = False,
148 default_stats_period: datetime.timedelta = MAX_STATS_PERIOD,
149 ) -> tuple[None, None] | tuple[datetime.datetime, datetime.datetime]:
150 """
151 Gets a date range from standard date range params we pass to the api.
152
153 If `statsPeriod` is passed then convert to a time delta and make sure it
154 fits within our min/max period length. Values are in the format
155 <number><period_type>, where period type is one of `s` (seconds),
156 `m` (minutes), `h` (hours) or `d` (days).
157
158 Similarly, `statsPeriodStart` and `statsPeriodEnd` allow for selecting a
159 relative range, for example: 15 days ago through 8 days ago. This uses the same
160 format as `statsPeriod`
161
162 :param params:
163 If `start` end `end` are passed, validate them, convert to `datetime` and
164 returns them if valid.
165 :param optional: When True, if no params passed then return `(None, None)`.
166 :param default_stats_period: When set, this becomes the interval upon which default start
167 and end dates are defined
168 :return: A length 2 tuple containing start/end or raises an `InvalidParams`
169 exception
170 """
171 now = timezone.now()
172
173 start, end = default_start_end_dates(now, default_stats_period)
174
175 stats_period = params.get("statsPeriod")
176 stats_period_start = params.get("statsPeriodStart")
177 stats_period_end = params.get("statsPeriodEnd")
178
179 if stats_period is not None:
180 start = get_datetime_from_stats_period(stats_period, now)
181
182 elif stats_period_start or stats_period_end:
183 if not stats_period_start or not stats_period_end:
184 raise InvalidParams("statsPeriodStart and statsPeriodEnd are both required")
185 start = get_datetime_from_stats_period(stats_period_start, now)
186 end = get_datetime_from_stats_period(stats_period_end, now)
187
188 elif params.get("start") or params.get("end"):
189 if not all([params.get("start"), params.get("end")]):
190 raise InvalidParams("start and end are both required")
191 try:
192 start = parse_datetime_string(params["start"])
193 end = parse_datetime_string(params["end"])
194 except InvalidQuery as e:
195 raise InvalidParams(str(e))
196 elif optional:
197 return None, None
198
199 if start >= end:
200 raise InvalidParams("start must be before end")
201
202 return start, end
203
204
205 # The wide typing allows us to move towards RpcUserOrganizationContext in the future to save RPC calls.
206 # If you can use the wider more correct type, please do.
207 def is_member_disabled_from_limit(
208 request: Request,
209 organization: RpcUserOrganizationContext | RpcOrganization | Organization | int,
210 ) -> bool:
211 user = request.user
212
213 # never limit sentry apps
214 if getattr(user, "is_sentry_app", False):
215 return False
216
217 # don't limit super users
218 if is_active_superuser(request):
219 return False
220
221 # must be a simple user at this point
222
223 member: RpcOrganizationMember | None
224 if isinstance(organization, RpcUserOrganizationContext):
225 member = organization.member
226 else:
227 member = organization_service.check_membership_by_id(
228 organization_id=extract_id_from(organization), user_id=user.id
229 )
230 if member is None:
231 return False
232 else:
233 return member.flags.member_limit__restricted
234
235
236 def generate_organization_hostname(org_slug: str) -> str:
237 url_prefix_hostname: str = urlparse(options.get("system.url-prefix")).netloc
238 org_base_hostname_template: str = options.get("system.organization-base-hostname")
239 if not org_base_hostname_template:
240 return url_prefix_hostname
241 has_org_slug_placeholder = "{slug}" in org_base_hostname_template
242 if not has_org_slug_placeholder:
243 return url_prefix_hostname
244 org_hostname = org_base_hostname_template.replace("{slug}", org_slug)
245 return org_hostname
246
247
248 def generate_organization_url(org_slug: str) -> str:
249 org_url_template: str = options.get("system.organization-url-template")
250 if not org_url_template:
251 return options.get("system.url-prefix") # type: ignore[no-any-return]
252 return org_url_template.replace("{hostname}", generate_organization_hostname(org_slug))
253
254
255 def generate_region_url(region_name: str | None = None) -> str:
256 region_url_template: str | None = options.get("system.region-api-url-template")
257 if region_name is None:
258 region_name = options.get("system.region") or None
259 if not region_url_template or not region_name:
260 return options.get("system.url-prefix") # type: ignore[no-any-return]
261 return region_url_template.replace("{region}", region_name)
262
263
264 _path_patterns: List[Tuple[re.Pattern[str], str]] = [
265 # /organizations/slug/section, but not /organizations/new
266 (re.compile(r"\/?organizations\/(?!new)[^\/]+\/(.*)"), r"/\1"),
267 # For /settings/:orgId/ -> /settings/organization/
268 (
269 re.compile(r"\/settings\/(?!account)(?!projects)(?!teams)[^\/]+\/?$"),
270 "/settings/organization/",
271 ),
272 # Move /settings/:orgId/:section -> /settings/:section
273 # but not /settings/organization or /settings/projects which is a new URL
274 (re.compile(r"\/?settings\/(?!account)(?!projects)(?!teams)[^\/]+\/(.*)"), r"/settings/\1"),
275 (re.compile(r"\/?join-request\/[^\/]+\/?.*"), r"/join-request/"),
276 (re.compile(r"\/?onboarding\/[^\/]+\/(.*)"), r"/onboarding/\1"),
277 (re.compile(r"\/?[^\/]+\/([^\/]+)\/getting-started\/(.*)"), r"/getting-started/\1/\2"),
278 ]
279
280
281 def customer_domain_path(path: str) -> str:
282 """
283 Server side companion to path normalizations found in withDomainRequired
284 """
285 for pattern, replacement in _path_patterns:
286 updated = pattern.sub(replacement, path)
287 if updated != path:
288 return updated
289 return path
290
291
292 def method_dispatch(**dispatch_mapping): # type: ignore[no-untyped-def]
293 """
294 Dispatches a incoming request to a different handler based on the HTTP method
295
296 >>> url('^foo$', method_dispatch(POST = post_handler, GET = get_handler)))
297 """
298
299 def invalid_method(request, *args, **kwargs): # type: ignore[no-untyped-def]
300 return HttpResponseNotAllowed(dispatch_mapping.keys())
301
302 def dispatcher(request, *args, **kwargs): # type: ignore[no-untyped-def]
303 handler = dispatch_mapping.get(request.method, invalid_method)
304 return handler(request, *args, **kwargs)
305
306 if dispatch_mapping.get("csrf_exempt"):
307 return csrf_exempt(dispatcher)
308
309 return dispatcher
310
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/sentry/api/utils.py b/src/sentry/api/utils.py
--- a/src/sentry/api/utils.py
+++ b/src/sentry/api/utils.py
@@ -271,10 +271,13 @@
),
# Move /settings/:orgId/:section -> /settings/:section
# but not /settings/organization or /settings/projects which is a new URL
- (re.compile(r"\/?settings\/(?!account)(?!projects)(?!teams)[^\/]+\/(.*)"), r"/settings/\1"),
- (re.compile(r"\/?join-request\/[^\/]+\/?.*"), r"/join-request/"),
- (re.compile(r"\/?onboarding\/[^\/]+\/(.*)"), r"/onboarding/\1"),
- (re.compile(r"\/?[^\/]+\/([^\/]+)\/getting-started\/(.*)"), r"/getting-started/\1/\2"),
+ (re.compile(r"^\/?settings\/(?!account)(?!projects)(?!teams)[^\/]+\/(.*)"), r"/settings/\1"),
+ (re.compile(r"^\/?join-request\/[^\/]+\/?.*"), r"/join-request/"),
+ (re.compile(r"^\/?onboarding\/[^\/]+\/(.*)"), r"/onboarding/\1"),
+ (
+ re.compile(r"\/?(?:[^\/]+(?<!settings))\/([^\/]+)\/getting-started\/(.*)"),
+ r"/getting-started/\1/\2",
+ ),
]
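The essential change above is anchoring the rewrite patterns with `^` (plus a negative lookbehind on the getting-started rule), so the unanchored `onboarding` rule can no longer match the middle of a project-settings path and strip the DSN key from the URL. A quick before/after sketch (illustrative only; the path is the one reported in the issue):

```python
import re

path = "/settings/projects/onboarding/a9217e8f8e01407a9b6df81101793546/"

old = re.compile(r"\/?onboarding\/[^\/]+\/(.*)")   # unanchored (buggy)
new = re.compile(r"^\/?onboarding\/[^\/]+\/(.*)")  # anchored (patched)

print(old.sub(r"/onboarding/\1", path))  # -> '/settings/projects/onboarding/'  (key lost -> 404)
print(new.sub(r"/onboarding/\1", path))  # -> path unchanged, no spurious rewrite
```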
| {"golden_diff": "diff --git a/src/sentry/api/utils.py b/src/sentry/api/utils.py\n--- a/src/sentry/api/utils.py\n+++ b/src/sentry/api/utils.py\n@@ -271,10 +271,13 @@\n ),\n # Move /settings/:orgId/:section -> /settings/:section\n # but not /settings/organization or /settings/projects which is a new URL\n- (re.compile(r\"\\/?settings\\/(?!account)(?!projects)(?!teams)[^\\/]+\\/(.*)\"), r\"/settings/\\1\"),\n- (re.compile(r\"\\/?join-request\\/[^\\/]+\\/?.*\"), r\"/join-request/\"),\n- (re.compile(r\"\\/?onboarding\\/[^\\/]+\\/(.*)\"), r\"/onboarding/\\1\"),\n- (re.compile(r\"\\/?[^\\/]+\\/([^\\/]+)\\/getting-started\\/(.*)\"), r\"/getting-started/\\1/\\2\"),\n+ (re.compile(r\"^\\/?settings\\/(?!account)(?!projects)(?!teams)[^\\/]+\\/(.*)\"), r\"/settings/\\1\"),\n+ (re.compile(r\"^\\/?join-request\\/[^\\/]+\\/?.*\"), r\"/join-request/\"),\n+ (re.compile(r\"^\\/?onboarding\\/[^\\/]+\\/(.*)\"), r\"/onboarding/\\1\"),\n+ (\n+ re.compile(r\"\\/?(?:[^\\/]+(?<!settings))\\/([^\\/]+)\\/getting-started\\/(.*)\"),\n+ r\"/getting-started/\\1/\\2\",\n+ ),\n ]\n", "issue": "Pege not found when configuring DSN keys for project name \"onboarding\"\n### Environment\n\nSaaS (https://sentry.io/)\n\n### Version\n\n_No response_\n\n### Link\n\n_No response_\n\n### DSN\n\n_No response_\n\n### Steps to Reproduce\n\n1. Create or rename a project to \"onboarding\"\r\n2. Go to Client Keys -> Configure\n\n### Expected Result\n\nOpen the Client key configuration page\n\n### Actual Result\n\nPage not found error, as in the screenshot below.\r\n\r\nCurrent workaround: rename the project.\r\n\r\nURL: https://dev-curumas.sentry.io/settings/projects/onboarding/a9217e8f8e01407a9b6df81101793546/\r\n\r\n\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport datetime\nimport logging\nimport re\nfrom datetime import timedelta\nfrom typing import Any, List, Literal, Tuple, overload\nfrom urllib.parse import urlparse\n\nfrom django.http import HttpResponseNotAllowed\nfrom django.utils import timezone\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.request import Request\n\nfrom sentry import options\n\n# Unfortunately, this function is imported as an export of this module in several places, keep it.\nfrom sentry.auth.access import get_cached_organization_member # noqa\nfrom sentry.auth.superuser import is_active_superuser\nfrom sentry.models.organization import Organization\nfrom sentry.search.utils import InvalidQuery, parse_datetime_string\nfrom sentry.services.hybrid_cloud import extract_id_from\nfrom sentry.services.hybrid_cloud.organization import (\n RpcOrganization,\n RpcOrganizationMember,\n RpcUserOrganizationContext,\n organization_service,\n)\nfrom sentry.utils.dates import parse_stats_period\n\nlogger = logging.getLogger(__name__)\n\nMAX_STATS_PERIOD = timedelta(days=90)\n\n\nclass InvalidParams(Exception):\n pass\n\n\ndef get_datetime_from_stats_period(\n stats_period: str, now: datetime.datetime | None = None\n) -> datetime.datetime:\n if now is None:\n now = timezone.now()\n parsed_stats_period = parse_stats_period(stats_period)\n if parsed_stats_period is None:\n raise InvalidParams(f\"Invalid statsPeriod: {stats_period!r}\")\n try:\n return now - parsed_stats_period\n except OverflowError:\n raise InvalidParams(f\"Invalid statsPeriod: {stats_period!r}\")\n\n\ndef default_start_end_dates(\n now: datetime.datetime | None = None,\n default_stats_period: datetime.timedelta = MAX_STATS_PERIOD,\n) -> tuple[datetime.datetime, datetime.datetime]:\n if now is 
None:\n now = timezone.now()\n return now - default_stats_period, now\n\n\n@overload\ndef get_date_range_from_params(\n params: dict[str, Any],\n optional: Literal[False] = ...,\n default_stats_period: datetime.timedelta = ...,\n) -> tuple[datetime.datetime, datetime.datetime]:\n ...\n\n\n@overload\ndef get_date_range_from_params(\n params: dict[str, Any],\n optional: bool = ...,\n default_stats_period: datetime.timedelta = ...,\n) -> tuple[None, None] | tuple[datetime.datetime, datetime.datetime]:\n ...\n\n\ndef get_date_range_from_params(\n params: dict[str, Any],\n optional: bool = False,\n default_stats_period: datetime.timedelta = MAX_STATS_PERIOD,\n) -> tuple[None, None] | tuple[datetime.datetime, datetime.datetime]:\n \"\"\"\n A wrapper function for `get_date_range_from_stats_period` that allows us\n to alias `statsPeriod` to ensure backward compatibility.\n\n If `timeframe` is passed then convert to a time delta and make sure it\n fits within our min/max period length. Values are in the format\n <number><period_type>, where period type is one of `s` (seconds),\n `m` (minutes), `h` (hours) or `d` (days).\n\n Similarly, `timeframeStart` and `timeframeEnd` allow for selecting a\n relative range, for example: 15 days ago through 8 days ago. This uses the same\n format as `statsPeriod`.\n\n :param params:\n If `start` end `end` are passed, validate them, convert to `datetime` and\n returns them if valid.\n :param optional: When True, if no params passed then return `(None, None)`.\n :param default_stats_period: When set, this becomes the interval upon which default start\n and end dates are defined\n :return: A length 2 tuple containing start/end or raises an `InvalidParams`\n exception\n \"\"\"\n timeframe = params.get(\"timeframe\")\n timeframe_start = params.get(\"timeframeStart\")\n timeframe_end = params.get(\"timeframeEnd\")\n\n if timeframe is not None:\n params[\"statsPeriod\"] = timeframe\n\n elif timeframe_start or timeframe_end:\n if not all([timeframe_start, timeframe_end]):\n raise InvalidParams(\"timeframeStart and timeframeEnd are both required\")\n else:\n params[\"statsPeriodStart\"] = timeframe_start\n params[\"statsPeriodEnd\"] = timeframe_end\n\n return get_date_range_from_stats_period(\n params, optional=optional, default_stats_period=default_stats_period\n )\n\n\n@overload\ndef get_date_range_from_stats_period(\n params: dict[str, Any],\n optional: Literal[False] = ...,\n default_stats_period: datetime.timedelta = ...,\n) -> tuple[datetime.datetime, datetime.datetime]:\n ...\n\n\n@overload\ndef get_date_range_from_stats_period(\n params: dict[str, Any],\n optional: bool = ...,\n default_stats_period: datetime.timedelta = ...,\n) -> tuple[None, None] | tuple[datetime.datetime, datetime.datetime]:\n ...\n\n\ndef get_date_range_from_stats_period(\n params: dict[str, Any],\n optional: bool = False,\n default_stats_period: datetime.timedelta = MAX_STATS_PERIOD,\n) -> tuple[None, None] | tuple[datetime.datetime, datetime.datetime]:\n \"\"\"\n Gets a date range from standard date range params we pass to the api.\n\n If `statsPeriod` is passed then convert to a time delta and make sure it\n fits within our min/max period length. Values are in the format\n <number><period_type>, where period type is one of `s` (seconds),\n `m` (minutes), `h` (hours) or `d` (days).\n\n Similarly, `statsPeriodStart` and `statsPeriodEnd` allow for selecting a\n relative range, for example: 15 days ago through 8 days ago. 
This uses the same\n format as `statsPeriod`\n\n :param params:\n If `start` end `end` are passed, validate them, convert to `datetime` and\n returns them if valid.\n :param optional: When True, if no params passed then return `(None, None)`.\n :param default_stats_period: When set, this becomes the interval upon which default start\n and end dates are defined\n :return: A length 2 tuple containing start/end or raises an `InvalidParams`\n exception\n \"\"\"\n now = timezone.now()\n\n start, end = default_start_end_dates(now, default_stats_period)\n\n stats_period = params.get(\"statsPeriod\")\n stats_period_start = params.get(\"statsPeriodStart\")\n stats_period_end = params.get(\"statsPeriodEnd\")\n\n if stats_period is not None:\n start = get_datetime_from_stats_period(stats_period, now)\n\n elif stats_period_start or stats_period_end:\n if not stats_period_start or not stats_period_end:\n raise InvalidParams(\"statsPeriodStart and statsPeriodEnd are both required\")\n start = get_datetime_from_stats_period(stats_period_start, now)\n end = get_datetime_from_stats_period(stats_period_end, now)\n\n elif params.get(\"start\") or params.get(\"end\"):\n if not all([params.get(\"start\"), params.get(\"end\")]):\n raise InvalidParams(\"start and end are both required\")\n try:\n start = parse_datetime_string(params[\"start\"])\n end = parse_datetime_string(params[\"end\"])\n except InvalidQuery as e:\n raise InvalidParams(str(e))\n elif optional:\n return None, None\n\n if start >= end:\n raise InvalidParams(\"start must be before end\")\n\n return start, end\n\n\n# The wide typing allows us to move towards RpcUserOrganizationContext in the future to save RPC calls.\n# If you can use the wider more correct type, please do.\ndef is_member_disabled_from_limit(\n request: Request,\n organization: RpcUserOrganizationContext | RpcOrganization | Organization | int,\n) -> bool:\n user = request.user\n\n # never limit sentry apps\n if getattr(user, \"is_sentry_app\", False):\n return False\n\n # don't limit super users\n if is_active_superuser(request):\n return False\n\n # must be a simple user at this point\n\n member: RpcOrganizationMember | None\n if isinstance(organization, RpcUserOrganizationContext):\n member = organization.member\n else:\n member = organization_service.check_membership_by_id(\n organization_id=extract_id_from(organization), user_id=user.id\n )\n if member is None:\n return False\n else:\n return member.flags.member_limit__restricted\n\n\ndef generate_organization_hostname(org_slug: str) -> str:\n url_prefix_hostname: str = urlparse(options.get(\"system.url-prefix\")).netloc\n org_base_hostname_template: str = options.get(\"system.organization-base-hostname\")\n if not org_base_hostname_template:\n return url_prefix_hostname\n has_org_slug_placeholder = \"{slug}\" in org_base_hostname_template\n if not has_org_slug_placeholder:\n return url_prefix_hostname\n org_hostname = org_base_hostname_template.replace(\"{slug}\", org_slug)\n return org_hostname\n\n\ndef generate_organization_url(org_slug: str) -> str:\n org_url_template: str = options.get(\"system.organization-url-template\")\n if not org_url_template:\n return options.get(\"system.url-prefix\") # type: ignore[no-any-return]\n return org_url_template.replace(\"{hostname}\", generate_organization_hostname(org_slug))\n\n\ndef generate_region_url(region_name: str | None = None) -> str:\n region_url_template: str | None = options.get(\"system.region-api-url-template\")\n if region_name is None:\n region_name = 
options.get(\"system.region\") or None\n if not region_url_template or not region_name:\n return options.get(\"system.url-prefix\") # type: ignore[no-any-return]\n return region_url_template.replace(\"{region}\", region_name)\n\n\n_path_patterns: List[Tuple[re.Pattern[str], str]] = [\n # /organizations/slug/section, but not /organizations/new\n (re.compile(r\"\\/?organizations\\/(?!new)[^\\/]+\\/(.*)\"), r\"/\\1\"),\n # For /settings/:orgId/ -> /settings/organization/\n (\n re.compile(r\"\\/settings\\/(?!account)(?!projects)(?!teams)[^\\/]+\\/?$\"),\n \"/settings/organization/\",\n ),\n # Move /settings/:orgId/:section -> /settings/:section\n # but not /settings/organization or /settings/projects which is a new URL\n (re.compile(r\"\\/?settings\\/(?!account)(?!projects)(?!teams)[^\\/]+\\/(.*)\"), r\"/settings/\\1\"),\n (re.compile(r\"\\/?join-request\\/[^\\/]+\\/?.*\"), r\"/join-request/\"),\n (re.compile(r\"\\/?onboarding\\/[^\\/]+\\/(.*)\"), r\"/onboarding/\\1\"),\n (re.compile(r\"\\/?[^\\/]+\\/([^\\/]+)\\/getting-started\\/(.*)\"), r\"/getting-started/\\1/\\2\"),\n]\n\n\ndef customer_domain_path(path: str) -> str:\n \"\"\"\n Server side companion to path normalizations found in withDomainRequired\n \"\"\"\n for pattern, replacement in _path_patterns:\n updated = pattern.sub(replacement, path)\n if updated != path:\n return updated\n return path\n\n\ndef method_dispatch(**dispatch_mapping): # type: ignore[no-untyped-def]\n \"\"\"\n Dispatches a incoming request to a different handler based on the HTTP method\n\n >>> url('^foo$', method_dispatch(POST = post_handler, GET = get_handler)))\n \"\"\"\n\n def invalid_method(request, *args, **kwargs): # type: ignore[no-untyped-def]\n return HttpResponseNotAllowed(dispatch_mapping.keys())\n\n def dispatcher(request, *args, **kwargs): # type: ignore[no-untyped-def]\n handler = dispatch_mapping.get(request.method, invalid_method)\n return handler(request, *args, **kwargs)\n\n if dispatch_mapping.get(\"csrf_exempt\"):\n return csrf_exempt(dispatcher)\n\n return dispatcher\n", "path": "src/sentry/api/utils.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport datetime\nimport logging\nimport re\nfrom datetime import timedelta\nfrom typing import Any, List, Literal, Tuple, overload\nfrom urllib.parse import urlparse\n\nfrom django.http import HttpResponseNotAllowed\nfrom django.utils import timezone\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.request import Request\n\nfrom sentry import options\n\n# Unfortunately, this function is imported as an export of this module in several places, keep it.\nfrom sentry.auth.access import get_cached_organization_member # noqa\nfrom sentry.auth.superuser import is_active_superuser\nfrom sentry.models.organization import Organization\nfrom sentry.search.utils import InvalidQuery, parse_datetime_string\nfrom sentry.services.hybrid_cloud import extract_id_from\nfrom sentry.services.hybrid_cloud.organization import (\n RpcOrganization,\n RpcOrganizationMember,\n RpcUserOrganizationContext,\n organization_service,\n)\nfrom sentry.utils.dates import parse_stats_period\n\nlogger = logging.getLogger(__name__)\n\nMAX_STATS_PERIOD = timedelta(days=90)\n\n\nclass InvalidParams(Exception):\n pass\n\n\ndef get_datetime_from_stats_period(\n stats_period: str, now: datetime.datetime | None = None\n) -> datetime.datetime:\n if now is None:\n now = timezone.now()\n parsed_stats_period = parse_stats_period(stats_period)\n if parsed_stats_period is None:\n raise 
InvalidParams(f\"Invalid statsPeriod: {stats_period!r}\")\n try:\n return now - parsed_stats_period\n except OverflowError:\n raise InvalidParams(f\"Invalid statsPeriod: {stats_period!r}\")\n\n\ndef default_start_end_dates(\n now: datetime.datetime | None = None,\n default_stats_period: datetime.timedelta = MAX_STATS_PERIOD,\n) -> tuple[datetime.datetime, datetime.datetime]:\n if now is None:\n now = timezone.now()\n return now - default_stats_period, now\n\n\n@overload\ndef get_date_range_from_params(\n params: dict[str, Any],\n optional: Literal[False] = ...,\n default_stats_period: datetime.timedelta = ...,\n) -> tuple[datetime.datetime, datetime.datetime]:\n ...\n\n\n@overload\ndef get_date_range_from_params(\n params: dict[str, Any],\n optional: bool = ...,\n default_stats_period: datetime.timedelta = ...,\n) -> tuple[None, None] | tuple[datetime.datetime, datetime.datetime]:\n ...\n\n\ndef get_date_range_from_params(\n params: dict[str, Any],\n optional: bool = False,\n default_stats_period: datetime.timedelta = MAX_STATS_PERIOD,\n) -> tuple[None, None] | tuple[datetime.datetime, datetime.datetime]:\n \"\"\"\n A wrapper function for `get_date_range_from_stats_period` that allows us\n to alias `statsPeriod` to ensure backward compatibility.\n\n If `timeframe` is passed then convert to a time delta and make sure it\n fits within our min/max period length. Values are in the format\n <number><period_type>, where period type is one of `s` (seconds),\n `m` (minutes), `h` (hours) or `d` (days).\n\n Similarly, `timeframeStart` and `timeframeEnd` allow for selecting a\n relative range, for example: 15 days ago through 8 days ago. This uses the same\n format as `statsPeriod`.\n\n :param params:\n If `start` end `end` are passed, validate them, convert to `datetime` and\n returns them if valid.\n :param optional: When True, if no params passed then return `(None, None)`.\n :param default_stats_period: When set, this becomes the interval upon which default start\n and end dates are defined\n :return: A length 2 tuple containing start/end or raises an `InvalidParams`\n exception\n \"\"\"\n timeframe = params.get(\"timeframe\")\n timeframe_start = params.get(\"timeframeStart\")\n timeframe_end = params.get(\"timeframeEnd\")\n\n if timeframe is not None:\n params[\"statsPeriod\"] = timeframe\n\n elif timeframe_start or timeframe_end:\n if not all([timeframe_start, timeframe_end]):\n raise InvalidParams(\"timeframeStart and timeframeEnd are both required\")\n else:\n params[\"statsPeriodStart\"] = timeframe_start\n params[\"statsPeriodEnd\"] = timeframe_end\n\n return get_date_range_from_stats_period(\n params, optional=optional, default_stats_period=default_stats_period\n )\n\n\n@overload\ndef get_date_range_from_stats_period(\n params: dict[str, Any],\n optional: Literal[False] = ...,\n default_stats_period: datetime.timedelta = ...,\n) -> tuple[datetime.datetime, datetime.datetime]:\n ...\n\n\n@overload\ndef get_date_range_from_stats_period(\n params: dict[str, Any],\n optional: bool = ...,\n default_stats_period: datetime.timedelta = ...,\n) -> tuple[None, None] | tuple[datetime.datetime, datetime.datetime]:\n ...\n\n\ndef get_date_range_from_stats_period(\n params: dict[str, Any],\n optional: bool = False,\n default_stats_period: datetime.timedelta = MAX_STATS_PERIOD,\n) -> tuple[None, None] | tuple[datetime.datetime, datetime.datetime]:\n \"\"\"\n Gets a date range from standard date range params we pass to the api.\n\n If `statsPeriod` is passed then convert to a time delta and make sure it\n 
fits within our min/max period length. Values are in the format\n <number><period_type>, where period type is one of `s` (seconds),\n `m` (minutes), `h` (hours) or `d` (days).\n\n Similarly, `statsPeriodStart` and `statsPeriodEnd` allow for selecting a\n relative range, for example: 15 days ago through 8 days ago. This uses the same\n format as `statsPeriod`\n\n :param params:\n If `start` end `end` are passed, validate them, convert to `datetime` and\n returns them if valid.\n :param optional: When True, if no params passed then return `(None, None)`.\n :param default_stats_period: When set, this becomes the interval upon which default start\n and end dates are defined\n :return: A length 2 tuple containing start/end or raises an `InvalidParams`\n exception\n \"\"\"\n now = timezone.now()\n\n start, end = default_start_end_dates(now, default_stats_period)\n\n stats_period = params.get(\"statsPeriod\")\n stats_period_start = params.get(\"statsPeriodStart\")\n stats_period_end = params.get(\"statsPeriodEnd\")\n\n if stats_period is not None:\n start = get_datetime_from_stats_period(stats_period, now)\n\n elif stats_period_start or stats_period_end:\n if not stats_period_start or not stats_period_end:\n raise InvalidParams(\"statsPeriodStart and statsPeriodEnd are both required\")\n start = get_datetime_from_stats_period(stats_period_start, now)\n end = get_datetime_from_stats_period(stats_period_end, now)\n\n elif params.get(\"start\") or params.get(\"end\"):\n if not all([params.get(\"start\"), params.get(\"end\")]):\n raise InvalidParams(\"start and end are both required\")\n try:\n start = parse_datetime_string(params[\"start\"])\n end = parse_datetime_string(params[\"end\"])\n except InvalidQuery as e:\n raise InvalidParams(str(e))\n elif optional:\n return None, None\n\n if start >= end:\n raise InvalidParams(\"start must be before end\")\n\n return start, end\n\n\n# The wide typing allows us to move towards RpcUserOrganizationContext in the future to save RPC calls.\n# If you can use the wider more correct type, please do.\ndef is_member_disabled_from_limit(\n request: Request,\n organization: RpcUserOrganizationContext | RpcOrganization | Organization | int,\n) -> bool:\n user = request.user\n\n # never limit sentry apps\n if getattr(user, \"is_sentry_app\", False):\n return False\n\n # don't limit super users\n if is_active_superuser(request):\n return False\n\n # must be a simple user at this point\n\n member: RpcOrganizationMember | None\n if isinstance(organization, RpcUserOrganizationContext):\n member = organization.member\n else:\n member = organization_service.check_membership_by_id(\n organization_id=extract_id_from(organization), user_id=user.id\n )\n if member is None:\n return False\n else:\n return member.flags.member_limit__restricted\n\n\ndef generate_organization_hostname(org_slug: str) -> str:\n url_prefix_hostname: str = urlparse(options.get(\"system.url-prefix\")).netloc\n org_base_hostname_template: str = options.get(\"system.organization-base-hostname\")\n if not org_base_hostname_template:\n return url_prefix_hostname\n has_org_slug_placeholder = \"{slug}\" in org_base_hostname_template\n if not has_org_slug_placeholder:\n return url_prefix_hostname\n org_hostname = org_base_hostname_template.replace(\"{slug}\", org_slug)\n return org_hostname\n\n\ndef generate_organization_url(org_slug: str) -> str:\n org_url_template: str = options.get(\"system.organization-url-template\")\n if not org_url_template:\n return options.get(\"system.url-prefix\") # type: 
ignore[no-any-return]\n return org_url_template.replace(\"{hostname}\", generate_organization_hostname(org_slug))\n\n\ndef generate_region_url(region_name: str | None = None) -> str:\n region_url_template: str | None = options.get(\"system.region-api-url-template\")\n if region_name is None:\n region_name = options.get(\"system.region\") or None\n if not region_url_template or not region_name:\n return options.get(\"system.url-prefix\") # type: ignore[no-any-return]\n return region_url_template.replace(\"{region}\", region_name)\n\n\n_path_patterns: List[Tuple[re.Pattern[str], str]] = [\n # /organizations/slug/section, but not /organizations/new\n (re.compile(r\"\\/?organizations\\/(?!new)[^\\/]+\\/(.*)\"), r\"/\\1\"),\n # For /settings/:orgId/ -> /settings/organization/\n (\n re.compile(r\"\\/settings\\/(?!account)(?!projects)(?!teams)[^\\/]+\\/?$\"),\n \"/settings/organization/\",\n ),\n # Move /settings/:orgId/:section -> /settings/:section\n # but not /settings/organization or /settings/projects which is a new URL\n (re.compile(r\"^\\/?settings\\/(?!account)(?!projects)(?!teams)[^\\/]+\\/(.*)\"), r\"/settings/\\1\"),\n (re.compile(r\"^\\/?join-request\\/[^\\/]+\\/?.*\"), r\"/join-request/\"),\n (re.compile(r\"^\\/?onboarding\\/[^\\/]+\\/(.*)\"), r\"/onboarding/\\1\"),\n (\n re.compile(r\"\\/?(?:[^\\/]+(?<!settings))\\/([^\\/]+)\\/getting-started\\/(.*)\"),\n r\"/getting-started/\\1/\\2\",\n ),\n]\n\n\ndef customer_domain_path(path: str) -> str:\n \"\"\"\n Server side companion to path normalizations found in withDomainRequired\n \"\"\"\n for pattern, replacement in _path_patterns:\n updated = pattern.sub(replacement, path)\n if updated != path:\n return updated\n return path\n\n\ndef method_dispatch(**dispatch_mapping): # type: ignore[no-untyped-def]\n \"\"\"\n Dispatches a incoming request to a different handler based on the HTTP method\n\n >>> url('^foo$', method_dispatch(POST = post_handler, GET = get_handler)))\n \"\"\"\n\n def invalid_method(request, *args, **kwargs): # type: ignore[no-untyped-def]\n return HttpResponseNotAllowed(dispatch_mapping.keys())\n\n def dispatcher(request, *args, **kwargs): # type: ignore[no-untyped-def]\n handler = dispatch_mapping.get(request.method, invalid_method)\n return handler(request, *args, **kwargs)\n\n if dispatch_mapping.get(\"csrf_exempt\"):\n return csrf_exempt(dispatcher)\n\n return dispatcher\n", "path": "src/sentry/api/utils.py"}]} | 3,923 | 341 |
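The Sentry fix captured in the JSON blob above hinges on the `_path_patterns` regexes being unanchored: a pattern meant for paths that begin with `/onboarding/` also matches the middle of a settings URL whose project happens to be named `onboarding`, and `customer_domain_path` rewrites it. A minimal sketch of that mismatch, using only the two regex variants visible in the diff and the URL from the issue report; the standalone `re.sub` calls stand in for the real `customer_domain_path` loop:

```python
import re

path = "/settings/projects/onboarding/a9217e8f8e01407a9b6df81101793546/"

# Unanchored pattern from before the fix: it matches "/onboarding/<key>/"
# in the middle of the settings path and strips the client-key id.
old = re.compile(r"\/?onboarding\/[^\/]+\/(.*)")
print(old.sub(r"/onboarding/\1", path))
# -> /settings/projects/onboarding/   (key page is lost, hence "page not found")

# Anchored pattern from after the fix: it only matches when the path actually
# starts with /onboarding/, so the settings URL is left untouched.
new = re.compile(r"^\/?onboarding\/[^\/]+\/(.*)")
print(new.sub(r"/onboarding/\1", path))
# -> /settings/projects/onboarding/a9217e8f8e01407a9b6df81101793546/
```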
gh_patches_debug_39427 | rasdani/github-patches | git_diff | sublimelsp__LSP-520 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Intelephense v1.0 released with a few issues
I'm happy to announce Intelephense v1.0 is released. More info can be found [here](https://github.com/bmewburn/intelephense-docs)
I'm not entirely sure if this is an LSP issue or a server issue.
When autocompleting method names I have to manually type the parentheses, and the tooltip information seems messed up.

* OSX 10.14 - Intelephense 1.0
* Package Control
--- END ISSUE ---
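For the tooltip half of the report, the shape of the server's reply matters: in LSP 3.x a signature's or parameter's `documentation` field may be either a plain string or a `MarkupContent` object of the form `{"kind": "plaintext" | "markdown", "value": ...}`, and a client that assumes a bare string will render the dict itself, which could produce exactly the kind of garbled tooltip shown in the screenshot. A minimal, hypothetical normalizer is sketched below; the function name and sample payloads are illustrative, not the plugin's API:

```python
from typing import Any, Optional

def documentation_text(documentation: Any) -> Optional[str]:
    """Accept both documentation shapes the LSP spec allows."""
    if documentation is None:
        return None
    if isinstance(documentation, str):
        # Older servers send documentation as a bare string.
        return documentation
    if isinstance(documentation, dict):
        # Newer servers may send MarkupContent: {"kind": ..., "value": ...}.
        return documentation.get("value")
    return None

# Two payloads a server might send for the same parameter:
print(documentation_text("The needle to search for."))
print(documentation_text({"kind": "markdown", "value": "The **needle** to search for."}))
```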
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugin/signature_help.py`
Content:
```
1 import mdpopups
2 import sublime
3 import sublime_plugin
4 import webbrowser
5 import re
6 import html
7
8 try:
9 from typing import Any, List, Dict, Optional
10 assert Any and List and Dict and Optional
11 except ImportError:
12 pass
13
14 from .core.configurations import is_supported_syntax
15 from .core.registry import config_for_scope, session_for_view, client_for_view
16 from .core.documents import get_document_position
17 from .core.events import global_events
18 from .core.protocol import Request
19 from .core.logging import debug
20 from .core.popups import popup_css, popup_class
21 from .core.settings import settings
22
23
24 class SignatureHelpListener(sublime_plugin.ViewEventListener):
25
26 def __init__(self, view):
27 self.view = view
28 self._initialized = False
29 self._signature_help_triggers = [] # type: List[str]
30 self._visible = False
31 self._language_id = ""
32 self._signatures = [] # type: List[Any]
33 self._active_signature = -1
34 self._active_parameter = -1
35
36 @classmethod
37 def is_applicable(cls, settings):
38 syntax = settings.get('syntax')
39 return syntax and is_supported_syntax(syntax)
40
41 def initialize(self):
42 session = session_for_view(self.view)
43 if session:
44 signatureHelpProvider = session.get_capability(
45 'signatureHelpProvider')
46 if signatureHelpProvider:
47 self._signature_help_triggers = signatureHelpProvider.get(
48 'triggerCharacters')
49
50 config = config_for_scope(self.view)
51 if config:
52 self._language_id = self._view_language(self.view, config.name)
53
54 self._initialized = True
55
56 def on_modified_async(self):
57 pos = self.view.sel()[0].begin()
58 # TODO: this will fire too often, narrow down using scopes or regex
59 if not self._initialized:
60 self.initialize()
61
62 if self._signature_help_triggers:
63 last_char = self.view.substr(pos - 1)
64 if last_char in self._signature_help_triggers:
65 self.request_signature_help(pos)
66 elif self._visible:
67 if last_char.isspace():
68 # Peek behind to find the last non-whitespace character.
69 last_char = self.view.substr(self.view.find_by_class(pos, False, ~0) - 1)
70 if last_char not in self._signature_help_triggers:
71 self.view.hide_popup()
72
73 def request_signature_help(self, point) -> None:
74 client = client_for_view(self.view)
75 if client:
76 global_events.publish("view.on_purge_changes", self.view)
77 document_position = get_document_position(self.view, point)
78 if document_position:
79 client.send_request(
80 Request.signatureHelp(document_position),
81 lambda response: self.handle_response(response, point))
82
83 def handle_response(self, response: 'Optional[Dict]', point) -> None:
84 if response is not None:
85 self._signatures = response.get("signatures", [])
86 self._active_signature = response.get("activeSignature", -1)
87 self._active_parameter = response.get("activeParameter", -1)
88
89 if self._signatures:
90 if not 0 <= self._active_signature < len(self._signatures):
91 debug("activeSignature {} not a valid index for signatures length {}".format(
92 self._active_signature, len(self._signatures)))
93 self._active_signature = 0
94 else:
95 if self._active_signature != -1:
96 debug("activeSignature should be -1 or null when no signatures are returned")
97 self._active_signature = -1
98
99 if len(self._signatures) > 0:
100 if self._visible:
101 self._update_popup()
102 else:
103 self._show_popup(point)
104
105 def on_query_context(self, key, _, operand, __):
106 if key != "lsp.signature_help":
107 return False # Let someone else handle this keybinding.
108 elif not self._visible:
109 if operand == 0:
110 self.request_signature_help(self.view.sel()[0].begin())
111 return True
112 else:
113 return False # Let someone else handle this keybinding.
114 elif len(self._signatures) < 2:
115 return False # Let someone else handle this keybinding.
116 else:
117 # We use the "operand" for the number -1 or +1. See the keybindings.
118 new_index = self._active_signature + operand
119
120 # clamp signature index
121 new_index = max(0, min(new_index, len(self._signatures) - 1))
122
123 # only update when changed
124 if new_index != self._active_signature:
125 self._active_signature = new_index
126 self._update_popup()
127
128 return True # We handled this keybinding.
129
130 def _show_popup(self, point: int) -> None:
131 mdpopups.show_popup(self.view,
132 self._build_popup_content(),
133 css=popup_css,
134 md=True,
135 flags=sublime.HIDE_ON_MOUSE_MOVE_AWAY,
136 location=point,
137 wrapper_class=popup_class,
138 max_width=800,
139 on_hide=self._on_hide,
140 on_navigate=self._on_hover_navigate)
141 self._visible = True
142
143 def _update_popup(self) -> None:
144 mdpopups.update_popup(self.view,
145 self._build_popup_content(),
146 css=popup_css,
147 md=True,
148 wrapper_class=popup_class)
149
150 def _build_popup_content(self) -> str:
151 if settings.highlight_active_signature_parameter:
152 return self._build_popup_content_style_vscode()
153 else:
154 # Default to "sublime".
155 return self._build_popup_content_style_sublime()
156
157 def _view_language(self, view: sublime.View, config_name: str) -> 'Optional[str]':
158 languages = view.settings().get('lsp_language')
159 return languages.get(config_name) if languages else None
160
161 def _on_hide(self):
162 self._visible = False
163
164 def _on_hover_navigate(self, href):
165 webbrowser.open_new_tab(href)
166
167 def _build_overload_selector(self) -> str:
168 return "**{}** of **{}** overloads (use the ↑ ↓ keys to navigate):\n".format(
169 str(self._active_signature + 1), str(len(self._signatures)))
170
171 def _build_popup_content_style_sublime(self) -> str:
172 signature = self._signatures[self._active_signature]
173 formatted = []
174
175 if len(self._signatures) > 1:
176 formatted.append(self._build_overload_selector())
177
178 signature_label = signature.get('label')
179 if len(signature_label) > 400:
180 label = "```{} ...```".format(signature_label[0:400]) # long code blocks = hangs
181 else:
182 label = "```{}\n{}\n```\n".format(self._language_id, signature_label)
183 formatted.append(label)
184
185 params = signature.get('parameters')
186 if params:
187 for parameter in params:
188 paramDocs = parameter.get('documentation', None)
189 if paramDocs:
190 formatted.append("**{}**\n".format(parameter.get('label')))
191 formatted.append("* *{}*\n".format(paramDocs))
192 sigDocs = signature.get('documentation', None)
193 if sigDocs:
194 formatted.append(sigDocs)
195 return "\n".join(formatted)
196
197 def _build_popup_content_style_vscode(self) -> str:
198 # Fetch all the relevant data.
199 signature_label = ""
200 signature_documentation = ""
201 parameter_label = ""
202 parameter_documentation = ""
203 if self._active_signature in range(0, len(self._signatures)):
204 signature = self._signatures[self._active_signature]
205 signature_label = html.escape(signature["label"], quote=False)
206 signature_documentation = signature.get("documentation", "") # Optional.
207 parameters = signature.get("parameters", None)
208 if parameters and self._active_parameter in range(0, len(parameters)):
209 parameter = parameters[self._active_parameter]
210 parameter_label = html.escape(parameter["label"], quote=False)
211 parameter_documentation = parameter.get("documentation", "") # Optional.
212
213 formatted = []
214
215 if len(self._signatures) > 1:
216 formatted.append(self._build_overload_selector())
217
218 # Write the active signature and give special treatment to the active parameter (if found).
219 # Note that this <div> class and the extra <pre> are copied from mdpopups' HTML output. When mdpopups changes
220 # its output style, we must update this literal string accordingly.
221 formatted.append('<div class="highlight"><pre>')
222 if parameter_label:
223 signature_label = self._replace_active_parameter(signature_label, parameter_label)
224 formatted.append(signature_label)
225 formatted.append("</pre></div>")
226
227 if parameter_documentation:
228 formatted.append(parameter_documentation)
229
230 if signature_documentation:
231 formatted.append(signature_documentation)
232
233 return "\n".join(formatted)
234
235 def _replace_active_parameter(self, signature: str, parameter: str) -> str:
236 if parameter[0].isalnum() and parameter[-1].isalnum():
237 pattern = r'\b{}\b'.format(re.escape(parameter))
238 else:
239 # If the left or right boundary of the parameter string is not an alphanumeric character, the \b check will
240 # never match. In this case, it's probably safe to assume the parameter string itself will be a good pattern
241 # to search for.
242 pattern = re.escape(parameter)
243 replacement = '<span style="font-weight: bold; text-decoration: underline">{}</span>'.format(parameter)
244 # FIXME: This is somewhat language-specific to look for an opening parenthesis. Most languages use parentheses
245 # for their parameter lists though.
246 start_of_param_list_pos = signature.find('(')
247 # Note that this works even when we don't find an opening parenthesis, because .find returns -1 in that case.
248 start_of_param_list = signature[start_of_param_list_pos + 1:]
249 return signature[:start_of_param_list_pos + 1] + re.sub(pattern, replacement, start_of_param_list, 1)
250
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugin/signature_help.py b/plugin/signature_help.py
--- a/plugin/signature_help.py
+++ b/plugin/signature_help.py
@@ -21,6 +21,22 @@
from .core.settings import settings
+def get_documentation(d: 'Dict[str, Any]') -> 'Optional[str]':
+ docs = d.get('documentation', None)
+ if docs is None:
+ return None
+ elif isinstance(docs, str):
+ # In older version of the protocol, documentation was just a string.
+ return docs
+ elif isinstance(docs, dict):
+ # This can be either "plaintext" or "markdown" format. For now, we can dump it into the popup box. It would
+ # be nice to handle the markdown in a special way.
+ return docs.get('value', None)
+ else:
+ debug('unknown documentation type:', str(d))
+ return None
+
+
class SignatureHelpListener(sublime_plugin.ViewEventListener):
def __init__(self, view):
@@ -185,10 +201,10 @@
params = signature.get('parameters')
if params:
for parameter in params:
- paramDocs = parameter.get('documentation', None)
- if paramDocs:
+ param_docs = get_documentation(parameter)
+ if param_docs:
formatted.append("**{}**\n".format(parameter.get('label')))
- formatted.append("* *{}*\n".format(paramDocs))
+ formatted.append("* *{}*\n".format(param_docs))
sigDocs = signature.get('documentation', None)
if sigDocs:
formatted.append(sigDocs)
@@ -197,18 +213,18 @@
def _build_popup_content_style_vscode(self) -> str:
# Fetch all the relevant data.
signature_label = ""
- signature_documentation = ""
+ signature_documentation = "" # type: Optional[str]
parameter_label = ""
- parameter_documentation = ""
+ parameter_documentation = "" # type: Optional[str]
if self._active_signature in range(0, len(self._signatures)):
signature = self._signatures[self._active_signature]
signature_label = html.escape(signature["label"], quote=False)
- signature_documentation = signature.get("documentation", "") # Optional.
+ signature_documentation = get_documentation(signature)
parameters = signature.get("parameters", None)
if parameters and self._active_parameter in range(0, len(parameters)):
parameter = parameters[self._active_parameter]
parameter_label = html.escape(parameter["label"], quote=False)
- parameter_documentation = parameter.get("documentation", "") # Optional.
+ parameter_documentation = get_documentation(parameter)
formatted = []
| {"golden_diff": "diff --git a/plugin/signature_help.py b/plugin/signature_help.py\n--- a/plugin/signature_help.py\n+++ b/plugin/signature_help.py\n@@ -21,6 +21,22 @@\n from .core.settings import settings\n \n \n+def get_documentation(d: 'Dict[str, Any]') -> 'Optional[str]':\n+ docs = d.get('documentation', None)\n+ if docs is None:\n+ return None\n+ elif isinstance(docs, str):\n+ # In older version of the protocol, documentation was just a string.\n+ return docs\n+ elif isinstance(docs, dict):\n+ # This can be either \"plaintext\" or \"markdown\" format. For now, we can dump it into the popup box. It would\n+ # be nice to handle the markdown in a special way.\n+ return docs.get('value', None)\n+ else:\n+ debug('unknown documentation type:', str(d))\n+ return None\n+\n+\n class SignatureHelpListener(sublime_plugin.ViewEventListener):\n \n def __init__(self, view):\n@@ -185,10 +201,10 @@\n params = signature.get('parameters')\n if params:\n for parameter in params:\n- paramDocs = parameter.get('documentation', None)\n- if paramDocs:\n+ param_docs = get_documentation(parameter)\n+ if param_docs:\n formatted.append(\"**{}**\\n\".format(parameter.get('label')))\n- formatted.append(\"* *{}*\\n\".format(paramDocs))\n+ formatted.append(\"* *{}*\\n\".format(param_docs))\n sigDocs = signature.get('documentation', None)\n if sigDocs:\n formatted.append(sigDocs)\n@@ -197,18 +213,18 @@\n def _build_popup_content_style_vscode(self) -> str:\n # Fetch all the relevant data.\n signature_label = \"\"\n- signature_documentation = \"\"\n+ signature_documentation = \"\" # type: Optional[str]\n parameter_label = \"\"\n- parameter_documentation = \"\"\n+ parameter_documentation = \"\" # type: Optional[str]\n if self._active_signature in range(0, len(self._signatures)):\n signature = self._signatures[self._active_signature]\n signature_label = html.escape(signature[\"label\"], quote=False)\n- signature_documentation = signature.get(\"documentation\", \"\") # Optional.\n+ signature_documentation = get_documentation(signature)\n parameters = signature.get(\"parameters\", None)\n if parameters and self._active_parameter in range(0, len(parameters)):\n parameter = parameters[self._active_parameter]\n parameter_label = html.escape(parameter[\"label\"], quote=False)\n- parameter_documentation = parameter.get(\"documentation\", \"\") # Optional.\n+ parameter_documentation = get_documentation(parameter)\n \n formatted = []\n", "issue": "Intelephense v1.0 released with a few issues\nI'm happy to announce Intelephense v1.0 is released. 
More info can be found [here](https://github.com/bmewburn/intelephense-docs)\r\n\r\nI'm not entirely sure if this is an LSP issue or a server issue.\r\n\r\nWhen autocompleting method names I have to manually type the parenthesis and tooltip information seems messed up.\r\n\r\n\r\n\r\n\r\n* OSX 10.14 - Intelephense 1.0\r\n* Package Control\n", "before_files": [{"content": "import mdpopups\nimport sublime\nimport sublime_plugin\nimport webbrowser\nimport re\nimport html\n\ntry:\n from typing import Any, List, Dict, Optional\n assert Any and List and Dict and Optional\nexcept ImportError:\n pass\n\nfrom .core.configurations import is_supported_syntax\nfrom .core.registry import config_for_scope, session_for_view, client_for_view\nfrom .core.documents import get_document_position\nfrom .core.events import global_events\nfrom .core.protocol import Request\nfrom .core.logging import debug\nfrom .core.popups import popup_css, popup_class\nfrom .core.settings import settings\n\n\nclass SignatureHelpListener(sublime_plugin.ViewEventListener):\n\n def __init__(self, view):\n self.view = view\n self._initialized = False\n self._signature_help_triggers = [] # type: List[str]\n self._visible = False\n self._language_id = \"\"\n self._signatures = [] # type: List[Any]\n self._active_signature = -1\n self._active_parameter = -1\n\n @classmethod\n def is_applicable(cls, settings):\n syntax = settings.get('syntax')\n return syntax and is_supported_syntax(syntax)\n\n def initialize(self):\n session = session_for_view(self.view)\n if session:\n signatureHelpProvider = session.get_capability(\n 'signatureHelpProvider')\n if signatureHelpProvider:\n self._signature_help_triggers = signatureHelpProvider.get(\n 'triggerCharacters')\n\n config = config_for_scope(self.view)\n if config:\n self._language_id = self._view_language(self.view, config.name)\n\n self._initialized = True\n\n def on_modified_async(self):\n pos = self.view.sel()[0].begin()\n # TODO: this will fire too often, narrow down using scopes or regex\n if not self._initialized:\n self.initialize()\n\n if self._signature_help_triggers:\n last_char = self.view.substr(pos - 1)\n if last_char in self._signature_help_triggers:\n self.request_signature_help(pos)\n elif self._visible:\n if last_char.isspace():\n # Peek behind to find the last non-whitespace character.\n last_char = self.view.substr(self.view.find_by_class(pos, False, ~0) - 1)\n if last_char not in self._signature_help_triggers:\n self.view.hide_popup()\n\n def request_signature_help(self, point) -> None:\n client = client_for_view(self.view)\n if client:\n global_events.publish(\"view.on_purge_changes\", self.view)\n document_position = get_document_position(self.view, point)\n if document_position:\n client.send_request(\n Request.signatureHelp(document_position),\n lambda response: self.handle_response(response, point))\n\n def handle_response(self, response: 'Optional[Dict]', point) -> None:\n if response is not None:\n self._signatures = response.get(\"signatures\", [])\n self._active_signature = response.get(\"activeSignature\", -1)\n self._active_parameter = response.get(\"activeParameter\", -1)\n\n if self._signatures:\n if not 0 <= self._active_signature < len(self._signatures):\n debug(\"activeSignature {} not a valid index for signatures length {}\".format(\n self._active_signature, len(self._signatures)))\n self._active_signature = 0\n else:\n if self._active_signature != -1:\n debug(\"activeSignature should be -1 or null when no signatures are returned\")\n 
self._active_signature = -1\n\n if len(self._signatures) > 0:\n if self._visible:\n self._update_popup()\n else:\n self._show_popup(point)\n\n def on_query_context(self, key, _, operand, __):\n if key != \"lsp.signature_help\":\n return False # Let someone else handle this keybinding.\n elif not self._visible:\n if operand == 0:\n self.request_signature_help(self.view.sel()[0].begin())\n return True\n else:\n return False # Let someone else handle this keybinding.\n elif len(self._signatures) < 2:\n return False # Let someone else handle this keybinding.\n else:\n # We use the \"operand\" for the number -1 or +1. See the keybindings.\n new_index = self._active_signature + operand\n\n # clamp signature index\n new_index = max(0, min(new_index, len(self._signatures) - 1))\n\n # only update when changed\n if new_index != self._active_signature:\n self._active_signature = new_index\n self._update_popup()\n\n return True # We handled this keybinding.\n\n def _show_popup(self, point: int) -> None:\n mdpopups.show_popup(self.view,\n self._build_popup_content(),\n css=popup_css,\n md=True,\n flags=sublime.HIDE_ON_MOUSE_MOVE_AWAY,\n location=point,\n wrapper_class=popup_class,\n max_width=800,\n on_hide=self._on_hide,\n on_navigate=self._on_hover_navigate)\n self._visible = True\n\n def _update_popup(self) -> None:\n mdpopups.update_popup(self.view,\n self._build_popup_content(),\n css=popup_css,\n md=True,\n wrapper_class=popup_class)\n\n def _build_popup_content(self) -> str:\n if settings.highlight_active_signature_parameter:\n return self._build_popup_content_style_vscode()\n else:\n # Default to \"sublime\".\n return self._build_popup_content_style_sublime()\n\n def _view_language(self, view: sublime.View, config_name: str) -> 'Optional[str]':\n languages = view.settings().get('lsp_language')\n return languages.get(config_name) if languages else None\n\n def _on_hide(self):\n self._visible = False\n\n def _on_hover_navigate(self, href):\n webbrowser.open_new_tab(href)\n\n def _build_overload_selector(self) -> str:\n return \"**{}** of **{}** overloads (use the \u2191 \u2193 keys to navigate):\\n\".format(\n str(self._active_signature + 1), str(len(self._signatures)))\n\n def _build_popup_content_style_sublime(self) -> str:\n signature = self._signatures[self._active_signature]\n formatted = []\n\n if len(self._signatures) > 1:\n formatted.append(self._build_overload_selector())\n\n signature_label = signature.get('label')\n if len(signature_label) > 400:\n label = \"```{} ...```\".format(signature_label[0:400]) # long code blocks = hangs\n else:\n label = \"```{}\\n{}\\n```\\n\".format(self._language_id, signature_label)\n formatted.append(label)\n\n params = signature.get('parameters')\n if params:\n for parameter in params:\n paramDocs = parameter.get('documentation', None)\n if paramDocs:\n formatted.append(\"**{}**\\n\".format(parameter.get('label')))\n formatted.append(\"* *{}*\\n\".format(paramDocs))\n sigDocs = signature.get('documentation', None)\n if sigDocs:\n formatted.append(sigDocs)\n return \"\\n\".join(formatted)\n\n def _build_popup_content_style_vscode(self) -> str:\n # Fetch all the relevant data.\n signature_label = \"\"\n signature_documentation = \"\"\n parameter_label = \"\"\n parameter_documentation = \"\"\n if self._active_signature in range(0, len(self._signatures)):\n signature = self._signatures[self._active_signature]\n signature_label = html.escape(signature[\"label\"], quote=False)\n signature_documentation = signature.get(\"documentation\", \"\") # Optional.\n 
parameters = signature.get(\"parameters\", None)\n if parameters and self._active_parameter in range(0, len(parameters)):\n parameter = parameters[self._active_parameter]\n parameter_label = html.escape(parameter[\"label\"], quote=False)\n parameter_documentation = parameter.get(\"documentation\", \"\") # Optional.\n\n formatted = []\n\n if len(self._signatures) > 1:\n formatted.append(self._build_overload_selector())\n\n # Write the active signature and give special treatment to the active parameter (if found).\n # Note that this <div> class and the extra <pre> are copied from mdpopups' HTML output. When mdpopups changes\n # its output style, we must update this literal string accordingly.\n formatted.append('<div class=\"highlight\"><pre>')\n if parameter_label:\n signature_label = self._replace_active_parameter(signature_label, parameter_label)\n formatted.append(signature_label)\n formatted.append(\"</pre></div>\")\n\n if parameter_documentation:\n formatted.append(parameter_documentation)\n\n if signature_documentation:\n formatted.append(signature_documentation)\n\n return \"\\n\".join(formatted)\n\n def _replace_active_parameter(self, signature: str, parameter: str) -> str:\n if parameter[0].isalnum() and parameter[-1].isalnum():\n pattern = r'\\b{}\\b'.format(re.escape(parameter))\n else:\n # If the left or right boundary of the parameter string is not an alphanumeric character, the \\b check will\n # never match. In this case, it's probably safe to assume the parameter string itself will be a good pattern\n # to search for.\n pattern = re.escape(parameter)\n replacement = '<span style=\"font-weight: bold; text-decoration: underline\">{}</span>'.format(parameter)\n # FIXME: This is somewhat language-specific to look for an opening parenthesis. Most languages use parentheses\n # for their parameter lists though.\n start_of_param_list_pos = signature.find('(')\n # Note that this works even when we don't find an opening parenthesis, because .find returns -1 in that case.\n start_of_param_list = signature[start_of_param_list_pos + 1:]\n return signature[:start_of_param_list_pos + 1] + re.sub(pattern, replacement, start_of_param_list, 1)\n", "path": "plugin/signature_help.py"}], "after_files": [{"content": "import mdpopups\nimport sublime\nimport sublime_plugin\nimport webbrowser\nimport re\nimport html\n\ntry:\n from typing import Any, List, Dict, Optional\n assert Any and List and Dict and Optional\nexcept ImportError:\n pass\n\nfrom .core.configurations import is_supported_syntax\nfrom .core.registry import config_for_scope, session_for_view, client_for_view\nfrom .core.documents import get_document_position\nfrom .core.events import global_events\nfrom .core.protocol import Request\nfrom .core.logging import debug\nfrom .core.popups import popup_css, popup_class\nfrom .core.settings import settings\n\n\ndef get_documentation(d: 'Dict[str, Any]') -> 'Optional[str]':\n docs = d.get('documentation', None)\n if docs is None:\n return None\n elif isinstance(docs, str):\n # In older version of the protocol, documentation was just a string.\n return docs\n elif isinstance(docs, dict):\n # This can be either \"plaintext\" or \"markdown\" format. For now, we can dump it into the popup box. 
It would\n # be nice to handle the markdown in a special way.\n return docs.get('value', None)\n else:\n debug('unknown documentation type:', str(d))\n return None\n\n\nclass SignatureHelpListener(sublime_plugin.ViewEventListener):\n\n def __init__(self, view):\n self.view = view\n self._initialized = False\n self._signature_help_triggers = [] # type: List[str]\n self._visible = False\n self._language_id = \"\"\n self._signatures = [] # type: List[Any]\n self._active_signature = -1\n self._active_parameter = -1\n\n @classmethod\n def is_applicable(cls, settings):\n syntax = settings.get('syntax')\n return syntax and is_supported_syntax(syntax)\n\n def initialize(self):\n session = session_for_view(self.view)\n if session:\n signatureHelpProvider = session.get_capability(\n 'signatureHelpProvider')\n if signatureHelpProvider:\n self._signature_help_triggers = signatureHelpProvider.get(\n 'triggerCharacters')\n\n config = config_for_scope(self.view)\n if config:\n self._language_id = self._view_language(self.view, config.name)\n\n self._initialized = True\n\n def on_modified_async(self):\n pos = self.view.sel()[0].begin()\n # TODO: this will fire too often, narrow down using scopes or regex\n if not self._initialized:\n self.initialize()\n\n if self._signature_help_triggers:\n last_char = self.view.substr(pos - 1)\n if last_char in self._signature_help_triggers:\n self.request_signature_help(pos)\n elif self._visible:\n if last_char.isspace():\n # Peek behind to find the last non-whitespace character.\n last_char = self.view.substr(self.view.find_by_class(pos, False, ~0) - 1)\n if last_char not in self._signature_help_triggers:\n self.view.hide_popup()\n\n def request_signature_help(self, point) -> None:\n client = client_for_view(self.view)\n if client:\n global_events.publish(\"view.on_purge_changes\", self.view)\n document_position = get_document_position(self.view, point)\n if document_position:\n client.send_request(\n Request.signatureHelp(document_position),\n lambda response: self.handle_response(response, point))\n\n def handle_response(self, response: 'Optional[Dict]', point) -> None:\n if response is not None:\n self._signatures = response.get(\"signatures\", [])\n self._active_signature = response.get(\"activeSignature\", -1)\n self._active_parameter = response.get(\"activeParameter\", -1)\n\n if self._signatures:\n if not 0 <= self._active_signature < len(self._signatures):\n debug(\"activeSignature {} not a valid index for signatures length {}\".format(\n self._active_signature, len(self._signatures)))\n self._active_signature = 0\n else:\n if self._active_signature != -1:\n debug(\"activeSignature should be -1 or null when no signatures are returned\")\n self._active_signature = -1\n\n if len(self._signatures) > 0:\n if self._visible:\n self._update_popup()\n else:\n self._show_popup(point)\n\n def on_query_context(self, key, _, operand, __):\n if key != \"lsp.signature_help\":\n return False # Let someone else handle this keybinding.\n elif not self._visible:\n if operand == 0:\n self.request_signature_help(self.view.sel()[0].begin())\n return True\n else:\n return False # Let someone else handle this keybinding.\n elif len(self._signatures) < 2:\n return False # Let someone else handle this keybinding.\n else:\n # We use the \"operand\" for the number -1 or +1. 
See the keybindings.\n new_index = self._active_signature + operand\n\n # clamp signature index\n new_index = max(0, min(new_index, len(self._signatures) - 1))\n\n # only update when changed\n if new_index != self._active_signature:\n self._active_signature = new_index\n self._update_popup()\n\n return True # We handled this keybinding.\n\n def _show_popup(self, point: int) -> None:\n mdpopups.show_popup(self.view,\n self._build_popup_content(),\n css=popup_css,\n md=True,\n flags=sublime.HIDE_ON_MOUSE_MOVE_AWAY,\n location=point,\n wrapper_class=popup_class,\n max_width=800,\n on_hide=self._on_hide,\n on_navigate=self._on_hover_navigate)\n self._visible = True\n\n def _update_popup(self) -> None:\n mdpopups.update_popup(self.view,\n self._build_popup_content(),\n css=popup_css,\n md=True,\n wrapper_class=popup_class)\n\n def _build_popup_content(self) -> str:\n if settings.highlight_active_signature_parameter:\n return self._build_popup_content_style_vscode()\n else:\n # Default to \"sublime\".\n return self._build_popup_content_style_sublime()\n\n def _view_language(self, view: sublime.View, config_name: str) -> 'Optional[str]':\n languages = view.settings().get('lsp_language')\n return languages.get(config_name) if languages else None\n\n def _on_hide(self):\n self._visible = False\n\n def _on_hover_navigate(self, href):\n webbrowser.open_new_tab(href)\n\n def _build_overload_selector(self) -> str:\n return \"**{}** of **{}** overloads (use the \u2191 \u2193 keys to navigate):\\n\".format(\n str(self._active_signature + 1), str(len(self._signatures)))\n\n def _build_popup_content_style_sublime(self) -> str:\n signature = self._signatures[self._active_signature]\n formatted = []\n\n if len(self._signatures) > 1:\n formatted.append(self._build_overload_selector())\n\n signature_label = signature.get('label')\n if len(signature_label) > 400:\n label = \"```{} ...```\".format(signature_label[0:400]) # long code blocks = hangs\n else:\n label = \"```{}\\n{}\\n```\\n\".format(self._language_id, signature_label)\n formatted.append(label)\n\n params = signature.get('parameters')\n if params:\n for parameter in params:\n param_docs = get_documentation(parameter)\n if param_docs:\n formatted.append(\"**{}**\\n\".format(parameter.get('label')))\n formatted.append(\"* *{}*\\n\".format(param_docs))\n sigDocs = signature.get('documentation', None)\n if sigDocs:\n formatted.append(sigDocs)\n return \"\\n\".join(formatted)\n\n def _build_popup_content_style_vscode(self) -> str:\n # Fetch all the relevant data.\n signature_label = \"\"\n signature_documentation = \"\" # type: Optional[str]\n parameter_label = \"\"\n parameter_documentation = \"\" # type: Optional[str]\n if self._active_signature in range(0, len(self._signatures)):\n signature = self._signatures[self._active_signature]\n signature_label = html.escape(signature[\"label\"], quote=False)\n signature_documentation = get_documentation(signature)\n parameters = signature.get(\"parameters\", None)\n if parameters and self._active_parameter in range(0, len(parameters)):\n parameter = parameters[self._active_parameter]\n parameter_label = html.escape(parameter[\"label\"], quote=False)\n parameter_documentation = get_documentation(parameter)\n\n formatted = []\n\n if len(self._signatures) > 1:\n formatted.append(self._build_overload_selector())\n\n # Write the active signature and give special treatment to the active parameter (if found).\n # Note that this <div> class and the extra <pre> are copied from mdpopups' HTML output. 
When mdpopups changes\n # its output style, we must update this literal string accordingly.\n formatted.append('<div class=\"highlight\"><pre>')\n if parameter_label:\n signature_label = self._replace_active_parameter(signature_label, parameter_label)\n formatted.append(signature_label)\n formatted.append(\"</pre></div>\")\n\n if parameter_documentation:\n formatted.append(parameter_documentation)\n\n if signature_documentation:\n formatted.append(signature_documentation)\n\n return \"\\n\".join(formatted)\n\n def _replace_active_parameter(self, signature: str, parameter: str) -> str:\n if parameter[0].isalnum() and parameter[-1].isalnum():\n pattern = r'\\b{}\\b'.format(re.escape(parameter))\n else:\n # If the left or right boundary of the parameter string is not an alphanumeric character, the \\b check will\n # never match. In this case, it's probably safe to assume the parameter string itself will be a good pattern\n # to search for.\n pattern = re.escape(parameter)\n replacement = '<span style=\"font-weight: bold; text-decoration: underline\">{}</span>'.format(parameter)\n # FIXME: This is somewhat language-specific to look for an opening parenthesis. Most languages use parentheses\n # for their parameter lists though.\n start_of_param_list_pos = signature.find('(')\n # Note that this works even when we don't find an opening parenthesis, because .find returns -1 in that case.\n start_of_param_list = signature[start_of_param_list_pos + 1:]\n return signature[:start_of_param_list_pos + 1] + re.sub(pattern, replacement, start_of_param_list, 1)\n", "path": "plugin/signature_help.py"}]} | 3,196 | 602 |
gh_patches_debug_11252 | rasdani/github-patches | git_diff | iterative__dvc-4462 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
experiments: show table includes all staged/stashed experiments instead of only the currently applicable ones
```
example-get-started git:executor-tree py:dvc ❯ dvc exp show --no-pager --include-params=featurize
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━┓
┃ Experiment ┃ auc ┃ featurize.max_features ┃ featurize.ngrams ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━┩
│ workspace │ 0.54175 │ 500 │ 5 │
│ bbdfa81 (2020-08-21 11:27:38) │ 0.54175 │ 500 │ 5 │
│ ├── ebbf40d (2020-08-21 11:28:42) │ 0.50822 │ 1500 │ 4 │
│ └── *32c3875 (2020-08-21 12:05:16) │ - │ 1500 │ 7 │
│ ├── *8cb834d (2020-08-21 12:04:59) │ - │ 1500 │ 2 │
│ ├── *32d107b (2020-08-21 12:05:01) │ - │ 1500 │ 5 │
│ └── *4f2c53c (2020-08-21 12:05:04) │ - │ 1500 │ 6 │
└────────────────────────────────────┴─────────┴────────────────────────┴──────────────────┘
```
The last 3 stashed experiments are derived from a different baseline commit and should be excluded by default (unless `--all-commits`/etc. are used).
--- END ISSUE ---
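The queued rows come from `repo.experiments.stash_revs`, which (as the file below shows) maps each stash revision to a tuple whose second element is the baseline commit the experiment was derived from; the table ends up mixed because that baseline is never checked against the set of revisions being displayed. A small sketch of the intended filter, with plain dicts standing in for the real objects — the first tuple slot is ignored here and the second baseline SHA is made up for illustration:

```python
# Stand-ins mirroring the shapes used in dvc/repo/experiments/show.py below.
stash_revs = {
    "32c3875": (None, "bbdfa81"),   # (ignored, baseline_rev): derived from the shown baseline
    "8cb834d": (None, "1a2b3c4"),   # derived from some other, undisplayed commit
}
revs = {"bbdfa81": None}            # baselines selected by show() via repo.brancher()

for stash_rev, (_, baseline_rev) in stash_revs.items():
    if baseline_rev in revs:
        print(f"show queued experiment {stash_rev} under {baseline_rev}")
    else:
        print(f"skip {stash_rev}: baseline {baseline_rev} is not being displayed")
```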
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dvc/repo/experiments/show.py`
Content:
```
1 import logging
2 import re
3 from collections import OrderedDict, defaultdict
4 from datetime import datetime
5
6 from dvc.repo import locked
7 from dvc.repo.metrics.show import _collect_metrics, _read_metrics
8 from dvc.repo.params.show import _collect_configs, _read_params
9
10 logger = logging.getLogger(__name__)
11
12
13 EXP_RE = re.compile(r"(?P<rev_sha>[a-f0-9]{7})-(?P<exp_sha>[a-f0-9]+)")
14
15
16 def _collect_experiment(repo, branch, stash=False):
17 res = defaultdict(dict)
18 for rev in repo.brancher(revs=[branch]):
19 if rev == "workspace":
20 res["timestamp"] = None
21 else:
22 commit = repo.scm.repo.rev_parse(rev)
23 res["timestamp"] = datetime.fromtimestamp(commit.committed_date)
24
25 configs = _collect_configs(repo)
26 params = _read_params(repo, configs, rev)
27 if params:
28 res["params"] = params
29
30 res["queued"] = stash
31 if not stash:
32 metrics = _collect_metrics(repo, None, False)
33 vals = _read_metrics(repo, metrics, rev)
34 res["metrics"] = vals
35
36 return res
37
38
39 @locked
40 def show(
41 repo, all_branches=False, all_tags=False, revs=None, all_commits=False
42 ):
43 res = defaultdict(OrderedDict)
44
45 if revs is None:
46 revs = [repo.scm.get_rev()]
47
48 revs = OrderedDict(
49 (rev, None)
50 for rev in repo.brancher(
51 revs=revs,
52 all_branches=all_branches,
53 all_tags=all_tags,
54 all_commits=all_commits,
55 )
56 )
57
58 for rev in revs:
59 res[rev]["baseline"] = _collect_experiment(repo, rev)
60
61 # collect reproduced experiments
62 for exp_branch in repo.experiments.scm.list_branches():
63 m = re.match(EXP_RE, exp_branch)
64 if m:
65 rev = repo.scm.resolve_rev(m.group("rev_sha"))
66 if rev in revs:
67 exp_rev = repo.experiments.scm.resolve_rev(exp_branch)
68 with repo.experiments.chdir():
69 experiment = _collect_experiment(
70 repo.experiments.exp_dvc, exp_branch
71 )
72 res[rev][exp_rev] = experiment
73
74 # collect queued (not yet reproduced) experiments
75 for stash_rev, (_, baseline_rev) in repo.experiments.stash_revs.items():
76 with repo.experiments.chdir():
77 experiment = _collect_experiment(
78 repo.experiments.exp_dvc, stash_rev, stash=True
79 )
80 res[baseline_rev][stash_rev] = experiment
81
82 return res
83
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dvc/repo/experiments/show.py b/dvc/repo/experiments/show.py
--- a/dvc/repo/experiments/show.py
+++ b/dvc/repo/experiments/show.py
@@ -73,10 +73,11 @@
# collect queued (not yet reproduced) experiments
for stash_rev, (_, baseline_rev) in repo.experiments.stash_revs.items():
- with repo.experiments.chdir():
- experiment = _collect_experiment(
- repo.experiments.exp_dvc, stash_rev, stash=True
- )
- res[baseline_rev][stash_rev] = experiment
+ if baseline_rev in revs:
+ with repo.experiments.chdir():
+ experiment = _collect_experiment(
+ repo.experiments.exp_dvc, stash_rev, stash=True
+ )
+ res[baseline_rev][stash_rev] = experiment
return res
| {"golden_diff": "diff --git a/dvc/repo/experiments/show.py b/dvc/repo/experiments/show.py\n--- a/dvc/repo/experiments/show.py\n+++ b/dvc/repo/experiments/show.py\n@@ -73,10 +73,11 @@\n \n # collect queued (not yet reproduced) experiments\n for stash_rev, (_, baseline_rev) in repo.experiments.stash_revs.items():\n- with repo.experiments.chdir():\n- experiment = _collect_experiment(\n- repo.experiments.exp_dvc, stash_rev, stash=True\n- )\n- res[baseline_rev][stash_rev] = experiment\n+ if baseline_rev in revs:\n+ with repo.experiments.chdir():\n+ experiment = _collect_experiment(\n+ repo.experiments.exp_dvc, stash_rev, stash=True\n+ )\n+ res[baseline_rev][stash_rev] = experiment\n \n return res\n", "issue": "experiments: show table includes all staged/stashed experiments instead of only the currently applicable ones\n```\r\nexample-get-started git:executor-tree py:dvc \u276f dvc exp show --no-pager --include-params=featurize\r\n\u250f\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2533\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2533\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2533\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2513\r\n\u2503 Experiment \u2503 auc \u2503 featurize.max_features \u2503 featurize.ngrams \u2503\r\n\u2521\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2547\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2547\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2547\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2529\r\n\u2502 workspace \u2502 0.54175 \u2502 500 \u2502 5 \u2502\r\n\u2502 bbdfa81 (2020-08-21 11:27:38) \u2502 0.54175 \u2502 500 \u2502 5 \u2502\r\n\u2502 \u251c\u2500\u2500 ebbf40d (2020-08-21 11:28:42) \u2502 0.50822 \u2502 1500 \u2502 4 \u2502\r\n\u2502 \u2514\u2500\u2500 *32c3875 (2020-08-21 12:05:16) \u2502 - \u2502 1500 \u2502 7 \u2502\r\n\u2502 \u251c\u2500\u2500 *8cb834d (2020-08-21 12:04:59) \u2502 - \u2502 1500 \u2502 2 \u2502\r\n\u2502 \u251c\u2500\u2500 *32d107b (2020-08-21 12:05:01) \u2502 - \u2502 1500 \u2502 5 \u2502\r\n\u2502 \u2514\u2500\u2500 *4f2c53c (2020-08-21 12:05:04) \u2502 - \u2502 1500 \u2502 6 \u2502\r\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\r\n```\r\n\r\nthe last 3 stashed experiments are derived from a different baseline commit and should be excluded by default (unless `--all-commit`/etc are used)\n", "before_files": [{"content": "import logging\nimport 
re\nfrom collections import OrderedDict, defaultdict\nfrom datetime import datetime\n\nfrom dvc.repo import locked\nfrom dvc.repo.metrics.show import _collect_metrics, _read_metrics\nfrom dvc.repo.params.show import _collect_configs, _read_params\n\nlogger = logging.getLogger(__name__)\n\n\nEXP_RE = re.compile(r\"(?P<rev_sha>[a-f0-9]{7})-(?P<exp_sha>[a-f0-9]+)\")\n\n\ndef _collect_experiment(repo, branch, stash=False):\n res = defaultdict(dict)\n for rev in repo.brancher(revs=[branch]):\n if rev == \"workspace\":\n res[\"timestamp\"] = None\n else:\n commit = repo.scm.repo.rev_parse(rev)\n res[\"timestamp\"] = datetime.fromtimestamp(commit.committed_date)\n\n configs = _collect_configs(repo)\n params = _read_params(repo, configs, rev)\n if params:\n res[\"params\"] = params\n\n res[\"queued\"] = stash\n if not stash:\n metrics = _collect_metrics(repo, None, False)\n vals = _read_metrics(repo, metrics, rev)\n res[\"metrics\"] = vals\n\n return res\n\n\n@locked\ndef show(\n repo, all_branches=False, all_tags=False, revs=None, all_commits=False\n):\n res = defaultdict(OrderedDict)\n\n if revs is None:\n revs = [repo.scm.get_rev()]\n\n revs = OrderedDict(\n (rev, None)\n for rev in repo.brancher(\n revs=revs,\n all_branches=all_branches,\n all_tags=all_tags,\n all_commits=all_commits,\n )\n )\n\n for rev in revs:\n res[rev][\"baseline\"] = _collect_experiment(repo, rev)\n\n # collect reproduced experiments\n for exp_branch in repo.experiments.scm.list_branches():\n m = re.match(EXP_RE, exp_branch)\n if m:\n rev = repo.scm.resolve_rev(m.group(\"rev_sha\"))\n if rev in revs:\n exp_rev = repo.experiments.scm.resolve_rev(exp_branch)\n with repo.experiments.chdir():\n experiment = _collect_experiment(\n repo.experiments.exp_dvc, exp_branch\n )\n res[rev][exp_rev] = experiment\n\n # collect queued (not yet reproduced) experiments\n for stash_rev, (_, baseline_rev) in repo.experiments.stash_revs.items():\n with repo.experiments.chdir():\n experiment = _collect_experiment(\n repo.experiments.exp_dvc, stash_rev, stash=True\n )\n res[baseline_rev][stash_rev] = experiment\n\n return res\n", "path": "dvc/repo/experiments/show.py"}], "after_files": [{"content": "import logging\nimport re\nfrom collections import OrderedDict, defaultdict\nfrom datetime import datetime\n\nfrom dvc.repo import locked\nfrom dvc.repo.metrics.show import _collect_metrics, _read_metrics\nfrom dvc.repo.params.show import _collect_configs, _read_params\n\nlogger = logging.getLogger(__name__)\n\n\nEXP_RE = re.compile(r\"(?P<rev_sha>[a-f0-9]{7})-(?P<exp_sha>[a-f0-9]+)\")\n\n\ndef _collect_experiment(repo, branch, stash=False):\n res = defaultdict(dict)\n for rev in repo.brancher(revs=[branch]):\n if rev == \"workspace\":\n res[\"timestamp\"] = None\n else:\n commit = repo.scm.repo.rev_parse(rev)\n res[\"timestamp\"] = datetime.fromtimestamp(commit.committed_date)\n\n configs = _collect_configs(repo)\n params = _read_params(repo, configs, rev)\n if params:\n res[\"params\"] = params\n\n res[\"queued\"] = stash\n if not stash:\n metrics = _collect_metrics(repo, None, False)\n vals = _read_metrics(repo, metrics, rev)\n res[\"metrics\"] = vals\n\n return res\n\n\n@locked\ndef show(\n repo, all_branches=False, all_tags=False, revs=None, all_commits=False\n):\n res = defaultdict(OrderedDict)\n\n if revs is None:\n revs = [repo.scm.get_rev()]\n\n revs = OrderedDict(\n (rev, None)\n for rev in repo.brancher(\n revs=revs,\n all_branches=all_branches,\n all_tags=all_tags,\n all_commits=all_commits,\n )\n )\n\n for rev in revs:\n 
res[rev][\"baseline\"] = _collect_experiment(repo, rev)\n\n # collect reproduced experiments\n for exp_branch in repo.experiments.scm.list_branches():\n m = re.match(EXP_RE, exp_branch)\n if m:\n rev = repo.scm.resolve_rev(m.group(\"rev_sha\"))\n if rev in revs:\n exp_rev = repo.experiments.scm.resolve_rev(exp_branch)\n with repo.experiments.chdir():\n experiment = _collect_experiment(\n repo.experiments.exp_dvc, exp_branch\n )\n res[rev][exp_rev] = experiment\n\n # collect queued (not yet reproduced) experiments\n for stash_rev, (_, baseline_rev) in repo.experiments.stash_revs.items():\n if baseline_rev in revs:\n with repo.experiments.chdir():\n experiment = _collect_experiment(\n repo.experiments.exp_dvc, stash_rev, stash=True\n )\n res[baseline_rev][stash_rev] = experiment\n\n return res\n", "path": "dvc/repo/experiments/show.py"}]} | 1,546 | 196 |
gh_patches_debug_7067 | rasdani/github-patches | git_diff | ansible-collections__community.vmware-1634 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Missing Quotation mark in vmware_object_rename error message
##### SUMMARY
A quotation mark before `object type` is missing
https://github.com/ansible-collections/community.vmware/blob/b5eb08db178ead605a0f233f8a88ef6435b5c709/plugins/modules/vmware_object_rename.py#L275
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
vmware_object_rename
--- END ISSUE ---
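As an editorial illustration (not part of the original issue or repository), here is a minimal, hypothetical sketch of the quoting problem the issue points at. The format string is copied from `plugins/modules/vmware_object_rename.py` below; the argument values are placeholders chosen only for demonstration.

```python
# Format string copied from vmware_object_rename.py (line 275); the values are placeholders.
msg = "Failed to find object with %s '%s' and %s' object type"
print(msg % ("name", "Fedora_VM", "VirtualMachine"))
# Output: Failed to find object with name 'Fedora_VM' and VirtualMachine' object type
# The object type ends with a stray trailing quote because the opening quote
# before the third %s is missing from the format string.
```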
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/modules/vmware_object_rename.py`
Content:
```
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # Copyright: (c) 2019, Ansible Project
5 # Copyright: (c) 2019, Abhijeet Kasurde <[email protected]>
6 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
7 # SPDX-License-Identifier: GPL-3.0-or-later
8
9 from __future__ import absolute_import, division, print_function
10 __metaclass__ = type
11
12 DOCUMENTATION = r'''
13 ---
14 module: vmware_object_rename
15 short_description: Renames VMware objects
16 description:
17 - This module can be used to rename VMware objects.
18 - All variables and VMware object names are case sensitive.
19 - Renaming Host and Network is not supported by VMware APIs.
20 author:
21 - Abhijeet Kasurde (@Akasurde)
22 requirements:
23 - vSphere Automation SDK
24 options:
25 object_type:
26 description:
27 - Type of object to work with.
28 - Valid options are Cluster, ClusterComputeResource, Datacenter, Datastore, Folder, ResourcePool, VM or VirtualMachine.
29 required: True
30 type: str
31 choices:
32 - 'ClusterComputeResource'
33 - 'Cluster'
34 - 'Datacenter'
35 - 'Datastore'
36 - 'Folder'
37 - 'Network'
38 - 'ResourcePool'
39 - 'VM'
40 - 'VirtualMachine'
41 object_name:
42 description:
43 - Name of the object to work with.
44 - Mutually exclusive with C(object_moid).
45 type: str
46 object_moid:
47 description:
48 - Managed object id of the VMware object to work with.
49 - Mutually exclusive with C(object_name).
50 type: str
51 new_name:
52 description:
53 - New name for VMware object.
54 required: True
55 aliases: ['object_new_name']
56 type: str
57 extends_documentation_fragment:
58 - community.vmware.vmware_rest_client.documentation
59 '''
60
61 EXAMPLES = r'''
62 - name: Rename a virtual machine
63 community.vmware.vmware_object_rename:
64 hostname: '{{ vcenter_hostname }}'
65 username: '{{ vcenter_username }}'
66 password: '{{ vcenter_password }}'
67 new_name: Fedora_31
68 object_name: Fedora_VM
69 object_type: VirtualMachine
70 delegate_to: localhost
71
72 - name: Rename a virtual machine using moid
73 community.vmware.vmware_object_rename:
74 hostname: '{{ vcenter_hostname }}'
75 username: '{{ vcenter_username }}'
76 password: '{{ vcenter_password }}'
77 new_name: Fedora_31
78 object_moid: vm-14
79 object_type: VirtualMachine
80 delegate_to: localhost
81
82 - name: Rename a datacenter
83 community.vmware.vmware_object_rename:
84 hostname: '{{ vcenter_hostname }}'
85 username: '{{ vcenter_username }}'
86 password: '{{ vcenter_password }}'
87 new_name: Asia_Datacenter
88 object_name: dc1
89 object_type: Datacenter
90 delegate_to: localhost
91
92 - name: Rename a folder with moid
93 community.vmware.vmware_object_rename:
94 hostname: '{{ vcenter_hostname }}'
95 username: '{{ vcenter_username }}'
96 password: '{{ vcenter_password }}'
97 new_name: backup
98 object_moid: group-v46
99 object_type: Folder
100 delegate_to: localhost
101
102 - name: Rename a cluster with moid
103 community.vmware.vmware_object_rename:
104 hostname: '{{ vcenter_hostname }}'
105 username: '{{ vcenter_username }}'
106 password: '{{ vcenter_password }}'
107 new_name: CCR_1
108 object_moid: domain-c33
109 object_type: Cluster
110 delegate_to: localhost
111 '''
112
113 RETURN = r'''
114 rename_status:
115 description: metadata about VMware object rename operation
116 returned: on success
117 type: dict
118 sample: {
119 "current_name": "Fedora_31",
120 "desired_name": "Fedora_31",
121 "previous_name": "Fedora_VM",
122 }
123 '''
124
125 from ansible.module_utils.basic import AnsibleModule
126 from ansible.module_utils._text import to_native
127 from ansible_collections.community.vmware.plugins.module_utils.vmware_rest_client import VmwareRestClient
128 from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, wait_for_task
129 try:
130 from pyVmomi import vim
131 except ImportError:
132 pass
133
134
135 class VmwareObjectRename(VmwareRestClient):
136 def __init__(self, module):
137 """
138 Constructor
139 """
140 super(VmwareObjectRename, self).__init__(module)
141 self.pyv = PyVmomi(module=module)
142 self.soap_stub = self.pyv.si._stub
143
144 self.object_type = self.params.get('object_type')
145 self.object_name = self.params.get('object_name')
146 self.object_new_name = self.params.get('new_name')
147 self.object_moid = self.params.get('object_moid')
148
149 self.managed_object = None
150
151 def ensure_state(self):
152 """
153 Manage the internal state of object rename operation
154
155 """
156 results = dict(
157 changed=False,
158 rename_status=dict(),
159 )
160
161 results['rename_status']['desired_name'] = self.object_new_name
162 changed = False
163
164 vcenter_obj = self.api_client.vcenter
165 available_object_types = [i for i in dir(vcenter_obj) if hasattr(getattr(vcenter_obj, i), 'list') and i != 'Host']
166 available_object_types += ['ClusterComputeResource', 'VirtualMachine']
167
168 if self.object_type not in available_object_types:
169 self.module.fail_json(msg="Object type can be any"
170 " one of [%s]" % ", ".join(available_object_types))
171
172 valid_object_types = {
173 'ClusterComputeResource': [
174 vcenter_obj.Cluster,
175 vim.ClusterComputeResource,
176 'cluster',
177 ],
178 'Cluster': [
179 vcenter_obj.Cluster,
180 vim.ClusterComputeResource,
181 'cluster',
182 ],
183 'Datacenter': [
184 vcenter_obj.Datacenter,
185 vim.Datacenter,
186 'datacenter',
187 ],
188 'Datastore': [
189 vcenter_obj.Datastore,
190 vim.Datastore,
191 'datastore',
192 ],
193 'Folder': [
194 vcenter_obj.Folder,
195 vim.Folder,
196 'folder',
197 ],
198 'Network': [
199 vcenter_obj.Network,
200 vim.ClusterComputeResource,
201 'network',
202 ],
203 'ResourcePool': [
204 vcenter_obj.ResourcePool,
205 vim.ResourcePool,
206 'resource_pool'
207 ],
208 'VM': [
209 vcenter_obj.VM,
210 vim.VirtualMachine,
211 'vm',
212 ],
213 'VirtualMachine': [
214 vcenter_obj.VM,
215 vim.VirtualMachine,
216 'vm',
217 ],
218 }
219
220 target_object = valid_object_types[self.object_type][0]
221
222 # Generate filter spec.
223 # List method will be used in getting objects.
224 # List method can get only up to a max of 1,000 objects.
225 # If VCSA has than 1,000 objects with the specified object_type, the following error will occur.
226 # Error: Too many virtual machines. Add more filter criteria to reduce the number.
227 # To resolve the error, the list method should use the filter spec, to be less than 1,000 objects.
228 filter_spec = target_object.FilterSpec()
229 if self.object_moid:
230 # Make a filter for moid if you specify object_moid.
231 # The moid is a unique id, so get one object if target moid object exists in the vSphere environment.
232 if target_object is vcenter_obj.Datacenter:
233 filter_spec.datacenters = set([self.object_moid])
234
235 if target_object is vcenter_obj.Cluster:
236 filter_spec.clusters = set([self.object_moid])
237
238 if target_object is vcenter_obj.ResourcePool:
239 filter_spec.resource_pools = set([self.object_moid])
240
241 if target_object is vcenter_obj.Folder:
242 filter_spec.folders = set([self.object_moid])
243
244 if target_object is vcenter_obj.VM:
245 filter_spec.vms = set([self.object_moid])
246
247 if target_object is vcenter_obj.Network:
248 filter_spec.networks = set([self.object_moid])
249
250 if target_object is vcenter_obj.Datastore:
251 filter_spec.datastores = set([self.object_moid])
252 else:
253 # If you use object_name parameter, an object will filter with names.
254 filter_spec.names = set([self.object_name])
255
256 # Get an object for changing the object name.
257 all_vmware_objs = target_object.list(filter_spec)
258
259 # Ensure whether already exists an object in the same object_new_name name.
260 existing_obj_moid = None
261 if self.object_moid:
262 if all_vmware_objs:
263 # Ensure whether the same object name as object_new_name.
264 if all_vmware_objs[0].name == self.object_new_name:
265 existing_obj_moid = all_vmware_objs
266 else:
267 existing_obj_moid = target_object.list(target_object.FilterSpec(names=set([self.object_new_name])))
268 if existing_obj_moid:
269 # Object with same name already exists
270 results['rename_status']['current_name'] = results['rename_status']['previous_name'] = self.object_new_name
271 results['changed'] = False
272 self.module.exit_json(**results)
273
274 if not all_vmware_objs:
275 msg = "Failed to find object with %s '%s' and %s' object type"
276 if self.object_name:
277 msg = msg % ('name', self.object_name, self.object_type)
278 elif self.object_moid:
279 msg = msg % ('moid', self.object_moid, self.object_type)
280 self.module.fail_json(msg=msg)
281
282 obj_moid = getattr(all_vmware_objs[0], valid_object_types[self.object_type][2])
283 vmware_obj = valid_object_types[self.object_type][1](obj_moid, self.soap_stub)
284
285 if not vmware_obj:
286 msg = "Failed to create VMware object with object %s %s"
287 if self.object_name:
288 msg = msg % ('name', self.object_name)
289 elif self.object_moid:
290 msg = msg % ('moid', self.object_moid)
291 self.module.fail_json(msg=msg)
292
293 try:
294 results['rename_status']['previous_name'] = vmware_obj.name
295 if not self.module.check_mode:
296 task = vmware_obj.Rename_Task(self.object_new_name)
297 wait_for_task(task)
298 changed = True
299 results['rename_status']['current_name'] = vmware_obj.name
300 except Exception as e:
301 msg = to_native(e)
302 if hasattr(e, 'msg'):
303 msg = to_native(e.msg)
304 self.module.fail_json(msg=msg)
305
306 results['changed'] = changed
307 self.module.exit_json(**results)
308
309
310 def main():
311 argument_spec = VmwareRestClient.vmware_client_argument_spec()
312 argument_spec.update(
313 object_name=dict(),
314 object_moid=dict(),
315 new_name=dict(aliases=['object_new_name'], required=True),
316 object_type=dict(type='str', required=True, choices=['ClusterComputeResource', 'Cluster', 'Datacenter',
317 'Datastore', 'Folder', 'Network', 'ResourcePool', 'VM',
318 'VirtualMachine'])
319 )
320 module = AnsibleModule(
321 argument_spec=argument_spec,
322 mutually_exclusive=[
323 ['object_name', 'object_moid'],
324 ]
325 )
326
327 vmware_object_rename = VmwareObjectRename(module)
328 vmware_object_rename.ensure_state()
329
330
331 if __name__ == '__main__':
332 main()
333
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugins/modules/vmware_object_rename.py b/plugins/modules/vmware_object_rename.py
--- a/plugins/modules/vmware_object_rename.py
+++ b/plugins/modules/vmware_object_rename.py
@@ -272,7 +272,7 @@
self.module.exit_json(**results)
if not all_vmware_objs:
- msg = "Failed to find object with %s '%s' and %s' object type"
+ msg = "Failed to find object with %s '%s' and '%s' object type"
if self.object_name:
msg = msg % ('name', self.object_name, self.object_type)
elif self.object_moid:
| {"golden_diff": "diff --git a/plugins/modules/vmware_object_rename.py b/plugins/modules/vmware_object_rename.py\n--- a/plugins/modules/vmware_object_rename.py\n+++ b/plugins/modules/vmware_object_rename.py\n@@ -272,7 +272,7 @@\n self.module.exit_json(**results)\n \n if not all_vmware_objs:\n- msg = \"Failed to find object with %s '%s' and %s' object type\"\n+ msg = \"Failed to find object with %s '%s' and '%s' object type\"\n if self.object_name:\n msg = msg % ('name', self.object_name, self.object_type)\n elif self.object_moid:\n", "issue": "Missing Quotation mark in vmware_object_rename error message\n<!--- Verify first that your issue is not already reported on GitHub -->\r\n<!--- Also test if the latest release and devel branch are affected too -->\r\n<!--- Complete *all* sections as described, this form is processed automatically -->\r\n\r\n##### SUMMARY\r\nA question mark before `object type` is missing\r\n\r\nhttps://github.com/ansible-collections/community.vmware/blob/b5eb08db178ead605a0f233f8a88ef6435b5c709/plugins/modules/vmware_object_rename.py#L275\r\n\r\n##### ISSUE TYPE\r\n- Bug Report\r\n\r\n##### COMPONENT NAME\r\nvmware_object_rename\r\n\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2019, Ansible Project\n# Copyright: (c) 2019, Abhijeet Kasurde <[email protected]>\n# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nDOCUMENTATION = r'''\n---\nmodule: vmware_object_rename\nshort_description: Renames VMware objects\ndescription:\n- This module can be used to rename VMware objects.\n- All variables and VMware object names are case sensitive.\n- Renaming Host and Network is not supported by VMware APIs.\nauthor:\n- Abhijeet Kasurde (@Akasurde)\nrequirements:\n- vSphere Automation SDK\noptions:\n object_type:\n description:\n - Type of object to work with.\n - Valid options are Cluster, ClusterComputeResource, Datacenter, Datastore, Folder, ResourcePool, VM or VirtualMachine.\n required: True\n type: str\n choices:\n - 'ClusterComputeResource'\n - 'Cluster'\n - 'Datacenter'\n - 'Datastore'\n - 'Folder'\n - 'Network'\n - 'ResourcePool'\n - 'VM'\n - 'VirtualMachine'\n object_name:\n description:\n - Name of the object to work with.\n - Mutually exclusive with C(object_moid).\n type: str\n object_moid:\n description:\n - Managed object id of the VMware object to work with.\n - Mutually exclusive with C(object_name).\n type: str\n new_name:\n description:\n - New name for VMware object.\n required: True\n aliases: ['object_new_name']\n type: str\nextends_documentation_fragment:\n- community.vmware.vmware_rest_client.documentation\n'''\n\nEXAMPLES = r'''\n- name: Rename a virtual machine\n community.vmware.vmware_object_rename:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n new_name: Fedora_31\n object_name: Fedora_VM\n object_type: VirtualMachine\n delegate_to: localhost\n\n- name: Rename a virtual machine using moid\n community.vmware.vmware_object_rename:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n new_name: Fedora_31\n object_moid: vm-14\n object_type: VirtualMachine\n delegate_to: localhost\n\n- name: Rename a datacenter\n community.vmware.vmware_object_rename:\n hostname: '{{ vcenter_hostname 
}}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n new_name: Asia_Datacenter\n object_name: dc1\n object_type: Datacenter\n delegate_to: localhost\n\n- name: Rename a folder with moid\n community.vmware.vmware_object_rename:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n new_name: backup\n object_moid: group-v46\n object_type: Folder\n delegate_to: localhost\n\n- name: Rename a cluster with moid\n community.vmware.vmware_object_rename:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n new_name: CCR_1\n object_moid: domain-c33\n object_type: Cluster\n delegate_to: localhost\n'''\n\nRETURN = r'''\nrename_status:\n description: metadata about VMware object rename operation\n returned: on success\n type: dict\n sample: {\n \"current_name\": \"Fedora_31\",\n \"desired_name\": \"Fedora_31\",\n \"previous_name\": \"Fedora_VM\",\n }\n'''\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils._text import to_native\nfrom ansible_collections.community.vmware.plugins.module_utils.vmware_rest_client import VmwareRestClient\nfrom ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, wait_for_task\ntry:\n from pyVmomi import vim\nexcept ImportError:\n pass\n\n\nclass VmwareObjectRename(VmwareRestClient):\n def __init__(self, module):\n \"\"\"\n Constructor\n \"\"\"\n super(VmwareObjectRename, self).__init__(module)\n self.pyv = PyVmomi(module=module)\n self.soap_stub = self.pyv.si._stub\n\n self.object_type = self.params.get('object_type')\n self.object_name = self.params.get('object_name')\n self.object_new_name = self.params.get('new_name')\n self.object_moid = self.params.get('object_moid')\n\n self.managed_object = None\n\n def ensure_state(self):\n \"\"\"\n Manage the internal state of object rename operation\n\n \"\"\"\n results = dict(\n changed=False,\n rename_status=dict(),\n )\n\n results['rename_status']['desired_name'] = self.object_new_name\n changed = False\n\n vcenter_obj = self.api_client.vcenter\n available_object_types = [i for i in dir(vcenter_obj) if hasattr(getattr(vcenter_obj, i), 'list') and i != 'Host']\n available_object_types += ['ClusterComputeResource', 'VirtualMachine']\n\n if self.object_type not in available_object_types:\n self.module.fail_json(msg=\"Object type can be any\"\n \" one of [%s]\" % \", \".join(available_object_types))\n\n valid_object_types = {\n 'ClusterComputeResource': [\n vcenter_obj.Cluster,\n vim.ClusterComputeResource,\n 'cluster',\n ],\n 'Cluster': [\n vcenter_obj.Cluster,\n vim.ClusterComputeResource,\n 'cluster',\n ],\n 'Datacenter': [\n vcenter_obj.Datacenter,\n vim.Datacenter,\n 'datacenter',\n ],\n 'Datastore': [\n vcenter_obj.Datastore,\n vim.Datastore,\n 'datastore',\n ],\n 'Folder': [\n vcenter_obj.Folder,\n vim.Folder,\n 'folder',\n ],\n 'Network': [\n vcenter_obj.Network,\n vim.ClusterComputeResource,\n 'network',\n ],\n 'ResourcePool': [\n vcenter_obj.ResourcePool,\n vim.ResourcePool,\n 'resource_pool'\n ],\n 'VM': [\n vcenter_obj.VM,\n vim.VirtualMachine,\n 'vm',\n ],\n 'VirtualMachine': [\n vcenter_obj.VM,\n vim.VirtualMachine,\n 'vm',\n ],\n }\n\n target_object = valid_object_types[self.object_type][0]\n\n # Generate filter spec.\n # List method will be used in getting objects.\n # List method can get only up to a max of 1,000 objects.\n # If VCSA has than 1,000 objects with the specified object_type, the following error will 
occur.\n # Error: Too many virtual machines. Add more filter criteria to reduce the number.\n # To resolve the error, the list method should use the filter spec, to be less than 1,000 objects.\n filter_spec = target_object.FilterSpec()\n if self.object_moid:\n # Make a filter for moid if you specify object_moid.\n # The moid is a unique id, so get one object if target moid object exists in the vSphere environment.\n if target_object is vcenter_obj.Datacenter:\n filter_spec.datacenters = set([self.object_moid])\n\n if target_object is vcenter_obj.Cluster:\n filter_spec.clusters = set([self.object_moid])\n\n if target_object is vcenter_obj.ResourcePool:\n filter_spec.resource_pools = set([self.object_moid])\n\n if target_object is vcenter_obj.Folder:\n filter_spec.folders = set([self.object_moid])\n\n if target_object is vcenter_obj.VM:\n filter_spec.vms = set([self.object_moid])\n\n if target_object is vcenter_obj.Network:\n filter_spec.networks = set([self.object_moid])\n\n if target_object is vcenter_obj.Datastore:\n filter_spec.datastores = set([self.object_moid])\n else:\n # If you use object_name parameter, an object will filter with names.\n filter_spec.names = set([self.object_name])\n\n # Get an object for changing the object name.\n all_vmware_objs = target_object.list(filter_spec)\n\n # Ensure whether already exists an object in the same object_new_name name.\n existing_obj_moid = None\n if self.object_moid:\n if all_vmware_objs:\n # Ensure whether the same object name as object_new_name.\n if all_vmware_objs[0].name == self.object_new_name:\n existing_obj_moid = all_vmware_objs\n else:\n existing_obj_moid = target_object.list(target_object.FilterSpec(names=set([self.object_new_name])))\n if existing_obj_moid:\n # Object with same name already exists\n results['rename_status']['current_name'] = results['rename_status']['previous_name'] = self.object_new_name\n results['changed'] = False\n self.module.exit_json(**results)\n\n if not all_vmware_objs:\n msg = \"Failed to find object with %s '%s' and %s' object type\"\n if self.object_name:\n msg = msg % ('name', self.object_name, self.object_type)\n elif self.object_moid:\n msg = msg % ('moid', self.object_moid, self.object_type)\n self.module.fail_json(msg=msg)\n\n obj_moid = getattr(all_vmware_objs[0], valid_object_types[self.object_type][2])\n vmware_obj = valid_object_types[self.object_type][1](obj_moid, self.soap_stub)\n\n if not vmware_obj:\n msg = \"Failed to create VMware object with object %s %s\"\n if self.object_name:\n msg = msg % ('name', self.object_name)\n elif self.object_moid:\n msg = msg % ('moid', self.object_moid)\n self.module.fail_json(msg=msg)\n\n try:\n results['rename_status']['previous_name'] = vmware_obj.name\n if not self.module.check_mode:\n task = vmware_obj.Rename_Task(self.object_new_name)\n wait_for_task(task)\n changed = True\n results['rename_status']['current_name'] = vmware_obj.name\n except Exception as e:\n msg = to_native(e)\n if hasattr(e, 'msg'):\n msg = to_native(e.msg)\n self.module.fail_json(msg=msg)\n\n results['changed'] = changed\n self.module.exit_json(**results)\n\n\ndef main():\n argument_spec = VmwareRestClient.vmware_client_argument_spec()\n argument_spec.update(\n object_name=dict(),\n object_moid=dict(),\n new_name=dict(aliases=['object_new_name'], required=True),\n object_type=dict(type='str', required=True, choices=['ClusterComputeResource', 'Cluster', 'Datacenter',\n 'Datastore', 'Folder', 'Network', 'ResourcePool', 'VM',\n 'VirtualMachine'])\n )\n module = AnsibleModule(\n 
argument_spec=argument_spec,\n mutually_exclusive=[\n ['object_name', 'object_moid'],\n ]\n )\n\n vmware_object_rename = VmwareObjectRename(module)\n vmware_object_rename.ensure_state()\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/vmware_object_rename.py"}], "after_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2019, Ansible Project\n# Copyright: (c) 2019, Abhijeet Kasurde <[email protected]>\n# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nDOCUMENTATION = r'''\n---\nmodule: vmware_object_rename\nshort_description: Renames VMware objects\ndescription:\n- This module can be used to rename VMware objects.\n- All variables and VMware object names are case sensitive.\n- Renaming Host and Network is not supported by VMware APIs.\nauthor:\n- Abhijeet Kasurde (@Akasurde)\nrequirements:\n- vSphere Automation SDK\noptions:\n object_type:\n description:\n - Type of object to work with.\n - Valid options are Cluster, ClusterComputeResource, Datacenter, Datastore, Folder, ResourcePool, VM or VirtualMachine.\n required: True\n type: str\n choices:\n - 'ClusterComputeResource'\n - 'Cluster'\n - 'Datacenter'\n - 'Datastore'\n - 'Folder'\n - 'Network'\n - 'ResourcePool'\n - 'VM'\n - 'VirtualMachine'\n object_name:\n description:\n - Name of the object to work with.\n - Mutually exclusive with C(object_moid).\n type: str\n object_moid:\n description:\n - Managed object id of the VMware object to work with.\n - Mutually exclusive with C(object_name).\n type: str\n new_name:\n description:\n - New name for VMware object.\n required: True\n aliases: ['object_new_name']\n type: str\nextends_documentation_fragment:\n- community.vmware.vmware_rest_client.documentation\n'''\n\nEXAMPLES = r'''\n- name: Rename a virtual machine\n community.vmware.vmware_object_rename:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n new_name: Fedora_31\n object_name: Fedora_VM\n object_type: VirtualMachine\n delegate_to: localhost\n\n- name: Rename a virtual machine using moid\n community.vmware.vmware_object_rename:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n new_name: Fedora_31\n object_moid: vm-14\n object_type: VirtualMachine\n delegate_to: localhost\n\n- name: Rename a datacenter\n community.vmware.vmware_object_rename:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n new_name: Asia_Datacenter\n object_name: dc1\n object_type: Datacenter\n delegate_to: localhost\n\n- name: Rename a folder with moid\n community.vmware.vmware_object_rename:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n new_name: backup\n object_moid: group-v46\n object_type: Folder\n delegate_to: localhost\n\n- name: Rename a cluster with moid\n community.vmware.vmware_object_rename:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n new_name: CCR_1\n object_moid: domain-c33\n object_type: Cluster\n delegate_to: localhost\n'''\n\nRETURN = r'''\nrename_status:\n description: metadata about VMware object rename operation\n returned: on success\n type: dict\n sample: {\n 
\"current_name\": \"Fedora_31\",\n \"desired_name\": \"Fedora_31\",\n \"previous_name\": \"Fedora_VM\",\n }\n'''\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils._text import to_native\nfrom ansible_collections.community.vmware.plugins.module_utils.vmware_rest_client import VmwareRestClient\nfrom ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, wait_for_task\ntry:\n from pyVmomi import vim\nexcept ImportError:\n pass\n\n\nclass VmwareObjectRename(VmwareRestClient):\n def __init__(self, module):\n \"\"\"\n Constructor\n \"\"\"\n super(VmwareObjectRename, self).__init__(module)\n self.pyv = PyVmomi(module=module)\n self.soap_stub = self.pyv.si._stub\n\n self.object_type = self.params.get('object_type')\n self.object_name = self.params.get('object_name')\n self.object_new_name = self.params.get('new_name')\n self.object_moid = self.params.get('object_moid')\n\n self.managed_object = None\n\n def ensure_state(self):\n \"\"\"\n Manage the internal state of object rename operation\n\n \"\"\"\n results = dict(\n changed=False,\n rename_status=dict(),\n )\n\n results['rename_status']['desired_name'] = self.object_new_name\n changed = False\n\n vcenter_obj = self.api_client.vcenter\n available_object_types = [i for i in dir(vcenter_obj) if hasattr(getattr(vcenter_obj, i), 'list') and i != 'Host']\n available_object_types += ['ClusterComputeResource', 'VirtualMachine']\n\n if self.object_type not in available_object_types:\n self.module.fail_json(msg=\"Object type can be any\"\n \" one of [%s]\" % \", \".join(available_object_types))\n\n valid_object_types = {\n 'ClusterComputeResource': [\n vcenter_obj.Cluster,\n vim.ClusterComputeResource,\n 'cluster',\n ],\n 'Cluster': [\n vcenter_obj.Cluster,\n vim.ClusterComputeResource,\n 'cluster',\n ],\n 'Datacenter': [\n vcenter_obj.Datacenter,\n vim.Datacenter,\n 'datacenter',\n ],\n 'Datastore': [\n vcenter_obj.Datastore,\n vim.Datastore,\n 'datastore',\n ],\n 'Folder': [\n vcenter_obj.Folder,\n vim.Folder,\n 'folder',\n ],\n 'Network': [\n vcenter_obj.Network,\n vim.ClusterComputeResource,\n 'network',\n ],\n 'ResourcePool': [\n vcenter_obj.ResourcePool,\n vim.ResourcePool,\n 'resource_pool'\n ],\n 'VM': [\n vcenter_obj.VM,\n vim.VirtualMachine,\n 'vm',\n ],\n 'VirtualMachine': [\n vcenter_obj.VM,\n vim.VirtualMachine,\n 'vm',\n ],\n }\n\n target_object = valid_object_types[self.object_type][0]\n\n # Generate filter spec.\n # List method will be used in getting objects.\n # List method can get only up to a max of 1,000 objects.\n # If VCSA has than 1,000 objects with the specified object_type, the following error will occur.\n # Error: Too many virtual machines. 
Add more filter criteria to reduce the number.\n # To resolve the error, the list method should use the filter spec, to be less than 1,000 objects.\n filter_spec = target_object.FilterSpec()\n if self.object_moid:\n # Make a filter for moid if you specify object_moid.\n # The moid is a unique id, so get one object if target moid object exists in the vSphere environment.\n if target_object is vcenter_obj.Datacenter:\n filter_spec.datacenters = set([self.object_moid])\n\n if target_object is vcenter_obj.Cluster:\n filter_spec.clusters = set([self.object_moid])\n\n if target_object is vcenter_obj.ResourcePool:\n filter_spec.resource_pools = set([self.object_moid])\n\n if target_object is vcenter_obj.Folder:\n filter_spec.folders = set([self.object_moid])\n\n if target_object is vcenter_obj.VM:\n filter_spec.vms = set([self.object_moid])\n\n if target_object is vcenter_obj.Network:\n filter_spec.networks = set([self.object_moid])\n\n if target_object is vcenter_obj.Datastore:\n filter_spec.datastores = set([self.object_moid])\n else:\n # If you use object_name parameter, an object will filter with names.\n filter_spec.names = set([self.object_name])\n\n # Get an object for changing the object name.\n all_vmware_objs = target_object.list(filter_spec)\n\n # Ensure whether already exists an object in the same object_new_name name.\n existing_obj_moid = None\n if self.object_moid:\n if all_vmware_objs:\n # Ensure whether the same object name as object_new_name.\n if all_vmware_objs[0].name == self.object_new_name:\n existing_obj_moid = all_vmware_objs\n else:\n existing_obj_moid = target_object.list(target_object.FilterSpec(names=set([self.object_new_name])))\n if existing_obj_moid:\n # Object with same name already exists\n results['rename_status']['current_name'] = results['rename_status']['previous_name'] = self.object_new_name\n results['changed'] = False\n self.module.exit_json(**results)\n\n if not all_vmware_objs:\n msg = \"Failed to find object with %s '%s' and '%s' object type\"\n if self.object_name:\n msg = msg % ('name', self.object_name, self.object_type)\n elif self.object_moid:\n msg = msg % ('moid', self.object_moid, self.object_type)\n self.module.fail_json(msg=msg)\n\n obj_moid = getattr(all_vmware_objs[0], valid_object_types[self.object_type][2])\n vmware_obj = valid_object_types[self.object_type][1](obj_moid, self.soap_stub)\n\n if not vmware_obj:\n msg = \"Failed to create VMware object with object %s %s\"\n if self.object_name:\n msg = msg % ('name', self.object_name)\n elif self.object_moid:\n msg = msg % ('moid', self.object_moid)\n self.module.fail_json(msg=msg)\n\n try:\n results['rename_status']['previous_name'] = vmware_obj.name\n if not self.module.check_mode:\n task = vmware_obj.Rename_Task(self.object_new_name)\n wait_for_task(task)\n changed = True\n results['rename_status']['current_name'] = vmware_obj.name\n except Exception as e:\n msg = to_native(e)\n if hasattr(e, 'msg'):\n msg = to_native(e.msg)\n self.module.fail_json(msg=msg)\n\n results['changed'] = changed\n self.module.exit_json(**results)\n\n\ndef main():\n argument_spec = VmwareRestClient.vmware_client_argument_spec()\n argument_spec.update(\n object_name=dict(),\n object_moid=dict(),\n new_name=dict(aliases=['object_new_name'], required=True),\n object_type=dict(type='str', required=True, choices=['ClusterComputeResource', 'Cluster', 'Datacenter',\n 'Datastore', 'Folder', 'Network', 'ResourcePool', 'VM',\n 'VirtualMachine'])\n )\n module = AnsibleModule(\n argument_spec=argument_spec,\n 
mutually_exclusive=[\n ['object_name', 'object_moid'],\n ]\n )\n\n vmware_object_rename = VmwareObjectRename(module)\n vmware_object_rename.ensure_state()\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/vmware_object_rename.py"}]} | 3,848 | 148 |
gh_patches_debug_27036 | rasdani/github-patches | git_diff | DDMAL__CantusDB-1308 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
We should make sure our `ALLOWED_HOSTS` is complete
As noted by @dchiller in https://github.com/DDMAL/CantusDB/pull/1286#discussion_r1476453258
> If I understand correctly, our staging/production sites are configured to receive traffic from `www.cantusdatabase.org`, `mass.cantusdatabase.org`, `staging.cantusdatabase.org`, etc., and we open ourself up to security issues if these hosts are not all included in `ALLOWED_HOSTS`.
--- END ISSUE ---
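As an editorial illustration (not part of the original issue), here is a hypothetical sketch of what a more complete `ALLOWED_HOSTS` could look like for the host names mentioned in the quoted comment. The project below actually builds this setting from an environment variable, so this literal list is an assumption for demonstration only, not the deployed configuration.

```python
# Hypothetical example only: hosts taken from the comment quoted above.
ALLOWED_HOSTS = [
    "cantusdatabase.org",
    "www.cantusdatabase.org",
    "mass.cantusdatabase.org",
    "staging.cantusdatabase.org",
]
```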
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `django/cantusdb_project/cantusdb/settings.py`
Content:
```
1 """
2 Django settings for cantusdb project.
3
4 Generated by 'django-admin startproject' using Django 3.0.6.
5
6 For more information on this file, see
7 https://docs.djangoproject.com/en/3.0/topics/settings/
8
9 For the full list of settings and their values, see
10 https://docs.djangoproject.com/en/3.0/ref/settings/
11 """
12
13 import os
14 from distutils.util import strtobool
15 from django.contrib.messages import constants as messages
16
17 # https://ordinarycoders.com/blog/article/django-messages-framework
18 MESSAGE_TAGS = {
19 messages.DEBUG: "alert-secondary",
20 messages.INFO: "alert-info",
21 messages.SUCCESS: "alert-success",
22 messages.WARNING: "alert-warning",
23 messages.ERROR: "alert-danger",
24 }
25
26 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
27 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
28
29 STATIC_ROOT = os.getenv("CANTUSDB_STATIC_ROOT")
30 MEDIA_ROOT = os.getenv("CANTUSDB_MEDIA_ROOT")
31
32 # Quick-start development settings - unsuitable for production
33 # See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
34
35 # SECURITY WARNING: keep the secret key used in production secret!
36 SECRET_KEY = os.getenv("CANTUSDB_SECRET_KEY")
37
38 # SECURITY WARNING: don't run with debug turned on in production!
39 DEBUG = bool(strtobool(os.getenv("CANTUSDB_DEBUG", "False")))
40 # need to set this to false so that we can display the custom 404 page
41
42 ALLOWED_HOSTS = [os.getenv("CANTUSDB_HOSTS")]
43
44
45 # Application definition
46
47 INSTALLED_APPS = [
48 "dal",
49 "dal_select2",
50 "django.contrib.admin",
51 "django.contrib.auth",
52 "django.contrib.contenttypes",
53 "django.contrib.sessions",
54 "django.contrib.messages",
55 "django.contrib.staticfiles",
56 "django.contrib.sites",
57 "django.contrib.flatpages",
58 "django.contrib.humanize",
59 "django.contrib.postgres",
60 "extra_views",
61 "main_app",
62 "articles",
63 "django_quill", # to provide rich-text field for articles
64 "reversion", # django-reversion, for version history of objects in database
65 "users",
66 ]
67
68 MIDDLEWARE = [
69 "django.middleware.security.SecurityMiddleware",
70 "django.contrib.sessions.middleware.SessionMiddleware",
71 "django.middleware.common.CommonMiddleware",
72 "django.middleware.csrf.CsrfViewMiddleware",
73 "django.contrib.auth.middleware.AuthenticationMiddleware",
74 "django.contrib.messages.middleware.MessageMiddleware",
75 "django.middleware.clickjacking.XFrameOptionsMiddleware",
76 "django.contrib.flatpages.middleware.FlatpageFallbackMiddleware",
77 "reversion.middleware.RevisionMiddleware",
78 ]
79
80 ROOT_URLCONF = "cantusdb.urls"
81
82 TEMPLATES = [
83 {
84 "BACKEND": "django.template.backends.django.DjangoTemplates",
85 "DIRS": [os.path.join(BASE_DIR, "templates")],
86 "APP_DIRS": True,
87 "OPTIONS": {
88 "context_processors": [
89 "django.template.context_processors.debug",
90 "django.template.context_processors.request",
91 "django.contrib.auth.context_processors.auth",
92 "django.contrib.messages.context_processors.messages",
93 "main_app.context_processors.determine_project_environment",
94 ],
95 },
96 },
97 ]
98
99 TEMPLATE_LOADERS = "django.template.loaders.app_directories.load_template_source"
100
101 WSGI_APPLICATION = "cantusdb.wsgi.application"
102
103
104 # Database
105 # https://docs.djangoproject.com/en/3.0/ref/settings/#databases
106
107 DATABASES = {
108 "default": {
109 "ENGINE": "django.db.backends.postgresql",
110 "NAME": os.getenv("POSTGRES_DB"),
111 "USER": os.getenv("POSTGRES_USER"),
112 "HOST": os.getenv("POSTGRES_HOST"),
113 "PORT": os.getenv("POSTGRES_PORT"),
114 "PASSWORD": os.getenv("POSTGRES_PASSWORD"),
115 }
116 }
117
118
119 # Password validation
120 # https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
121
122 AUTH_PASSWORD_VALIDATORS = [
123 {
124 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
125 },
126 {
127 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
128 },
129 {
130 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
131 },
132 {
133 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
134 },
135 ]
136
137
138 # Internationalization
139 # https://docs.djangoproject.com/en/3.0/topics/i18n/
140
141 LANGUAGE_CODE = "en-us"
142
143 TIME_ZONE = "UTC"
144
145 USE_I18N = True
146
147 USE_L10N = True
148
149 USE_TZ = True
150
151
152 # Static files (CSS, JavaScript, Images)
153 # https://docs.djangoproject.com/en/3.0/howto/static-files/
154
155 STATIC_URL = "/static/"
156
157 STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
158
159 AUTH_USER_MODEL = "users.User"
160 LOGIN_REDIRECT_URL = "/"
161 LOGIN_URL = "/login/"
162 LOGOUT_REDIRECT_URL = "/login/"
163
164 SITE_ID = 4
165
166 # New in django 3.2: specify the default type of auto-created primary keys
167 # https://docs.djangoproject.com/en/3.2/releases/3.2/#customizing-type-of-auto-created-primary-keys
168 DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
169
170 EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
171 EMAIL_HOST = "email-smtp.us-west-2.amazonaws.com"
172 EMAIL_PORT = 587
173 EMAIL_HOST_USER = os.getenv("AWS_EMAIL_HOST_USER")
174 EMAIL_HOST_PASSWORD = os.getenv("AWS_EMAIL_HOST_PASSWORD")
175 EMAIL_USE_TLS = True
176
177 DEFAULT_FROM_EMAIL = "[email protected]"
178
179 # automatically disable all panels which user can then manually enable
180 DEBUG_TOOLBAR_CONFIG = {
181 "DISABLE_PANELS": {
182 "debug_toolbar.panels.history.HistoryPanel",
183 "debug_toolbar.panels.versions.VersionsPanel",
184 "debug_toolbar.panels.timer.TimerPanel",
185 "debug_toolbar.panels.settings.SettingsPanel",
186 "debug_toolbar.panels.headers.HeadersPanel",
187 "debug_toolbar.panels.request.RequestPanel",
188 "debug_toolbar.panels.sql.SQLPanel",
189 "debug_toolbar.panels.staticfiles.StaticFilesPanel",
190 "debug_toolbar.panels.templates.TemplatesPanel",
191 "debug_toolbar.panels.cache.CachePanel",
192 "debug_toolbar.panels.signals.SignalsPanel",
193 "debug_toolbar.panels.logging.LoggingPanel",
194 "debug_toolbar.panels.redirects.RedirectsPanel",
195 "debug_toolbar.panels.profiling.ProfilingPanel",
196 },
197 }
198
199 INTERNAL_IPS = [
200 "127.0.0.1",
201 ]
202
203 CSRF_TRUSTED_ORIGINS = ["https://cantusdatabase.org", "https://www.cantusdatabase.org"]
204
205 if DEBUG:
206 INSTALLED_APPS.append("debug_toolbar")
207 # debug toolbar must be inserted as early in the middleware as possible
208 MIDDLEWARE.insert(0, "debug_toolbar.middleware.DebugToolbarMiddleware")
209
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/django/cantusdb_project/cantusdb/settings.py b/django/cantusdb_project/cantusdb/settings.py
--- a/django/cantusdb_project/cantusdb/settings.py
+++ b/django/cantusdb_project/cantusdb/settings.py
@@ -35,11 +35,21 @@
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv("CANTUSDB_SECRET_KEY")
+PROJECT_ENVIRONMENT = os.getenv("PROJECT_ENVIRONMENT")
+
# SECURITY WARNING: don't run with debug turned on in production!
-DEBUG = bool(strtobool(os.getenv("CANTUSDB_DEBUG", "False")))
-# need to set this to false so that we can display the custom 404 page
+DEBUG = False # this is switched to True below when PROJECT_ENVIRONMENT=="DEVELOPMENT"
-ALLOWED_HOSTS = [os.getenv("CANTUSDB_HOSTS")]
+if PROJECT_ENVIRONMENT == "DEVELOPMENT":
+ ALLOWED_HOSTS = os.getenv("CANTUSDB_HOSTS_DEVELOPMENT").split(" ")
+ CSRF_TRUSTED_ORIGINS = os.getenv("CANTUSDB_ORIGINS_DEVELOPMENT").split(" ")
+ DEBUG = True
+if PROJECT_ENVIRONMENT == "STAGING":
+ ALLOWED_HOSTS = os.getenv("CANTUSDB_HOSTS_STAGING").split(" ")
+ CSRF_TRUSTED_ORIGINS = os.getenv("CANTUSDB_ORIGINS_STAGING").split(" ")
+if PROJECT_ENVIRONMENT == "PRODUCTION":
+ ALLOWED_HOSTS = os.getenv("CANTUSDB_HOSTS_PRODUCTION").split(" ")
+ CSRF_TRUSTED_ORIGINS = os.getenv("CANTUSDB_ORIGINS_PRODUCTION").split(" ")
# Application definition
@@ -200,8 +210,6 @@
"127.0.0.1",
]
-CSRF_TRUSTED_ORIGINS = ["https://cantusdatabase.org", "https://www.cantusdatabase.org"]
-
if DEBUG:
INSTALLED_APPS.append("debug_toolbar")
# debug toolbar must be inserted as early in the middleware as possible
| {"golden_diff": "diff --git a/django/cantusdb_project/cantusdb/settings.py b/django/cantusdb_project/cantusdb/settings.py\n--- a/django/cantusdb_project/cantusdb/settings.py\n+++ b/django/cantusdb_project/cantusdb/settings.py\n@@ -35,11 +35,21 @@\n # SECURITY WARNING: keep the secret key used in production secret!\n SECRET_KEY = os.getenv(\"CANTUSDB_SECRET_KEY\")\n \n+PROJECT_ENVIRONMENT = os.getenv(\"PROJECT_ENVIRONMENT\")\n+\n # SECURITY WARNING: don't run with debug turned on in production!\n-DEBUG = bool(strtobool(os.getenv(\"CANTUSDB_DEBUG\", \"False\")))\n-# need to set this to false so that we can display the custom 404 page\n+DEBUG = False # this is switched to True below when PROJECT_ENVIRONMENT==\"DEVELOPMENT\"\n \n-ALLOWED_HOSTS = [os.getenv(\"CANTUSDB_HOSTS\")]\n+if PROJECT_ENVIRONMENT == \"DEVELOPMENT\":\n+ ALLOWED_HOSTS = os.getenv(\"CANTUSDB_HOSTS_DEVELOPMENT\").split(\" \")\n+ CSRF_TRUSTED_ORIGINS = os.getenv(\"CANTUSDB_ORIGINS_DEVELOPMENT\").split(\" \")\n+ DEBUG = True\n+if PROJECT_ENVIRONMENT == \"STAGING\":\n+ ALLOWED_HOSTS = os.getenv(\"CANTUSDB_HOSTS_STAGING\").split(\" \")\n+ CSRF_TRUSTED_ORIGINS = os.getenv(\"CANTUSDB_ORIGINS_STAGING\").split(\" \")\n+if PROJECT_ENVIRONMENT == \"PRODUCTION\":\n+ ALLOWED_HOSTS = os.getenv(\"CANTUSDB_HOSTS_PRODUCTION\").split(\" \")\n+ CSRF_TRUSTED_ORIGINS = os.getenv(\"CANTUSDB_ORIGINS_PRODUCTION\").split(\" \")\n \n \n # Application definition\n@@ -200,8 +210,6 @@\n \"127.0.0.1\",\n ]\n \n-CSRF_TRUSTED_ORIGINS = [\"https://cantusdatabase.org\", \"https://www.cantusdatabase.org\"]\n-\n if DEBUG:\n INSTALLED_APPS.append(\"debug_toolbar\")\n # debug toolbar must be inserted as early in the middleware as possible\n", "issue": "We should make sure our `ALLOWED_HOSTS` is complete\nAs noted by @dchiller in https://github.com/DDMAL/CantusDB/pull/1286#discussion_r1476453258\r\n\r\n> If I understand correctly, our staging/production sites are configured to receive traffic from `www.cantusdatabase.org`, `mass.cantusdatabase.org`, `staging.cantusdatabase.org`, etc., and we open ourself up to security issues if these hosts are not all included in `ALLOWED_HOSTS`.\n", "before_files": [{"content": "\"\"\"\nDjango settings for cantusdb project.\n\nGenerated by 'django-admin startproject' using Django 3.0.6.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.0/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/3.0/ref/settings/\n\"\"\"\n\nimport os\nfrom distutils.util import strtobool\nfrom django.contrib.messages import constants as messages\n\n# https://ordinarycoders.com/blog/article/django-messages-framework\nMESSAGE_TAGS = {\n messages.DEBUG: \"alert-secondary\",\n messages.INFO: \"alert-info\",\n messages.SUCCESS: \"alert-success\",\n messages.WARNING: \"alert-warning\",\n messages.ERROR: \"alert-danger\",\n}\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nSTATIC_ROOT = os.getenv(\"CANTUSDB_STATIC_ROOT\")\nMEDIA_ROOT = os.getenv(\"CANTUSDB_MEDIA_ROOT\")\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.getenv(\"CANTUSDB_SECRET_KEY\")\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = bool(strtobool(os.getenv(\"CANTUSDB_DEBUG\", \"False\")))\n# need to set 
this to false so that we can display the custom 404 page\n\nALLOWED_HOSTS = [os.getenv(\"CANTUSDB_HOSTS\")]\n\n\n# Application definition\n\nINSTALLED_APPS = [\n \"dal\",\n \"dal_select2\",\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django.contrib.sites\",\n \"django.contrib.flatpages\",\n \"django.contrib.humanize\",\n \"django.contrib.postgres\",\n \"extra_views\",\n \"main_app\",\n \"articles\",\n \"django_quill\", # to provide rich-text field for articles\n \"reversion\", # django-reversion, for version history of objects in database\n \"users\",\n]\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"django.contrib.flatpages.middleware.FlatpageFallbackMiddleware\",\n \"reversion.middleware.RevisionMiddleware\",\n]\n\nROOT_URLCONF = \"cantusdb.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [os.path.join(BASE_DIR, \"templates\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"main_app.context_processors.determine_project_environment\",\n ],\n },\n },\n]\n\nTEMPLATE_LOADERS = \"django.template.loaders.app_directories.load_template_source\"\n\nWSGI_APPLICATION = \"cantusdb.wsgi.application\"\n\n\n# Database\n# https://docs.djangoproject.com/en/3.0/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n \"NAME\": os.getenv(\"POSTGRES_DB\"),\n \"USER\": os.getenv(\"POSTGRES_USER\"),\n \"HOST\": os.getenv(\"POSTGRES_HOST\"),\n \"PORT\": os.getenv(\"POSTGRES_PORT\"),\n \"PASSWORD\": os.getenv(\"POSTGRES_PASSWORD\"),\n }\n}\n\n\n# Password validation\n# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.0/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.0/howto/static-files/\n\nSTATIC_URL = \"/static/\"\n\nSTATICFILES_DIRS = [os.path.join(BASE_DIR, \"static\")]\n\nAUTH_USER_MODEL = \"users.User\"\nLOGIN_REDIRECT_URL = \"/\"\nLOGIN_URL = \"/login/\"\nLOGOUT_REDIRECT_URL = \"/login/\"\n\nSITE_ID = 4\n\n# New in django 3.2: specify the default type of auto-created primary keys\n# https://docs.djangoproject.com/en/3.2/releases/3.2/#customizing-type-of-auto-created-primary-keys\nDEFAULT_AUTO_FIELD = 
\"django.db.models.AutoField\"\n\nEMAIL_BACKEND = \"django.core.mail.backends.smtp.EmailBackend\"\nEMAIL_HOST = \"email-smtp.us-west-2.amazonaws.com\"\nEMAIL_PORT = 587\nEMAIL_HOST_USER = os.getenv(\"AWS_EMAIL_HOST_USER\")\nEMAIL_HOST_PASSWORD = os.getenv(\"AWS_EMAIL_HOST_PASSWORD\")\nEMAIL_USE_TLS = True\n\nDEFAULT_FROM_EMAIL = \"[email protected]\"\n\n# automatically disable all panels which user can then manually enable\nDEBUG_TOOLBAR_CONFIG = {\n \"DISABLE_PANELS\": {\n \"debug_toolbar.panels.history.HistoryPanel\",\n \"debug_toolbar.panels.versions.VersionsPanel\",\n \"debug_toolbar.panels.timer.TimerPanel\",\n \"debug_toolbar.panels.settings.SettingsPanel\",\n \"debug_toolbar.panels.headers.HeadersPanel\",\n \"debug_toolbar.panels.request.RequestPanel\",\n \"debug_toolbar.panels.sql.SQLPanel\",\n \"debug_toolbar.panels.staticfiles.StaticFilesPanel\",\n \"debug_toolbar.panels.templates.TemplatesPanel\",\n \"debug_toolbar.panels.cache.CachePanel\",\n \"debug_toolbar.panels.signals.SignalsPanel\",\n \"debug_toolbar.panels.logging.LoggingPanel\",\n \"debug_toolbar.panels.redirects.RedirectsPanel\",\n \"debug_toolbar.panels.profiling.ProfilingPanel\",\n },\n}\n\nINTERNAL_IPS = [\n \"127.0.0.1\",\n]\n\nCSRF_TRUSTED_ORIGINS = [\"https://cantusdatabase.org\", \"https://www.cantusdatabase.org\"]\n\nif DEBUG:\n INSTALLED_APPS.append(\"debug_toolbar\")\n # debug toolbar must be inserted as early in the middleware as possible\n MIDDLEWARE.insert(0, \"debug_toolbar.middleware.DebugToolbarMiddleware\")\n", "path": "django/cantusdb_project/cantusdb/settings.py"}], "after_files": [{"content": "\"\"\"\nDjango settings for cantusdb project.\n\nGenerated by 'django-admin startproject' using Django 3.0.6.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.0/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/3.0/ref/settings/\n\"\"\"\n\nimport os\nfrom distutils.util import strtobool\nfrom django.contrib.messages import constants as messages\n\n# https://ordinarycoders.com/blog/article/django-messages-framework\nMESSAGE_TAGS = {\n messages.DEBUG: \"alert-secondary\",\n messages.INFO: \"alert-info\",\n messages.SUCCESS: \"alert-success\",\n messages.WARNING: \"alert-warning\",\n messages.ERROR: \"alert-danger\",\n}\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nSTATIC_ROOT = os.getenv(\"CANTUSDB_STATIC_ROOT\")\nMEDIA_ROOT = os.getenv(\"CANTUSDB_MEDIA_ROOT\")\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.getenv(\"CANTUSDB_SECRET_KEY\")\n\nPROJECT_ENVIRONMENT = os.getenv(\"PROJECT_ENVIRONMENT\")\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = False # this is switched to True below when PROJECT_ENVIRONMENT==\"DEVELOPMENT\"\n\nif PROJECT_ENVIRONMENT == \"DEVELOPMENT\":\n ALLOWED_HOSTS = os.getenv(\"CANTUSDB_HOSTS_DEVELOPMENT\").split(\" \")\n CSRF_TRUSTED_ORIGINS = os.getenv(\"CANTUSDB_ORIGINS_DEVELOPMENT\").split(\" \")\n DEBUG = True\nif PROJECT_ENVIRONMENT == \"STAGING\":\n ALLOWED_HOSTS = os.getenv(\"CANTUSDB_HOSTS_STAGING\").split(\" \")\n CSRF_TRUSTED_ORIGINS = os.getenv(\"CANTUSDB_ORIGINS_STAGING\").split(\" \")\nif PROJECT_ENVIRONMENT == \"PRODUCTION\":\n ALLOWED_HOSTS = 
os.getenv(\"CANTUSDB_HOSTS_PRODUCTION\").split(\" \")\n CSRF_TRUSTED_ORIGINS = os.getenv(\"CANTUSDB_ORIGINS_PRODUCTION\").split(\" \")\n\n\n# Application definition\n\nINSTALLED_APPS = [\n \"dal\",\n \"dal_select2\",\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django.contrib.sites\",\n \"django.contrib.flatpages\",\n \"django.contrib.humanize\",\n \"django.contrib.postgres\",\n \"extra_views\",\n \"main_app\",\n \"articles\",\n \"django_quill\", # to provide rich-text field for articles\n \"reversion\", # django-reversion, for version history of objects in database\n \"users\",\n]\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"django.contrib.flatpages.middleware.FlatpageFallbackMiddleware\",\n \"reversion.middleware.RevisionMiddleware\",\n]\n\nROOT_URLCONF = \"cantusdb.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [os.path.join(BASE_DIR, \"templates\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"main_app.context_processors.determine_project_environment\",\n ],\n },\n },\n]\n\nTEMPLATE_LOADERS = \"django.template.loaders.app_directories.load_template_source\"\n\nWSGI_APPLICATION = \"cantusdb.wsgi.application\"\n\n\n# Database\n# https://docs.djangoproject.com/en/3.0/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n \"NAME\": os.getenv(\"POSTGRES_DB\"),\n \"USER\": os.getenv(\"POSTGRES_USER\"),\n \"HOST\": os.getenv(\"POSTGRES_HOST\"),\n \"PORT\": os.getenv(\"POSTGRES_PORT\"),\n \"PASSWORD\": os.getenv(\"POSTGRES_PASSWORD\"),\n }\n}\n\n\n# Password validation\n# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.0/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.0/howto/static-files/\n\nSTATIC_URL = \"/static/\"\n\nSTATICFILES_DIRS = [os.path.join(BASE_DIR, \"static\")]\n\nAUTH_USER_MODEL = \"users.User\"\nLOGIN_REDIRECT_URL = \"/\"\nLOGIN_URL = \"/login/\"\nLOGOUT_REDIRECT_URL = \"/login/\"\n\nSITE_ID = 4\n\n# New in django 3.2: specify the default type of auto-created primary keys\n# 
https://docs.djangoproject.com/en/3.2/releases/3.2/#customizing-type-of-auto-created-primary-keys\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\nEMAIL_BACKEND = \"django.core.mail.backends.smtp.EmailBackend\"\nEMAIL_HOST = \"email-smtp.us-west-2.amazonaws.com\"\nEMAIL_PORT = 587\nEMAIL_HOST_USER = os.getenv(\"AWS_EMAIL_HOST_USER\")\nEMAIL_HOST_PASSWORD = os.getenv(\"AWS_EMAIL_HOST_PASSWORD\")\nEMAIL_USE_TLS = True\n\nDEFAULT_FROM_EMAIL = \"[email protected]\"\n\n# automatically disable all panels which user can then manually enable\nDEBUG_TOOLBAR_CONFIG = {\n \"DISABLE_PANELS\": {\n \"debug_toolbar.panels.history.HistoryPanel\",\n \"debug_toolbar.panels.versions.VersionsPanel\",\n \"debug_toolbar.panels.timer.TimerPanel\",\n \"debug_toolbar.panels.settings.SettingsPanel\",\n \"debug_toolbar.panels.headers.HeadersPanel\",\n \"debug_toolbar.panels.request.RequestPanel\",\n \"debug_toolbar.panels.sql.SQLPanel\",\n \"debug_toolbar.panels.staticfiles.StaticFilesPanel\",\n \"debug_toolbar.panels.templates.TemplatesPanel\",\n \"debug_toolbar.panels.cache.CachePanel\",\n \"debug_toolbar.panels.signals.SignalsPanel\",\n \"debug_toolbar.panels.logging.LoggingPanel\",\n \"debug_toolbar.panels.redirects.RedirectsPanel\",\n \"debug_toolbar.panels.profiling.ProfilingPanel\",\n },\n}\n\nINTERNAL_IPS = [\n \"127.0.0.1\",\n]\n\nif DEBUG:\n INSTALLED_APPS.append(\"debug_toolbar\")\n # debug toolbar must be inserted as early in the middleware as possible\n MIDDLEWARE.insert(0, \"debug_toolbar.middleware.DebugToolbarMiddleware\")\n", "path": "django/cantusdb_project/cantusdb/settings.py"}]} | 2,406 | 483 |
gh_patches_debug_29924 | rasdani/github-patches | git_diff | cupy__cupy-3130 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Got ValueError when `ord` in `cupy.linalg.norm()` is 2 or -2
Reproducer:
```python
>>> import numpy as np
>>> a = [[2, 0, 1], [-1, 1, 0], [-3, 3, 0]]
>>> a = np.asarray(a, dtype=np.float64)
>>> np.linalg.norm(a, ord=2)
4.723421263784789
>>>
>>> import cupy as cp
>>> b = cp.asarray(a)
>>> cp.linalg.norm(b, ord=2)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/leofang/test/cupy2/cupy/linalg/norms.py", line 124, in norm
raise ValueError('Invalid norm order for matrices.')
ValueError: Invalid norm order for matrices.
```
`ord=2` returns the largest singular value and `-2` the smallest.
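For reference, that relationship can be checked with plain NumPy on the CPU (an illustrative snippet, not part of either codebase; it reuses the matrix from the reproducer above):
```python
# Sanity check: for a 2-D array, ord=2 / ord=-2 are the largest / smallest
# singular values. NumPy returns singular values sorted in descending order.
import numpy as np

a = np.asarray([[2, 0, 1], [-1, 1, 0], [-3, 3, 0]], dtype=np.float64)
s = np.linalg.svd(a, compute_uv=False)

assert np.isclose(np.linalg.norm(a, ord=2), s[0])    # largest singular value, 4.7234...
assert np.isclose(np.linalg.norm(a, ord=-2), s[-1])  # smallest singular value
print(s[0], s[-1])  # the reproducer matrix is singular (rows 2 and 3 are proportional), so s[-1] is ~0
```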
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cupy/linalg/norms.py`
Content:
```
1 import numpy
2 from numpy import linalg
3
4 import cupy
5 from cupy.linalg import decomposition
6 from cupy.linalg import util
7
8
9 def norm(x, ord=None, axis=None, keepdims=False):
10 """Returns one of matrix norms specified by ``ord`` parameter.
11
12 See numpy.linalg.norm for more detail.
13
14 Args:
15 x (cupy.ndarray): Array to take norm. If ``axis`` is None,
16 ``x`` must be 1-D or 2-D.
17 ord (non-zero int, inf, -inf, 'fro'): Norm type.
18 axis (int, 2-tuple of ints, None): 1-D or 2-D norm is cumputed over
19 ``axis``.
20 keepdims (bool): If this is set ``True``, the axes which are normed
21 over are left.
22
23 Returns:
24 cupy.ndarray
25
26 """
27 if not issubclass(x.dtype.type, numpy.inexact):
28 x = x.astype(float)
29
30 # Immediately handle some default, simple, fast, and common cases.
31 if axis is None:
32 ndim = x.ndim
33 if (ord is None or (ndim == 1 and ord == 2) or
34 (ndim == 2 and ord in ('f', 'fro'))):
35 if x.dtype.kind == 'c':
36 s = abs(x.ravel())
37 s *= s
38 ret = cupy.sqrt(s.sum())
39 else:
40 ret = cupy.sqrt((x * x).sum())
41 if keepdims:
42 ret = ret.reshape((1,) * ndim)
43 return ret
44
45 # Normalize the `axis` argument to a tuple.
46 nd = x.ndim
47 if axis is None:
48 axis = tuple(range(nd))
49 elif not isinstance(axis, tuple):
50 try:
51 axis = int(axis)
52 except Exception:
53 raise TypeError(
54 '\'axis\' must be None, an integer or a tuple of integers')
55 axis = (axis,)
56
57 if len(axis) == 1:
58 if ord == numpy.Inf:
59 return abs(x).max(axis=axis, keepdims=keepdims)
60 elif ord == -numpy.Inf:
61 return abs(x).min(axis=axis, keepdims=keepdims)
62 elif ord == 0:
63 # Zero norm
64 # Convert to Python float in accordance with NumPy
65 return (x != 0).astype(x.real.dtype).sum(
66 axis=axis, keepdims=keepdims)
67 elif ord == 1:
68 # special case for speedup
69 return abs(x).sum(axis=axis, keepdims=keepdims)
70 elif ord is None or ord == 2:
71 # special case for speedup
72 if x.dtype.kind == 'c':
73 s = abs(x)
74 s *= s
75 else:
76 s = x * x
77 return cupy.sqrt(s.sum(axis=axis, keepdims=keepdims))
78 else:
79 try:
80 float(ord)
81 except TypeError:
82 raise ValueError('Invalid norm order for vectors.')
83
84 absx = abs(x)
85 absx **= ord
86 ret = absx.sum(axis=axis, keepdims=keepdims)
87 ret **= cupy.reciprocal(ord, dtype=ret.dtype)
88 return ret
89 elif len(axis) == 2:
90 row_axis, col_axis = axis
91 if row_axis < 0:
92 row_axis += nd
93 if col_axis < 0:
94 col_axis += nd
95 if not (0 <= row_axis < nd and 0 <= col_axis < nd):
96 raise ValueError('Invalid axis %r for an array with shape %r' %
97 (axis, x.shape))
98 if row_axis == col_axis:
99 raise ValueError('Duplicate axes given.')
100 if ord == 1:
101 if col_axis > row_axis:
102 col_axis -= 1
103 ret = abs(x).sum(axis=row_axis).max(axis=col_axis)
104 elif ord == numpy.Inf:
105 if row_axis > col_axis:
106 row_axis -= 1
107 ret = abs(x).sum(axis=col_axis).max(axis=row_axis)
108 elif ord == -1:
109 if col_axis > row_axis:
110 col_axis -= 1
111 ret = abs(x).sum(axis=row_axis).min(axis=col_axis)
112 elif ord == -numpy.Inf:
113 if row_axis > col_axis:
114 row_axis -= 1
115 ret = abs(x).sum(axis=col_axis).min(axis=row_axis)
116 elif ord in [None, 'fro', 'f']:
117 if x.dtype.kind == 'c':
118 s = abs(x)
119 s *= s
120 ret = cupy.sqrt(s.sum(axis=axis))
121 else:
122 ret = cupy.sqrt((x * x).sum(axis=axis))
123 else:
124 raise ValueError('Invalid norm order for matrices.')
125 if keepdims:
126 ret_shape = list(x.shape)
127 ret_shape[axis[0]] = 1
128 ret_shape[axis[1]] = 1
129 ret = ret.reshape(ret_shape)
130 return ret
131 else:
132 raise ValueError('Improper number of dimensions to norm.')
133
134
135 # TODO(okuta): Implement cond
136
137
138 def det(a):
139 """Retruns the deteminant of an array.
140
141 Args:
142 a (cupy.ndarray): The input matrix with dimension ``(..., N, N)``.
143
144 Returns:
145 cupy.ndarray: Determinant of ``a``. Its shape is ``a.shape[:-2]``.
146
147 .. seealso:: :func:`numpy.linalg.det`
148 """
149 sign, logdet = slogdet(a)
150 return sign * cupy.exp(logdet)
151
152
153 def matrix_rank(M, tol=None):
154 """Return matrix rank of array using SVD method
155
156 Args:
157 M (cupy.ndarray): Input array. Its `ndim` must be less than or equal to
158 2.
159 tol (None or float): Threshold of singular value of `M`.
160 When `tol` is `None`, and `eps` is the epsilon value for datatype
161 of `M`, then `tol` is set to `S.max() * max(M.shape) * eps`,
162 where `S` is the singular value of `M`.
163 It obeys :func:`numpy.linalg.matrix_rank`.
164
165 Returns:
166 cupy.ndarray: Rank of `M`.
167
168 .. seealso:: :func:`numpy.linalg.matrix_rank`
169 """
170 if M.ndim < 2:
171 return (M != 0).any().astype(int)
172 S = decomposition.svd(M, compute_uv=False)
173 if tol is None:
174 tol = (S.max(axis=-1, keepdims=True) * max(M.shape[-2:]) *
175 numpy.finfo(S.dtype).eps)
176 return (S > tol).sum(axis=-1, dtype=numpy.intp)
177
178
179 def slogdet(a):
180 """Returns sign and logarithm of the determinant of an array.
181
182 It calculates the natural logarithm of the determinant of a given value.
183
184 Args:
185 a (cupy.ndarray): The input matrix with dimension ``(..., N, N)``.
186
187 Returns:
188 tuple of :class:`~cupy.ndarray`:
189 It returns a tuple ``(sign, logdet)``. ``sign`` represents each
190 sign of the determinant as a real number ``0``, ``1`` or ``-1``.
191 'logdet' represents the natural logarithm of the absolute of the
192 determinant.
193 If the determinant is zero, ``sign`` will be ``0`` and ``logdet``
194 will be ``-inf``.
195 The shapes of both ``sign`` and ``logdet`` are equal to
196 ``a.shape[:-2]``.
197
198 .. warning::
199 This function calls one or more cuSOLVER routine(s) which may yield
200 invalid results if input conditions are not met.
201 To detect these invalid results, you can set the `linalg`
202 configuration to a value that is not `ignore` in
203 :func:`cupyx.errstate` or :func:`cupyx.seterr`.
204
205 .. warning::
206 To produce the same results as :func:`numpy.linalg.slogdet` for
207 singular inputs, set the `linalg` configuration to `raise`.
208
209 .. seealso:: :func:`numpy.linalg.slogdet`
210 """
211 if a.ndim < 2:
212 msg = ('%d-dimensional array given. '
213 'Array must be at least two-dimensional' % a.ndim)
214 raise linalg.LinAlgError(msg)
215 util._assert_nd_squareness(a)
216
217 dtype = numpy.promote_types(a.dtype.char, 'f')
218 real_dtype = dtype
219
220 # TODO(kataoka): support complex types
221 if dtype not in (numpy.float32, numpy.float64):
222 msg = ('dtype must be float32 or float64'
223 ' (actual: {})'.format(a.dtype))
224 raise ValueError(msg)
225
226 a_shape = a.shape
227 shape = a_shape[:-2]
228 n = a_shape[-2]
229
230 if a.size == 0:
231 # empty batch (result is empty, too) or empty matrices det([[]]) == 1
232 sign = cupy.ones(shape, dtype)
233 logdet = cupy.zeros(shape, real_dtype)
234 return sign, logdet
235
236 lu, ipiv, dev_info = decomposition._lu_factor(a, dtype)
237
238 # dev_info < 0 means illegal value (in dimensions, strides, and etc.) that
239 # should never happen even if the matrix contains nan or inf.
240 # TODO(kataoka): assert dev_info >= 0 if synchronization is allowed for
241 # debugging purposes.
242
243 diag = cupy.diagonal(lu, axis1=-2, axis2=-1)
244
245 # ipiv is 1-origin
246 non_zero = (cupy.count_nonzero(ipiv != cupy.arange(1, n + 1), axis=-1) +
247 cupy.count_nonzero(diag < 0, axis=-1))
248
249 # Note: sign == -1 ** (non_zero % 2)
250 sign = (non_zero % 2) * -2 + 1
251 logdet = cupy.log(abs(diag)).sum(axis=-1)
252
253 singular = dev_info > 0
254 return (
255 cupy.where(singular, dtype.type(0), sign.astype(dtype)).reshape(shape),
256 cupy.where(singular, real_dtype.type('-inf'), logdet).reshape(shape),
257 )
258
259
260 def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
261 """Returns the sum along the diagonals of an array.
262
263 It computes the sum along the diagonals at ``axis1`` and ``axis2``.
264
265 Args:
266 a (cupy.ndarray): Array to take trace.
267 offset (int): Index of diagonals. Zero indicates the main diagonal, a
268 positive value an upper diagonal, and a negative value a lower
269 diagonal.
270 axis1 (int): The first axis along which the trace is taken.
271 axis2 (int): The second axis along which the trace is taken.
272 dtype: Data type specifier of the output.
273 out (cupy.ndarray): Output array.
274
275 Returns:
276 cupy.ndarray: The trace of ``a`` along axes ``(axis1, axis2)``.
277
278 .. seealso:: :func:`numpy.trace`
279
280 """
281 # TODO(okuta): check type
282 return a.trace(offset, axis1, axis2, dtype, out)
283
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cupy/linalg/norms.py b/cupy/linalg/norms.py
--- a/cupy/linalg/norms.py
+++ b/cupy/linalg/norms.py
@@ -5,6 +5,14 @@
from cupy.linalg import decomposition
from cupy.linalg import util
+import functools
+
+
+def _multi_svd_norm(x, row_axis, col_axis, op):
+ y = cupy.moveaxis(x, (row_axis, col_axis), (-2, -1))
+ result = op(decomposition.svd(y, compute_uv=False), axis=-1)
+ return result
+
def norm(x, ord=None, axis=None, keepdims=False):
"""Returns one of matrix norms specified by ``ord`` parameter.
@@ -97,7 +105,13 @@
(axis, x.shape))
if row_axis == col_axis:
raise ValueError('Duplicate axes given.')
- if ord == 1:
+ if ord == 2:
+ op_max = functools.partial(cupy.take, indices=0)
+ ret = _multi_svd_norm(x, row_axis, col_axis, op_max)
+ elif ord == -2:
+ op_min = functools.partial(cupy.take, indices=-1)
+ ret = _multi_svd_norm(x, row_axis, col_axis, op_min)
+ elif ord == 1:
if col_axis > row_axis:
col_axis -= 1
ret = abs(x).sum(axis=row_axis).max(axis=col_axis)
@@ -120,6 +134,8 @@
ret = cupy.sqrt(s.sum(axis=axis))
else:
ret = cupy.sqrt((x * x).sum(axis=axis))
+ elif ord == 'nuc':
+ ret = _multi_svd_norm(x, row_axis, col_axis, cupy.sum)
else:
raise ValueError('Invalid norm order for matrices.')
if keepdims:
| {"golden_diff": "diff --git a/cupy/linalg/norms.py b/cupy/linalg/norms.py\n--- a/cupy/linalg/norms.py\n+++ b/cupy/linalg/norms.py\n@@ -5,6 +5,14 @@\n from cupy.linalg import decomposition\n from cupy.linalg import util\n \n+import functools\n+\n+\n+def _multi_svd_norm(x, row_axis, col_axis, op):\n+ y = cupy.moveaxis(x, (row_axis, col_axis), (-2, -1))\n+ result = op(decomposition.svd(y, compute_uv=False), axis=-1)\n+ return result\n+\n \n def norm(x, ord=None, axis=None, keepdims=False):\n \"\"\"Returns one of matrix norms specified by ``ord`` parameter.\n@@ -97,7 +105,13 @@\n (axis, x.shape))\n if row_axis == col_axis:\n raise ValueError('Duplicate axes given.')\n- if ord == 1:\n+ if ord == 2:\n+ op_max = functools.partial(cupy.take, indices=0)\n+ ret = _multi_svd_norm(x, row_axis, col_axis, op_max)\n+ elif ord == -2:\n+ op_min = functools.partial(cupy.take, indices=-1)\n+ ret = _multi_svd_norm(x, row_axis, col_axis, op_min)\n+ elif ord == 1:\n if col_axis > row_axis:\n col_axis -= 1\n ret = abs(x).sum(axis=row_axis).max(axis=col_axis)\n@@ -120,6 +134,8 @@\n ret = cupy.sqrt(s.sum(axis=axis))\n else:\n ret = cupy.sqrt((x * x).sum(axis=axis))\n+ elif ord == 'nuc':\n+ ret = _multi_svd_norm(x, row_axis, col_axis, cupy.sum)\n else:\n raise ValueError('Invalid norm order for matrices.')\n if keepdims:\n", "issue": "Got ValueError when `ord` in `cupy.linalg.norm()` is 2 or -2\nReproducer:\r\n```python\r\n>>> import numpy as np\r\n>>> a = [[2, 0, 1], [-1, 1, 0], [-3, 3, 0]]\r\n>>> a = np.asarray(a, dtype=np.float64)\r\n>>> np.linalg.norm(a, ord=2)\r\n4.723421263784789\r\n>>>\r\n>>> import cupy as cp\r\n>>> b = cp.asarray(a)\r\n>>> cp.linalg.norm(b, ord=2)\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/leofang/test/cupy2/cupy/linalg/norms.py\", line 124, in norm\r\n raise ValueError('Invalid norm order for matrices.')\r\nValueError: Invalid norm order for matrices.\r\n```\r\n`ord=2` returns the largest singular value and `-2` the smallest. \n", "before_files": [{"content": "import numpy\nfrom numpy import linalg\n\nimport cupy\nfrom cupy.linalg import decomposition\nfrom cupy.linalg import util\n\n\ndef norm(x, ord=None, axis=None, keepdims=False):\n \"\"\"Returns one of matrix norms specified by ``ord`` parameter.\n\n See numpy.linalg.norm for more detail.\n\n Args:\n x (cupy.ndarray): Array to take norm. 
If ``axis`` is None,\n ``x`` must be 1-D or 2-D.\n ord (non-zero int, inf, -inf, 'fro'): Norm type.\n axis (int, 2-tuple of ints, None): 1-D or 2-D norm is cumputed over\n ``axis``.\n keepdims (bool): If this is set ``True``, the axes which are normed\n over are left.\n\n Returns:\n cupy.ndarray\n\n \"\"\"\n if not issubclass(x.dtype.type, numpy.inexact):\n x = x.astype(float)\n\n # Immediately handle some default, simple, fast, and common cases.\n if axis is None:\n ndim = x.ndim\n if (ord is None or (ndim == 1 and ord == 2) or\n (ndim == 2 and ord in ('f', 'fro'))):\n if x.dtype.kind == 'c':\n s = abs(x.ravel())\n s *= s\n ret = cupy.sqrt(s.sum())\n else:\n ret = cupy.sqrt((x * x).sum())\n if keepdims:\n ret = ret.reshape((1,) * ndim)\n return ret\n\n # Normalize the `axis` argument to a tuple.\n nd = x.ndim\n if axis is None:\n axis = tuple(range(nd))\n elif not isinstance(axis, tuple):\n try:\n axis = int(axis)\n except Exception:\n raise TypeError(\n '\\'axis\\' must be None, an integer or a tuple of integers')\n axis = (axis,)\n\n if len(axis) == 1:\n if ord == numpy.Inf:\n return abs(x).max(axis=axis, keepdims=keepdims)\n elif ord == -numpy.Inf:\n return abs(x).min(axis=axis, keepdims=keepdims)\n elif ord == 0:\n # Zero norm\n # Convert to Python float in accordance with NumPy\n return (x != 0).astype(x.real.dtype).sum(\n axis=axis, keepdims=keepdims)\n elif ord == 1:\n # special case for speedup\n return abs(x).sum(axis=axis, keepdims=keepdims)\n elif ord is None or ord == 2:\n # special case for speedup\n if x.dtype.kind == 'c':\n s = abs(x)\n s *= s\n else:\n s = x * x\n return cupy.sqrt(s.sum(axis=axis, keepdims=keepdims))\n else:\n try:\n float(ord)\n except TypeError:\n raise ValueError('Invalid norm order for vectors.')\n\n absx = abs(x)\n absx **= ord\n ret = absx.sum(axis=axis, keepdims=keepdims)\n ret **= cupy.reciprocal(ord, dtype=ret.dtype)\n return ret\n elif len(axis) == 2:\n row_axis, col_axis = axis\n if row_axis < 0:\n row_axis += nd\n if col_axis < 0:\n col_axis += nd\n if not (0 <= row_axis < nd and 0 <= col_axis < nd):\n raise ValueError('Invalid axis %r for an array with shape %r' %\n (axis, x.shape))\n if row_axis == col_axis:\n raise ValueError('Duplicate axes given.')\n if ord == 1:\n if col_axis > row_axis:\n col_axis -= 1\n ret = abs(x).sum(axis=row_axis).max(axis=col_axis)\n elif ord == numpy.Inf:\n if row_axis > col_axis:\n row_axis -= 1\n ret = abs(x).sum(axis=col_axis).max(axis=row_axis)\n elif ord == -1:\n if col_axis > row_axis:\n col_axis -= 1\n ret = abs(x).sum(axis=row_axis).min(axis=col_axis)\n elif ord == -numpy.Inf:\n if row_axis > col_axis:\n row_axis -= 1\n ret = abs(x).sum(axis=col_axis).min(axis=row_axis)\n elif ord in [None, 'fro', 'f']:\n if x.dtype.kind == 'c':\n s = abs(x)\n s *= s\n ret = cupy.sqrt(s.sum(axis=axis))\n else:\n ret = cupy.sqrt((x * x).sum(axis=axis))\n else:\n raise ValueError('Invalid norm order for matrices.')\n if keepdims:\n ret_shape = list(x.shape)\n ret_shape[axis[0]] = 1\n ret_shape[axis[1]] = 1\n ret = ret.reshape(ret_shape)\n return ret\n else:\n raise ValueError('Improper number of dimensions to norm.')\n\n\n# TODO(okuta): Implement cond\n\n\ndef det(a):\n \"\"\"Retruns the deteminant of an array.\n\n Args:\n a (cupy.ndarray): The input matrix with dimension ``(..., N, N)``.\n\n Returns:\n cupy.ndarray: Determinant of ``a``. Its shape is ``a.shape[:-2]``.\n\n .. 
seealso:: :func:`numpy.linalg.det`\n \"\"\"\n sign, logdet = slogdet(a)\n return sign * cupy.exp(logdet)\n\n\ndef matrix_rank(M, tol=None):\n \"\"\"Return matrix rank of array using SVD method\n\n Args:\n M (cupy.ndarray): Input array. Its `ndim` must be less than or equal to\n 2.\n tol (None or float): Threshold of singular value of `M`.\n When `tol` is `None`, and `eps` is the epsilon value for datatype\n of `M`, then `tol` is set to `S.max() * max(M.shape) * eps`,\n where `S` is the singular value of `M`.\n It obeys :func:`numpy.linalg.matrix_rank`.\n\n Returns:\n cupy.ndarray: Rank of `M`.\n\n .. seealso:: :func:`numpy.linalg.matrix_rank`\n \"\"\"\n if M.ndim < 2:\n return (M != 0).any().astype(int)\n S = decomposition.svd(M, compute_uv=False)\n if tol is None:\n tol = (S.max(axis=-1, keepdims=True) * max(M.shape[-2:]) *\n numpy.finfo(S.dtype).eps)\n return (S > tol).sum(axis=-1, dtype=numpy.intp)\n\n\ndef slogdet(a):\n \"\"\"Returns sign and logarithm of the determinant of an array.\n\n It calculates the natural logarithm of the determinant of a given value.\n\n Args:\n a (cupy.ndarray): The input matrix with dimension ``(..., N, N)``.\n\n Returns:\n tuple of :class:`~cupy.ndarray`:\n It returns a tuple ``(sign, logdet)``. ``sign`` represents each\n sign of the determinant as a real number ``0``, ``1`` or ``-1``.\n 'logdet' represents the natural logarithm of the absolute of the\n determinant.\n If the determinant is zero, ``sign`` will be ``0`` and ``logdet``\n will be ``-inf``.\n The shapes of both ``sign`` and ``logdet`` are equal to\n ``a.shape[:-2]``.\n\n .. warning::\n This function calls one or more cuSOLVER routine(s) which may yield\n invalid results if input conditions are not met.\n To detect these invalid results, you can set the `linalg`\n configuration to a value that is not `ignore` in\n :func:`cupyx.errstate` or :func:`cupyx.seterr`.\n\n .. warning::\n To produce the same results as :func:`numpy.linalg.slogdet` for\n singular inputs, set the `linalg` configuration to `raise`.\n\n .. seealso:: :func:`numpy.linalg.slogdet`\n \"\"\"\n if a.ndim < 2:\n msg = ('%d-dimensional array given. '\n 'Array must be at least two-dimensional' % a.ndim)\n raise linalg.LinAlgError(msg)\n util._assert_nd_squareness(a)\n\n dtype = numpy.promote_types(a.dtype.char, 'f')\n real_dtype = dtype\n\n # TODO(kataoka): support complex types\n if dtype not in (numpy.float32, numpy.float64):\n msg = ('dtype must be float32 or float64'\n ' (actual: {})'.format(a.dtype))\n raise ValueError(msg)\n\n a_shape = a.shape\n shape = a_shape[:-2]\n n = a_shape[-2]\n\n if a.size == 0:\n # empty batch (result is empty, too) or empty matrices det([[]]) == 1\n sign = cupy.ones(shape, dtype)\n logdet = cupy.zeros(shape, real_dtype)\n return sign, logdet\n\n lu, ipiv, dev_info = decomposition._lu_factor(a, dtype)\n\n # dev_info < 0 means illegal value (in dimensions, strides, and etc.) 
that\n # should never happen even if the matrix contains nan or inf.\n # TODO(kataoka): assert dev_info >= 0 if synchronization is allowed for\n # debugging purposes.\n\n diag = cupy.diagonal(lu, axis1=-2, axis2=-1)\n\n # ipiv is 1-origin\n non_zero = (cupy.count_nonzero(ipiv != cupy.arange(1, n + 1), axis=-1) +\n cupy.count_nonzero(diag < 0, axis=-1))\n\n # Note: sign == -1 ** (non_zero % 2)\n sign = (non_zero % 2) * -2 + 1\n logdet = cupy.log(abs(diag)).sum(axis=-1)\n\n singular = dev_info > 0\n return (\n cupy.where(singular, dtype.type(0), sign.astype(dtype)).reshape(shape),\n cupy.where(singular, real_dtype.type('-inf'), logdet).reshape(shape),\n )\n\n\ndef trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):\n \"\"\"Returns the sum along the diagonals of an array.\n\n It computes the sum along the diagonals at ``axis1`` and ``axis2``.\n\n Args:\n a (cupy.ndarray): Array to take trace.\n offset (int): Index of diagonals. Zero indicates the main diagonal, a\n positive value an upper diagonal, and a negative value a lower\n diagonal.\n axis1 (int): The first axis along which the trace is taken.\n axis2 (int): The second axis along which the trace is taken.\n dtype: Data type specifier of the output.\n out (cupy.ndarray): Output array.\n\n Returns:\n cupy.ndarray: The trace of ``a`` along axes ``(axis1, axis2)``.\n\n .. seealso:: :func:`numpy.trace`\n\n \"\"\"\n # TODO(okuta): check type\n return a.trace(offset, axis1, axis2, dtype, out)\n", "path": "cupy/linalg/norms.py"}], "after_files": [{"content": "import numpy\nfrom numpy import linalg\n\nimport cupy\nfrom cupy.linalg import decomposition\nfrom cupy.linalg import util\n\nimport functools\n\n\ndef _multi_svd_norm(x, row_axis, col_axis, op):\n y = cupy.moveaxis(x, (row_axis, col_axis), (-2, -1))\n result = op(decomposition.svd(y, compute_uv=False), axis=-1)\n return result\n\n\ndef norm(x, ord=None, axis=None, keepdims=False):\n \"\"\"Returns one of matrix norms specified by ``ord`` parameter.\n\n See numpy.linalg.norm for more detail.\n\n Args:\n x (cupy.ndarray): Array to take norm. 
If ``axis`` is None,\n ``x`` must be 1-D or 2-D.\n ord (non-zero int, inf, -inf, 'fro'): Norm type.\n axis (int, 2-tuple of ints, None): 1-D or 2-D norm is cumputed over\n ``axis``.\n keepdims (bool): If this is set ``True``, the axes which are normed\n over are left.\n\n Returns:\n cupy.ndarray\n\n \"\"\"\n if not issubclass(x.dtype.type, numpy.inexact):\n x = x.astype(float)\n\n # Immediately handle some default, simple, fast, and common cases.\n if axis is None:\n ndim = x.ndim\n if (ord is None or (ndim == 1 and ord == 2) or\n (ndim == 2 and ord in ('f', 'fro'))):\n if x.dtype.kind == 'c':\n s = abs(x.ravel())\n s *= s\n ret = cupy.sqrt(s.sum())\n else:\n ret = cupy.sqrt((x * x).sum())\n if keepdims:\n ret = ret.reshape((1,) * ndim)\n return ret\n\n # Normalize the `axis` argument to a tuple.\n nd = x.ndim\n if axis is None:\n axis = tuple(range(nd))\n elif not isinstance(axis, tuple):\n try:\n axis = int(axis)\n except Exception:\n raise TypeError(\n '\\'axis\\' must be None, an integer or a tuple of integers')\n axis = (axis,)\n\n if len(axis) == 1:\n if ord == numpy.Inf:\n return abs(x).max(axis=axis, keepdims=keepdims)\n elif ord == -numpy.Inf:\n return abs(x).min(axis=axis, keepdims=keepdims)\n elif ord == 0:\n # Zero norm\n # Convert to Python float in accordance with NumPy\n return (x != 0).astype(x.real.dtype).sum(\n axis=axis, keepdims=keepdims)\n elif ord == 1:\n # special case for speedup\n return abs(x).sum(axis=axis, keepdims=keepdims)\n elif ord is None or ord == 2:\n # special case for speedup\n if x.dtype.kind == 'c':\n s = abs(x)\n s *= s\n else:\n s = x * x\n return cupy.sqrt(s.sum(axis=axis, keepdims=keepdims))\n else:\n try:\n float(ord)\n except TypeError:\n raise ValueError('Invalid norm order for vectors.')\n\n absx = abs(x)\n absx **= ord\n ret = absx.sum(axis=axis, keepdims=keepdims)\n ret **= cupy.reciprocal(ord, dtype=ret.dtype)\n return ret\n elif len(axis) == 2:\n row_axis, col_axis = axis\n if row_axis < 0:\n row_axis += nd\n if col_axis < 0:\n col_axis += nd\n if not (0 <= row_axis < nd and 0 <= col_axis < nd):\n raise ValueError('Invalid axis %r for an array with shape %r' %\n (axis, x.shape))\n if row_axis == col_axis:\n raise ValueError('Duplicate axes given.')\n if ord == 2:\n op_max = functools.partial(cupy.take, indices=0)\n ret = _multi_svd_norm(x, row_axis, col_axis, op_max)\n elif ord == -2:\n op_min = functools.partial(cupy.take, indices=-1)\n ret = _multi_svd_norm(x, row_axis, col_axis, op_min)\n elif ord == 1:\n if col_axis > row_axis:\n col_axis -= 1\n ret = abs(x).sum(axis=row_axis).max(axis=col_axis)\n elif ord == numpy.Inf:\n if row_axis > col_axis:\n row_axis -= 1\n ret = abs(x).sum(axis=col_axis).max(axis=row_axis)\n elif ord == -1:\n if col_axis > row_axis:\n col_axis -= 1\n ret = abs(x).sum(axis=row_axis).min(axis=col_axis)\n elif ord == -numpy.Inf:\n if row_axis > col_axis:\n row_axis -= 1\n ret = abs(x).sum(axis=col_axis).min(axis=row_axis)\n elif ord in [None, 'fro', 'f']:\n if x.dtype.kind == 'c':\n s = abs(x)\n s *= s\n ret = cupy.sqrt(s.sum(axis=axis))\n else:\n ret = cupy.sqrt((x * x).sum(axis=axis))\n elif ord == 'nuc':\n ret = _multi_svd_norm(x, row_axis, col_axis, cupy.sum)\n else:\n raise ValueError('Invalid norm order for matrices.')\n if keepdims:\n ret_shape = list(x.shape)\n ret_shape[axis[0]] = 1\n ret_shape[axis[1]] = 1\n ret = ret.reshape(ret_shape)\n return ret\n else:\n raise ValueError('Improper number of dimensions to norm.')\n\n\n# TODO(okuta): Implement cond\n\n\ndef det(a):\n \"\"\"Retruns the deteminant of an 
array.\n\n Args:\n a (cupy.ndarray): The input matrix with dimension ``(..., N, N)``.\n\n Returns:\n cupy.ndarray: Determinant of ``a``. Its shape is ``a.shape[:-2]``.\n\n .. seealso:: :func:`numpy.linalg.det`\n \"\"\"\n sign, logdet = slogdet(a)\n return sign * cupy.exp(logdet)\n\n\ndef matrix_rank(M, tol=None):\n \"\"\"Return matrix rank of array using SVD method\n\n Args:\n M (cupy.ndarray): Input array. Its `ndim` must be less than or equal to\n 2.\n tol (None or float): Threshold of singular value of `M`.\n When `tol` is `None`, and `eps` is the epsilon value for datatype\n of `M`, then `tol` is set to `S.max() * max(M.shape) * eps`,\n where `S` is the singular value of `M`.\n It obeys :func:`numpy.linalg.matrix_rank`.\n\n Returns:\n cupy.ndarray: Rank of `M`.\n\n .. seealso:: :func:`numpy.linalg.matrix_rank`\n \"\"\"\n if M.ndim < 2:\n return (M != 0).any().astype(int)\n S = decomposition.svd(M, compute_uv=False)\n if tol is None:\n tol = (S.max(axis=-1, keepdims=True) * max(M.shape[-2:]) *\n numpy.finfo(S.dtype).eps)\n return (S > tol).sum(axis=-1, dtype=numpy.intp)\n\n\ndef slogdet(a):\n \"\"\"Returns sign and logarithm of the determinant of an array.\n\n It calculates the natural logarithm of the determinant of a given value.\n\n Args:\n a (cupy.ndarray): The input matrix with dimension ``(..., N, N)``.\n\n Returns:\n tuple of :class:`~cupy.ndarray`:\n It returns a tuple ``(sign, logdet)``. ``sign`` represents each\n sign of the determinant as a real number ``0``, ``1`` or ``-1``.\n 'logdet' represents the natural logarithm of the absolute of the\n determinant.\n If the determinant is zero, ``sign`` will be ``0`` and ``logdet``\n will be ``-inf``.\n The shapes of both ``sign`` and ``logdet`` are equal to\n ``a.shape[:-2]``.\n\n .. warning::\n This function calls one or more cuSOLVER routine(s) which may yield\n invalid results if input conditions are not met.\n To detect these invalid results, you can set the `linalg`\n configuration to a value that is not `ignore` in\n :func:`cupyx.errstate` or :func:`cupyx.seterr`.\n\n .. warning::\n To produce the same results as :func:`numpy.linalg.slogdet` for\n singular inputs, set the `linalg` configuration to `raise`.\n\n .. seealso:: :func:`numpy.linalg.slogdet`\n \"\"\"\n if a.ndim < 2:\n msg = ('%d-dimensional array given. '\n 'Array must be at least two-dimensional' % a.ndim)\n raise linalg.LinAlgError(msg)\n util._assert_nd_squareness(a)\n\n dtype = numpy.promote_types(a.dtype.char, 'f')\n real_dtype = dtype\n\n # TODO(kataoka): support complex types\n if dtype not in (numpy.float32, numpy.float64):\n msg = ('dtype must be float32 or float64'\n ' (actual: {})'.format(a.dtype))\n raise ValueError(msg)\n\n a_shape = a.shape\n shape = a_shape[:-2]\n n = a_shape[-2]\n\n if a.size == 0:\n # empty batch (result is empty, too) or empty matrices det([[]]) == 1\n sign = cupy.ones(shape, dtype)\n logdet = cupy.zeros(shape, real_dtype)\n return sign, logdet\n\n lu, ipiv, dev_info = decomposition._lu_factor(a, dtype)\n\n # dev_info < 0 means illegal value (in dimensions, strides, and etc.) 
that\n # should never happen even if the matrix contains nan or inf.\n # TODO(kataoka): assert dev_info >= 0 if synchronization is allowed for\n # debugging purposes.\n\n diag = cupy.diagonal(lu, axis1=-2, axis2=-1)\n\n # ipiv is 1-origin\n non_zero = (cupy.count_nonzero(ipiv != cupy.arange(1, n + 1), axis=-1) +\n cupy.count_nonzero(diag < 0, axis=-1))\n\n # Note: sign == -1 ** (non_zero % 2)\n sign = (non_zero % 2) * -2 + 1\n logdet = cupy.log(abs(diag)).sum(axis=-1)\n\n singular = dev_info > 0\n return (\n cupy.where(singular, dtype.type(0), sign.astype(dtype)).reshape(shape),\n cupy.where(singular, real_dtype.type('-inf'), logdet).reshape(shape),\n )\n\n\ndef trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):\n \"\"\"Returns the sum along the diagonals of an array.\n\n It computes the sum along the diagonals at ``axis1`` and ``axis2``.\n\n Args:\n a (cupy.ndarray): Array to take trace.\n offset (int): Index of diagonals. Zero indicates the main diagonal, a\n positive value an upper diagonal, and a negative value a lower\n diagonal.\n axis1 (int): The first axis along which the trace is taken.\n axis2 (int): The second axis along which the trace is taken.\n dtype: Data type specifier of the output.\n out (cupy.ndarray): Output array.\n\n Returns:\n cupy.ndarray: The trace of ``a`` along axes ``(axis1, axis2)``.\n\n .. seealso:: :func:`numpy.trace`\n\n \"\"\"\n # TODO(okuta): check type\n return a.trace(offset, axis1, axis2, dtype, out)\n", "path": "cupy/linalg/norms.py"}]} | 3,728 | 433 |
gh_patches_debug_8445 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-2951 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The display of usernames in the tutorial / article lists is broken
The username "Bat'" is not displayed correctly.

The behaviour can be seen on this page: https://zestedesavoir.com/tutoriels/?tag=dot-net
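The underlying Django behaviour can be reproduced outside the site with a standalone sketch (illustrative only; the template string and minimal settings below are assumptions, not zds code): a plain string stored into the template context by a tag is autoescaped again when it is rendered, so an apostrophe such as the one in "Bat'" comes out as an HTML entity unless the value is marked safe.
```python
# Standalone demonstration of Django autoescaping of context values: a value
# that is not marked safe is HTML-escaped when {{ ... }} renders it, which is
# what mangles an apostrophe like the one in "Bat'".
import django
from django.conf import settings
from django.template import engines
from django.utils.safestring import mark_safe

settings.configure(
    TEMPLATES=[{"BACKEND": "django.template.backends.django.DjangoTemplates"}]
)
django.setup()

tpl = engines["django"].from_string("{{ captured }}")
print(tpl.render({"captured": "Bat'"}))             # apostrophe is escaped (e.g. Bat&#39;)
print(tpl.render({"captured": mark_safe("Bat'")}))  # Bat' rendered as-is
```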
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zds/utils/templatetags/captureas.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 from django import template
4
5 register = template.Library()
6
7 """
8 Define a tag allowing to capture template content as a variable.
9 """
10
11
12 @register.tag(name='captureas')
13 def do_captureas(parser, token):
14 """
15 Define a tag allowing to capture template content as a variable.
16
17 :param parser: The django template parser
18 :param token: tag token (tag_name + variable_name)
19 :return: Template node.
20 """
21
22 try:
23 _, variable_name = token.split_contents()
24 except ValueError:
25 raise template.TemplateSyntaxError("'captureas' node requires a variable name.")
26
27 nodelist = parser.parse(('endcaptureas',))
28 parser.delete_first_token()
29
30 return CaptureasNode(nodelist, variable_name)
31
32
33 class CaptureasNode(template.Node):
34 """
35 Capture end render node content to a variable name.
36 """
37
38 def __init__(self, nodelist, variable_name):
39 """
40 Create a template node which render `nodelist` to `variable_name`.
41
42 :param nodelist: The node list to capture.
43 :param variable_name: The variable name which will gain the rendered content.
44 """
45 self.__node_list = nodelist
46 self.__variable_name = variable_name
47
48 def render(self, context):
49 """
50 Render the node list to the variable name.
51
52 :param context: Current context.
53 :return: Empty string
54 :rtype: str
55 """
56 output = self.__node_list.render(context)
57 context[self.__variable_name] = output.strip()
58 return ''
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zds/utils/templatetags/captureas.py b/zds/utils/templatetags/captureas.py
--- a/zds/utils/templatetags/captureas.py
+++ b/zds/utils/templatetags/captureas.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
from django import template
+from django.utils.safestring import mark_safe
register = template.Library()
@@ -54,5 +55,5 @@
:rtype: str
"""
output = self.__node_list.render(context)
- context[self.__variable_name] = output.strip()
+ context[self.__variable_name] = mark_safe(output.strip())
return ''
| {"golden_diff": "diff --git a/zds/utils/templatetags/captureas.py b/zds/utils/templatetags/captureas.py\n--- a/zds/utils/templatetags/captureas.py\n+++ b/zds/utils/templatetags/captureas.py\n@@ -1,6 +1,7 @@\n # -*- coding: utf-8 -*-\n \n from django import template\n+from django.utils.safestring import mark_safe\n \n register = template.Library()\n \n@@ -54,5 +55,5 @@\n :rtype: str\n \"\"\"\n output = self.__node_list.render(context)\n- context[self.__variable_name] = output.strip()\n+ context[self.__variable_name] = mark_safe(output.strip())\n return ''\n", "issue": "L'affichage des pseudos dans la liste des tutoriels / article d\u00e9conne\nL'affichage du pseudo \"Bat'\" n'est pas correct.\n\n\n\nPossible de voir le comportement sur la page: https://zestedesavoir.com/tutoriels/?tag=dot-net\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom django import template\n\nregister = template.Library()\n\n\"\"\"\nDefine a tag allowing to capture template content as a variable.\n\"\"\"\n\n\[email protected](name='captureas')\ndef do_captureas(parser, token):\n \"\"\"\n Define a tag allowing to capture template content as a variable.\n\n :param parser: The django template parser\n :param token: tag token (tag_name + variable_name)\n :return: Template node.\n \"\"\"\n\n try:\n _, variable_name = token.split_contents()\n except ValueError:\n raise template.TemplateSyntaxError(\"'captureas' node requires a variable name.\")\n\n nodelist = parser.parse(('endcaptureas',))\n parser.delete_first_token()\n\n return CaptureasNode(nodelist, variable_name)\n\n\nclass CaptureasNode(template.Node):\n \"\"\"\n Capture end render node content to a variable name.\n \"\"\"\n\n def __init__(self, nodelist, variable_name):\n \"\"\"\n Create a template node which render `nodelist` to `variable_name`.\n\n :param nodelist: The node list to capture.\n :param variable_name: The variable name which will gain the rendered content.\n \"\"\"\n self.__node_list = nodelist\n self.__variable_name = variable_name\n\n def render(self, context):\n \"\"\"\n Render the node list to the variable name.\n\n :param context: Current context.\n :return: Empty string\n :rtype: str\n \"\"\"\n output = self.__node_list.render(context)\n context[self.__variable_name] = output.strip()\n return ''\n", "path": "zds/utils/templatetags/captureas.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom django import template\nfrom django.utils.safestring import mark_safe\n\nregister = template.Library()\n\n\"\"\"\nDefine a tag allowing to capture template content as a variable.\n\"\"\"\n\n\[email protected](name='captureas')\ndef do_captureas(parser, token):\n \"\"\"\n Define a tag allowing to capture template content as a variable.\n\n :param parser: The django template parser\n :param token: tag token (tag_name + variable_name)\n :return: Template node.\n \"\"\"\n\n try:\n _, variable_name = token.split_contents()\n except ValueError:\n raise template.TemplateSyntaxError(\"'captureas' node requires a variable name.\")\n\n nodelist = parser.parse(('endcaptureas',))\n parser.delete_first_token()\n\n return CaptureasNode(nodelist, variable_name)\n\n\nclass CaptureasNode(template.Node):\n \"\"\"\n Capture end render node content to a variable name.\n \"\"\"\n\n def __init__(self, nodelist, variable_name):\n \"\"\"\n Create a template node which render `nodelist` to `variable_name`.\n\n :param nodelist: The node list to capture.\n :param variable_name: The variable name which will gain the rendered content.\n \"\"\"\n 
self.__node_list = nodelist\n self.__variable_name = variable_name\n\n def render(self, context):\n \"\"\"\n Render the node list to the variable name.\n\n :param context: Current context.\n :return: Empty string\n :rtype: str\n \"\"\"\n output = self.__node_list.render(context)\n context[self.__variable_name] = mark_safe(output.strip())\n return ''\n", "path": "zds/utils/templatetags/captureas.py"}]} | 834 | 160 |
gh_patches_debug_23627 | rasdani/github-patches | git_diff | internetarchive__openlibrary-5428 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Create `i18n-messages validate` script to check for presence of fuzzy flags
<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->
### Describe the problem that you'd like solved
<!-- A clear and concise description of what you want to happen. -->
When `i18n-messages update` is executed, some translations in `.po` files will have a `fuzzy` annotation. This means that the translation may not be completely accurate, and should be manually approved by a translator. The last step of the `i18n-messages update` flow compiles all `.po` files into `.mo` files, regardless of the presence of fuzzy translations.
### Proposal & Constraints
<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->
Add validation step to i18n flow, in between the update and compile steps. Validation step will read each `.po` file, searching for lines that begin with `#, fuzzy`. If any are found in a `.po` file, that file should not be compiled.
<!-- Which suggestions or requirements should be considered for how feature needs to appear or be implemented? -->
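A rough sketch of that check (illustrative only; the helper names are assumptions, not the final script, while the `<locale>/messages.po` layout follows the existing `openlibrary/i18n` structure):
```python
# Proposed validation step, sketched: refuse to compile any locale whose
# messages.po still contains entries flagged "#, fuzzy" by the update step.
import os

def po_has_fuzzy_entries(po_path: str) -> bool:
    """True if any line of the .po file starts with a '#, fuzzy' flag."""
    with open(po_path, encoding="utf-8") as f:
        return any(line.startswith("#, fuzzy") for line in f)

def locales_failing_validation(root: str) -> list:
    """Return locale codes whose catalogs should not be compiled yet."""
    failing = []
    for locale in sorted(os.listdir(root)):
        po_path = os.path.join(root, locale, "messages.po")
        if os.path.exists(po_path) and po_has_fuzzy_entries(po_path):
            failing.append(locale)
    return failing
```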
### Additional context
<!-- Add any other context or screenshots about the feature request here. -->
This is a sub-task of #5134.
### Stakeholders
<!-- @ tag stakeholders of this bug -->
@cdrini
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `openlibrary/i18n/__init__.py`
Content:
```
1 from __future__ import print_function
2
3 import sys
4 from typing import List
5
6 import web
7 import os
8 import shutil
9
10 import babel
11 from babel._compat import BytesIO
12 from babel.support import Translations
13 from babel.messages import Catalog
14 from babel.messages.pofile import read_po, write_po
15 from babel.messages.mofile import write_mo
16 from babel.messages.extract import extract_from_file, extract_from_dir, extract_python
17
18 root = os.path.dirname(__file__)
19
20 def _compile_translation(po, mo):
21 try:
22 catalog = read_po(open(po, 'rb'))
23
24 f = open(mo, 'wb')
25 write_mo(f, catalog)
26 f.close()
27 print('compiled', po, file=web.debug)
28 except Exception as e:
29 print('failed to compile', po, file=web.debug)
30 raise e
31
32
33 def get_locales():
34 return [
35 d
36 for d in os.listdir(root)
37 if (os.path.isdir(os.path.join(root, d)) and
38 os.path.exists(os.path.join(root, d, 'messages.po')))
39 ]
40
41 def extract_templetor(fileobj, keywords, comment_tags, options):
42 """Extract i18n messages from web.py templates."""
43 try:
44 instring = fileobj.read().decode('utf-8')
45 # Replace/remove inline js '\$' which interferes with the Babel python parser:
46 cleaned_string = instring.replace('\$', '')
47 code = web.template.Template.generate_code(cleaned_string, fileobj.name)
48 f = BytesIO(code.encode('utf-8')) # Babel wants bytes, not strings
49 except Exception as e:
50 print('Failed to extract ' + fileobj.name + ':', repr(e), file=web.debug)
51 return []
52 return extract_python(f, keywords, comment_tags, options)
53
54
55 def extract_messages(dirs: List[str]):
56 catalog = Catalog(
57 project='Open Library',
58 copyright_holder='Internet Archive'
59 )
60 METHODS = [
61 ("**.py", "python"),
62 ("**.html", "openlibrary.i18n:extract_templetor")
63 ]
64 COMMENT_TAGS = ["NOTE:"]
65
66 for d in dirs:
67 extracted = extract_from_dir(d, METHODS, comment_tags=COMMENT_TAGS,
68 strip_comment_tags=True)
69
70 counts = {}
71 for filename, lineno, message, comments, context in extracted:
72 counts[filename] = counts.get(filename, 0) + 1
73 catalog.add(message, None, [(filename, lineno)], auto_comments=comments)
74
75 for filename, count in counts.items():
76 path = filename if d == filename else os.path.join(d, filename)
77 print(f"{count}\t{path}", file=sys.stderr)
78
79 path = os.path.join(root, 'messages.pot')
80 f = open(path, 'wb')
81 write_po(f, catalog)
82 f.close()
83
84 print('wrote template to', path)
85
86 def compile_translations():
87 for locale in get_locales():
88 po_path = os.path.join(root, locale, 'messages.po')
89 mo_path = os.path.join(root, locale, 'messages.mo')
90
91 if os.path.exists(po_path):
92 _compile_translation(po_path, mo_path)
93
94 def update_translations():
95 pot_path = os.path.join(root, 'messages.pot')
96 template = read_po(open(pot_path, 'rb'))
97
98 for locale in get_locales():
99 po_path = os.path.join(root, locale, 'messages.po')
100 mo_path = os.path.join(root, locale, 'messages.mo')
101
102 if os.path.exists(po_path):
103 catalog = read_po(open(po_path, 'rb'))
104 catalog.update(template)
105
106 f = open(po_path, 'wb')
107 write_po(f, catalog)
108 f.close()
109 print('updated', po_path)
110
111 compile_translations()
112
113
114 def generate_po(args):
115 if args:
116 po_dir = os.path.join(root, args[0])
117 pot_src = os.path.join(root, 'messages.pot')
118 po_dest = os.path.join(po_dir, 'messages.po')
119
120 if os.path.exists(po_dir):
121 if os.path.exists(po_dest):
122 print(f"Portable object file already exists at {po_dest}")
123 else:
124 shutil.copy(pot_src, po_dest)
125 os.chmod(po_dest, 0o666)
126 print(f"File created at {po_dest}")
127 else:
128 os.mkdir(po_dir)
129 os.chmod(po_dir, 0o777)
130 shutil.copy(pot_src, po_dest)
131 os.chmod(po_dest, 0o666)
132 print(f"File created at {po_dest}")
133 else:
134 print("Add failed. Missing required locale code.")
135
136
137 @web.memoize
138 def load_translations(lang):
139 po = os.path.join(root, lang, 'messages.po')
140 mo_path = os.path.join(root, lang, 'messages.mo')
141
142 if os.path.exists(mo_path):
143 return Translations(open(mo_path, 'rb'))
144
145 @web.memoize
146 def load_locale(lang):
147 try:
148 return babel.Locale(lang)
149 except babel.UnknownLocaleError:
150 pass
151
152 class GetText:
153 def __call__(self, string, *args, **kwargs):
154 """Translate a given string to the language of the current locale."""
155 # Get the website locale from the global ctx.lang variable, set in i18n_loadhook
156 translations = load_translations(web.ctx.lang)
157 value = (translations and translations.ugettext(string)) or string
158
159 if args:
160 value = value % args
161 elif kwargs:
162 value = value % kwargs
163
164 return value
165
166 def __getattr__(self, key):
167 from infogami.utils.i18n import strings
168 # for backward-compatability
169 return strings.get('', key)
170
171 class LazyGetText:
172 def __call__(self, string, *args, **kwargs):
173 """Translate a given string lazily."""
174 return LazyObject(lambda: GetText()(string, *args, **kwargs))
175
176 class LazyObject:
177 def __init__(self, creator):
178 self._creator = creator
179
180 def __str__(self):
181 return web.safestr(self._creator())
182
183 def __repr__(self):
184 return repr(self._creator())
185
186 def __add__(self, other):
187 return self._creator() + other
188
189 def __radd__(self, other):
190 return other + self._creator()
191
192
193 def ungettext(s1, s2, _n, *a, **kw):
194 # Get the website locale from the global ctx.lang variable, set in i18n_loadhook
195 translations = load_translations(web.ctx.lang)
196 value = translations and translations.ungettext(s1, s2, _n)
197 if not value:
198 # fallback when translation is not provided
199 if _n == 1:
200 value = s1
201 else:
202 value = s2
203
204 if a:
205 return value % a
206 elif kw:
207 return value % kw
208 else:
209 return value
210
211 def gettext_territory(code):
212 """Returns the territory name in the current locale."""
213 # Get the website locale from the global ctx.lang variable, set in i18n_loadhook
214 locale = load_locale(web.ctx.lang)
215 return locale.territories.get(code, code)
216
217 gettext = GetText()
218 ugettext = gettext
219 lgettext = LazyGetText()
220 _ = gettext
221
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/openlibrary/i18n/__init__.py b/openlibrary/i18n/__init__.py
--- a/openlibrary/i18n/__init__.py
+++ b/openlibrary/i18n/__init__.py
@@ -30,6 +30,50 @@
raise e
+def _validate_catalog(catalog, locale):
+ validation_errors = []
+ for message in catalog:
+ if message.fuzzy:
+ if message.lineno:
+ validation_errors.append(
+ f'openlibrary/i18n/{locale}/messages.po:{message.lineno}:'
+ f' "{message.string}" is fuzzy.'
+ )
+ else:
+ validation_errors.append(
+ ' File is fuzzy. Remove line containing "#, fuzzy" found near '
+ 'the beginning of the file.'
+ )
+
+ if validation_errors:
+ print("Validation failed...")
+ print("Please correct the following errors before proceeding:")
+ for e in validation_errors:
+ print(e)
+
+ return len(validation_errors) == 0
+
+
+def validate_translations(args):
+ if args:
+ locale = args[0]
+ po_path = os.path.join(root, locale, 'messages.po')
+
+ if os.path.exists(po_path):
+ catalog = read_po(open(po_path, 'rb'))
+ is_valid = _validate_catalog(catalog, locale)
+
+ if is_valid:
+ print(f'Translations for locale "{locale}" are valid!')
+ return is_valid
+ else:
+ print(f'Portable object file for locale "{locale}" does not exist.')
+ return False
+ else:
+ print('Must include locale code when executing validate.')
+ return False
+
+
def get_locales():
return [
d
| {"golden_diff": "diff --git a/openlibrary/i18n/__init__.py b/openlibrary/i18n/__init__.py\n--- a/openlibrary/i18n/__init__.py\n+++ b/openlibrary/i18n/__init__.py\n@@ -30,6 +30,50 @@\n raise e\n \n \n+def _validate_catalog(catalog, locale):\n+ validation_errors = []\n+ for message in catalog:\n+ if message.fuzzy:\n+ if message.lineno:\n+ validation_errors.append(\n+ f'openlibrary/i18n/{locale}/messages.po:{message.lineno}:'\n+ f' \"{message.string}\" is fuzzy.'\n+ )\n+ else:\n+ validation_errors.append(\n+ ' File is fuzzy. Remove line containing \"#, fuzzy\" found near '\n+ 'the beginning of the file.'\n+ )\n+\n+ if validation_errors:\n+ print(\"Validation failed...\")\n+ print(\"Please correct the following errors before proceeding:\")\n+ for e in validation_errors:\n+ print(e)\n+\n+ return len(validation_errors) == 0\n+\n+\n+def validate_translations(args):\n+ if args:\n+ locale = args[0]\n+ po_path = os.path.join(root, locale, 'messages.po')\n+\n+ if os.path.exists(po_path):\n+ catalog = read_po(open(po_path, 'rb'))\n+ is_valid = _validate_catalog(catalog, locale)\n+\n+ if is_valid:\n+ print(f'Translations for locale \"{locale}\" are valid!')\n+ return is_valid\n+ else:\n+ print(f'Portable object file for locale \"{locale}\" does not exist.')\n+ return False\n+ else:\n+ print('Must include locale code when executing validate.')\n+ return False\n+\n+\n def get_locales():\n return [\n d\n", "issue": "Create `i18n-messages validate` script to check for presence of fuzzy flags\n<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->\r\n\r\n### Describe the problem that you'd like solved\r\n<!-- A clear and concise description of what you want to happen. -->\r\nWhen `i18n-messages update` is executed, some translations in `.po` files will have a `fuzzy` annotation. This means that the translation may not be completely accurate, and should be manually approved by a translator. The last step of the `i18n-messages update` flow compiles all `.po` files into `.mo` files, regardless of the presence of fuzzy translations.\r\n\r\n### Proposal & Constraints\r\n<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->\r\nAdd validation step to i18n flow, in between the update and compile steps. Validation step will read each `.po` file, searching for lines that begin with `#, fuzzy`. If any are found in a `.po` file, that file should not be compiled.\r\n\r\n<!-- Which suggestions or requirements should be considered for how feature needs to appear or be implemented? -->\r\n\r\n### Additional context\r\n<!-- Add any other context or screenshots about the feature request here. 
-->\r\nThis is a sub-task of #5134.\r\n### Stakeholders\r\n<!-- @ tag stakeholders of this bug -->\r\n@cdrini\r\n\r\n\n", "before_files": [{"content": "from __future__ import print_function\n\nimport sys\nfrom typing import List\n\nimport web\nimport os\nimport shutil\n\nimport babel\nfrom babel._compat import BytesIO\nfrom babel.support import Translations\nfrom babel.messages import Catalog\nfrom babel.messages.pofile import read_po, write_po\nfrom babel.messages.mofile import write_mo\nfrom babel.messages.extract import extract_from_file, extract_from_dir, extract_python\n\nroot = os.path.dirname(__file__)\n\ndef _compile_translation(po, mo):\n try:\n catalog = read_po(open(po, 'rb'))\n\n f = open(mo, 'wb')\n write_mo(f, catalog)\n f.close()\n print('compiled', po, file=web.debug)\n except Exception as e:\n print('failed to compile', po, file=web.debug)\n raise e\n\n\ndef get_locales():\n return [\n d\n for d in os.listdir(root)\n if (os.path.isdir(os.path.join(root, d)) and\n os.path.exists(os.path.join(root, d, 'messages.po')))\n ]\n\ndef extract_templetor(fileobj, keywords, comment_tags, options):\n \"\"\"Extract i18n messages from web.py templates.\"\"\"\n try:\n instring = fileobj.read().decode('utf-8')\n # Replace/remove inline js '\\$' which interferes with the Babel python parser:\n cleaned_string = instring.replace('\\$', '')\n code = web.template.Template.generate_code(cleaned_string, fileobj.name)\n f = BytesIO(code.encode('utf-8')) # Babel wants bytes, not strings\n except Exception as e:\n print('Failed to extract ' + fileobj.name + ':', repr(e), file=web.debug)\n return []\n return extract_python(f, keywords, comment_tags, options)\n\n\ndef extract_messages(dirs: List[str]):\n catalog = Catalog(\n project='Open Library',\n copyright_holder='Internet Archive'\n )\n METHODS = [\n (\"**.py\", \"python\"),\n (\"**.html\", \"openlibrary.i18n:extract_templetor\")\n ]\n COMMENT_TAGS = [\"NOTE:\"]\n\n for d in dirs:\n extracted = extract_from_dir(d, METHODS, comment_tags=COMMENT_TAGS,\n strip_comment_tags=True)\n\n counts = {}\n for filename, lineno, message, comments, context in extracted:\n counts[filename] = counts.get(filename, 0) + 1\n catalog.add(message, None, [(filename, lineno)], auto_comments=comments)\n\n for filename, count in counts.items():\n path = filename if d == filename else os.path.join(d, filename)\n print(f\"{count}\\t{path}\", file=sys.stderr)\n\n path = os.path.join(root, 'messages.pot')\n f = open(path, 'wb')\n write_po(f, catalog)\n f.close()\n\n print('wrote template to', path)\n\ndef compile_translations():\n for locale in get_locales():\n po_path = os.path.join(root, locale, 'messages.po')\n mo_path = os.path.join(root, locale, 'messages.mo')\n\n if os.path.exists(po_path):\n _compile_translation(po_path, mo_path)\n\ndef update_translations():\n pot_path = os.path.join(root, 'messages.pot')\n template = read_po(open(pot_path, 'rb'))\n\n for locale in get_locales():\n po_path = os.path.join(root, locale, 'messages.po')\n mo_path = os.path.join(root, locale, 'messages.mo')\n\n if os.path.exists(po_path):\n catalog = read_po(open(po_path, 'rb'))\n catalog.update(template)\n\n f = open(po_path, 'wb')\n write_po(f, catalog)\n f.close()\n print('updated', po_path)\n\n compile_translations()\n\n\ndef generate_po(args):\n if args:\n po_dir = os.path.join(root, args[0])\n pot_src = os.path.join(root, 'messages.pot')\n po_dest = os.path.join(po_dir, 'messages.po')\n\n if os.path.exists(po_dir):\n if os.path.exists(po_dest):\n print(f\"Portable object file 
already exists at {po_dest}\")\n else:\n shutil.copy(pot_src, po_dest)\n os.chmod(po_dest, 0o666)\n print(f\"File created at {po_dest}\")\n else:\n os.mkdir(po_dir)\n os.chmod(po_dir, 0o777)\n shutil.copy(pot_src, po_dest)\n os.chmod(po_dest, 0o666)\n print(f\"File created at {po_dest}\")\n else:\n print(\"Add failed. Missing required locale code.\")\n\n\[email protected]\ndef load_translations(lang):\n po = os.path.join(root, lang, 'messages.po')\n mo_path = os.path.join(root, lang, 'messages.mo')\n\n if os.path.exists(mo_path):\n return Translations(open(mo_path, 'rb'))\n\[email protected]\ndef load_locale(lang):\n try:\n return babel.Locale(lang)\n except babel.UnknownLocaleError:\n pass\n\nclass GetText:\n def __call__(self, string, *args, **kwargs):\n \"\"\"Translate a given string to the language of the current locale.\"\"\"\n # Get the website locale from the global ctx.lang variable, set in i18n_loadhook\n translations = load_translations(web.ctx.lang)\n value = (translations and translations.ugettext(string)) or string\n\n if args:\n value = value % args\n elif kwargs:\n value = value % kwargs\n\n return value\n\n def __getattr__(self, key):\n from infogami.utils.i18n import strings\n # for backward-compatability\n return strings.get('', key)\n\nclass LazyGetText:\n def __call__(self, string, *args, **kwargs):\n \"\"\"Translate a given string lazily.\"\"\"\n return LazyObject(lambda: GetText()(string, *args, **kwargs))\n\nclass LazyObject:\n def __init__(self, creator):\n self._creator = creator\n\n def __str__(self):\n return web.safestr(self._creator())\n\n def __repr__(self):\n return repr(self._creator())\n\n def __add__(self, other):\n return self._creator() + other\n\n def __radd__(self, other):\n return other + self._creator()\n\n\ndef ungettext(s1, s2, _n, *a, **kw):\n # Get the website locale from the global ctx.lang variable, set in i18n_loadhook\n translations = load_translations(web.ctx.lang)\n value = translations and translations.ungettext(s1, s2, _n)\n if not value:\n # fallback when translation is not provided\n if _n == 1:\n value = s1\n else:\n value = s2\n\n if a:\n return value % a\n elif kw:\n return value % kw\n else:\n return value\n\ndef gettext_territory(code):\n \"\"\"Returns the territory name in the current locale.\"\"\"\n # Get the website locale from the global ctx.lang variable, set in i18n_loadhook\n locale = load_locale(web.ctx.lang)\n return locale.territories.get(code, code)\n\ngettext = GetText()\nugettext = gettext\nlgettext = LazyGetText()\n_ = gettext\n", "path": "openlibrary/i18n/__init__.py"}], "after_files": [{"content": "from __future__ import print_function\n\nimport sys\nfrom typing import List\n\nimport web\nimport os\nimport shutil\n\nimport babel\nfrom babel._compat import BytesIO\nfrom babel.support import Translations\nfrom babel.messages import Catalog\nfrom babel.messages.pofile import read_po, write_po\nfrom babel.messages.mofile import write_mo\nfrom babel.messages.extract import extract_from_file, extract_from_dir, extract_python\n\nroot = os.path.dirname(__file__)\n\ndef _compile_translation(po, mo):\n try:\n catalog = read_po(open(po, 'rb'))\n\n f = open(mo, 'wb')\n write_mo(f, catalog)\n f.close()\n print('compiled', po, file=web.debug)\n except Exception as e:\n print('failed to compile', po, file=web.debug)\n raise e\n\n\ndef _validate_catalog(catalog, locale):\n validation_errors = []\n for message in catalog:\n if message.fuzzy:\n if message.lineno:\n validation_errors.append(\n 
f'openlibrary/i18n/{locale}/messages.po:{message.lineno}:'\n f' \"{message.string}\" is fuzzy.'\n )\n else:\n validation_errors.append(\n ' File is fuzzy. Remove line containing \"#, fuzzy\" found near '\n 'the beginning of the file.'\n )\n\n if validation_errors:\n print(\"Validation failed...\")\n print(\"Please correct the following errors before proceeding:\")\n for e in validation_errors:\n print(e)\n\n return len(validation_errors) == 0\n\n\ndef validate_translations(args):\n if args:\n locale = args[0]\n po_path = os.path.join(root, locale, 'messages.po')\n\n if os.path.exists(po_path):\n catalog = read_po(open(po_path, 'rb'))\n is_valid = _validate_catalog(catalog, locale)\n\n if is_valid:\n print(f'Translations for locale \"{locale}\" are valid!')\n return is_valid\n else:\n print(f'Portable object file for locale \"{locale}\" does not exist.')\n return False\n else:\n print('Must include locale code when executing validate.')\n return False\n\n\ndef get_locales():\n return [\n d\n for d in os.listdir(root)\n if (os.path.isdir(os.path.join(root, d)) and\n os.path.exists(os.path.join(root, d, 'messages.po')))\n ]\n\ndef extract_templetor(fileobj, keywords, comment_tags, options):\n \"\"\"Extract i18n messages from web.py templates.\"\"\"\n try:\n instring = fileobj.read().decode('utf-8')\n # Replace/remove inline js '\\$' which interferes with the Babel python parser:\n cleaned_string = instring.replace('\\$', '')\n code = web.template.Template.generate_code(cleaned_string, fileobj.name)\n f = BytesIO(code.encode('utf-8')) # Babel wants bytes, not strings\n except Exception as e:\n print('Failed to extract ' + fileobj.name + ':', repr(e), file=web.debug)\n return []\n return extract_python(f, keywords, comment_tags, options)\n\n\ndef extract_messages(dirs: List[str]):\n catalog = Catalog(\n project='Open Library',\n copyright_holder='Internet Archive'\n )\n METHODS = [\n (\"**.py\", \"python\"),\n (\"**.html\", \"openlibrary.i18n:extract_templetor\")\n ]\n COMMENT_TAGS = [\"NOTE:\"]\n\n for d in dirs:\n extracted = extract_from_dir(d, METHODS, comment_tags=COMMENT_TAGS,\n strip_comment_tags=True)\n\n counts = {}\n for filename, lineno, message, comments, context in extracted:\n counts[filename] = counts.get(filename, 0) + 1\n catalog.add(message, None, [(filename, lineno)], auto_comments=comments)\n\n for filename, count in counts.items():\n path = filename if d == filename else os.path.join(d, filename)\n print(f\"{count}\\t{path}\", file=sys.stderr)\n\n path = os.path.join(root, 'messages.pot')\n f = open(path, 'wb')\n write_po(f, catalog)\n f.close()\n\n print('wrote template to', path)\n\ndef compile_translations():\n for locale in get_locales():\n po_path = os.path.join(root, locale, 'messages.po')\n mo_path = os.path.join(root, locale, 'messages.mo')\n\n if os.path.exists(po_path):\n _compile_translation(po_path, mo_path)\n\ndef update_translations():\n pot_path = os.path.join(root, 'messages.pot')\n template = read_po(open(pot_path, 'rb'))\n\n for locale in get_locales():\n po_path = os.path.join(root, locale, 'messages.po')\n mo_path = os.path.join(root, locale, 'messages.mo')\n\n if os.path.exists(po_path):\n catalog = read_po(open(po_path, 'rb'))\n catalog.update(template)\n\n f = open(po_path, 'wb')\n write_po(f, catalog)\n f.close()\n print('updated', po_path)\n\n compile_translations()\n\n\ndef generate_po(args):\n if args:\n po_dir = os.path.join(root, args[0])\n pot_src = os.path.join(root, 'messages.pot')\n po_dest = os.path.join(po_dir, 'messages.po')\n\n if 
os.path.exists(po_dir):\n if os.path.exists(po_dest):\n print(f\"Portable object file already exists at {po_dest}\")\n else:\n shutil.copy(pot_src, po_dest)\n os.chmod(po_dest, 0o666)\n print(f\"File created at {po_dest}\")\n else:\n os.mkdir(po_dir)\n os.chmod(po_dir, 0o777)\n shutil.copy(pot_src, po_dest)\n os.chmod(po_dest, 0o666)\n print(f\"File created at {po_dest}\")\n else:\n print(\"Add failed. Missing required locale code.\")\n\n\[email protected]\ndef load_translations(lang):\n po = os.path.join(root, lang, 'messages.po')\n mo_path = os.path.join(root, lang, 'messages.mo')\n\n if os.path.exists(mo_path):\n return Translations(open(mo_path, 'rb'))\n\[email protected]\ndef load_locale(lang):\n try:\n return babel.Locale(lang)\n except babel.UnknownLocaleError:\n pass\n\nclass GetText:\n def __call__(self, string, *args, **kwargs):\n \"\"\"Translate a given string to the language of the current locale.\"\"\"\n # Get the website locale from the global ctx.lang variable, set in i18n_loadhook\n translations = load_translations(web.ctx.lang)\n value = (translations and translations.ugettext(string)) or string\n\n if args:\n value = value % args\n elif kwargs:\n value = value % kwargs\n\n return value\n\n def __getattr__(self, key):\n from infogami.utils.i18n import strings\n # for backward-compatability\n return strings.get('', key)\n\nclass LazyGetText:\n def __call__(self, string, *args, **kwargs):\n \"\"\"Translate a given string lazily.\"\"\"\n return LazyObject(lambda: GetText()(string, *args, **kwargs))\n\nclass LazyObject:\n def __init__(self, creator):\n self._creator = creator\n\n def __str__(self):\n return web.safestr(self._creator())\n\n def __repr__(self):\n return repr(self._creator())\n\n def __add__(self, other):\n return self._creator() + other\n\n def __radd__(self, other):\n return other + self._creator()\n\n\ndef ungettext(s1, s2, _n, *a, **kw):\n # Get the website locale from the global ctx.lang variable, set in i18n_loadhook\n translations = load_translations(web.ctx.lang)\n value = translations and translations.ungettext(s1, s2, _n)\n if not value:\n # fallback when translation is not provided\n if _n == 1:\n value = s1\n else:\n value = s2\n\n if a:\n return value % a\n elif kw:\n return value % kw\n else:\n return value\n\ndef gettext_territory(code):\n \"\"\"Returns the territory name in the current locale.\"\"\"\n # Get the website locale from the global ctx.lang variable, set in i18n_loadhook\n locale = load_locale(web.ctx.lang)\n return locale.territories.get(code, code)\n\ngettext = GetText()\nugettext = gettext\nlgettext = LazyGetText()\n_ = gettext\n", "path": "openlibrary/i18n/__init__.py"}]} | 2,731 | 402 |
gh_patches_debug_1113 | rasdani/github-patches | git_diff | Pylons__pyramid-2225 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update to Sphinx 1.3.4 when released
There is a [bug in Sphinx 1.3.3 and 1.3.1](https://github.com/sphinx-doc/sphinx/issues/2189) (I haven't tried 1.3.2) where next and previous links in Sphinx documentation are broken when going into children and across sibling directories.
When 1.3.4 is released, we need to pin sphinx to 1.3.4, which will include the commit made 8 days after the 1.3.3 release.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 ##############################################################################
2 #
3 # Copyright (c) 2008-2013 Agendaless Consulting and Contributors.
4 # All Rights Reserved.
5 #
6 # This software is subject to the provisions of the BSD-like license at
7 # http://www.repoze.org/LICENSE.txt. A copy of the license should accompany
8 # this distribution. THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL
9 # EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO,
10 # THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND
11 # FITNESS FOR A PARTICULAR PURPOSE
12 #
13 ##############################################################################
14
15 import os
16 import sys
17
18 from setuptools import setup, find_packages
19
20 py_version = sys.version_info[:2]
21
22 PY3 = py_version[0] == 3
23
24 if PY3:
25 if py_version < (3, 2):
26 raise RuntimeError('On Python 3, Pyramid requires Python 3.2 or better')
27 else:
28 if py_version < (2, 6):
29 raise RuntimeError('On Python 2, Pyramid requires Python 2.6 or better')
30
31 here = os.path.abspath(os.path.dirname(__file__))
32 try:
33 with open(os.path.join(here, 'README.rst')) as f:
34 README = f.read()
35 with open(os.path.join(here, 'CHANGES.txt')) as f:
36 CHANGES = f.read()
37 except IOError:
38 README = CHANGES = ''
39
40 install_requires=[
41 'setuptools',
42 'WebOb >= 1.3.1', # request.domain and CookieProfile
43 'repoze.lru >= 0.4', # py3 compat
44 'zope.interface >= 3.8.0', # has zope.interface.registry
45 'zope.deprecation >= 3.5.0', # py3 compat
46 'venusian >= 1.0a3', # ``ignore``
47 'translationstring >= 0.4', # py3 compat
48 'PasteDeploy >= 1.5.0', # py3 compat
49 ]
50
51 tests_require = [
52 'WebTest >= 1.3.1', # py3 compat
53 ]
54
55 if not PY3:
56 tests_require.append('zope.component>=3.11.0')
57
58 docs_extras = [
59 'Sphinx >= 1.3.1',
60 'docutils',
61 'repoze.sphinx.autointerface',
62 'pylons_sphinx_latesturl',
63 'pylons-sphinx-themes',
64 'sphinxcontrib-programoutput',
65 ]
66
67 testing_extras = tests_require + [
68 'nose',
69 'coverage',
70 'virtualenv', # for scaffolding tests
71 ]
72
73 setup(name='pyramid',
74 version='1.6',
75 description='The Pyramid Web Framework, a Pylons project',
76 long_description=README + '\n\n' + CHANGES,
77 classifiers=[
78 "Development Status :: 6 - Mature",
79 "Intended Audience :: Developers",
80 "Programming Language :: Python",
81 "Programming Language :: Python :: 2.6",
82 "Programming Language :: Python :: 2.7",
83 "Programming Language :: Python :: 3",
84 "Programming Language :: Python :: 3.2",
85 "Programming Language :: Python :: 3.3",
86 "Programming Language :: Python :: 3.4",
87 "Programming Language :: Python :: 3.5",
88 "Programming Language :: Python :: Implementation :: CPython",
89 "Programming Language :: Python :: Implementation :: PyPy",
90 "Framework :: Pyramid",
91 "Topic :: Internet :: WWW/HTTP",
92 "Topic :: Internet :: WWW/HTTP :: WSGI",
93 "License :: Repoze Public License",
94 ],
95 keywords='web wsgi pylons pyramid',
96 author="Chris McDonough, Agendaless Consulting",
97 author_email="[email protected]",
98 url="http://docs.pylonsproject.org/en/latest/docs/pyramid.html",
99 license="BSD-derived (http://www.repoze.org/LICENSE.txt)",
100 packages=find_packages(),
101 include_package_data=True,
102 zip_safe=False,
103 install_requires = install_requires,
104 extras_require = {
105 'testing':testing_extras,
106 'docs':docs_extras,
107 },
108 tests_require = tests_require,
109 test_suite="pyramid.tests",
110 entry_points = """\
111 [pyramid.scaffold]
112 starter=pyramid.scaffolds:StarterProjectTemplate
113 zodb=pyramid.scaffolds:ZODBProjectTemplate
114 alchemy=pyramid.scaffolds:AlchemyProjectTemplate
115 [pyramid.pshell_runner]
116 python=pyramid.scripts.pshell:python_shell_runner
117 [console_scripts]
118 pcreate = pyramid.scripts.pcreate:main
119 pserve = pyramid.scripts.pserve:main
120 pshell = pyramid.scripts.pshell:main
121 proutes = pyramid.scripts.proutes:main
122 pviews = pyramid.scripts.pviews:main
123 ptweens = pyramid.scripts.ptweens:main
124 prequest = pyramid.scripts.prequest:main
125 pdistreport = pyramid.scripts.pdistreport:main
126 [paste.server_runner]
127 wsgiref = pyramid.scripts.pserve:wsgiref_server_runner
128 cherrypy = pyramid.scripts.pserve:cherrypy_server_runner
129 """
130 )
131
132
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -56,7 +56,7 @@
tests_require.append('zope.component>=3.11.0')
docs_extras = [
- 'Sphinx >= 1.3.1',
+ 'Sphinx >= 1.3.4',
'docutils',
'repoze.sphinx.autointerface',
'pylons_sphinx_latesturl',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -56,7 +56,7 @@\n tests_require.append('zope.component>=3.11.0')\n \n docs_extras = [\n- 'Sphinx >= 1.3.1',\n+ 'Sphinx >= 1.3.4',\n 'docutils',\n 'repoze.sphinx.autointerface',\n 'pylons_sphinx_latesturl',\n", "issue": "Update to Sphinx 1.3.4 when released\nThere is a [bug in Sphinx 1.3.3 and 1.3.1](https://github.com/sphinx-doc/sphinx/issues/2189) (I haven't tried 1.3.2) where next and previous links in Sphinx documentation are broken when going into children and across sibling directories.\n\nWhen 1.3.4 is released, we need to pin sphinx to 1.3.4, which will include the commit made 8 days after the 1.3.3 release.\n\n", "before_files": [{"content": "##############################################################################\n#\n# Copyright (c) 2008-2013 Agendaless Consulting and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the BSD-like license at\n# http://www.repoze.org/LICENSE.txt. A copy of the license should accompany\n# this distribution. THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL\n# EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND\n# FITNESS FOR A PARTICULAR PURPOSE\n#\n##############################################################################\n\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\n\npy_version = sys.version_info[:2]\n\nPY3 = py_version[0] == 3\n\nif PY3:\n if py_version < (3, 2):\n raise RuntimeError('On Python 3, Pyramid requires Python 3.2 or better')\nelse:\n if py_version < (2, 6):\n raise RuntimeError('On Python 2, Pyramid requires Python 2.6 or better')\n\nhere = os.path.abspath(os.path.dirname(__file__))\ntry:\n with open(os.path.join(here, 'README.rst')) as f:\n README = f.read()\n with open(os.path.join(here, 'CHANGES.txt')) as f:\n CHANGES = f.read()\nexcept IOError:\n README = CHANGES = ''\n\ninstall_requires=[\n 'setuptools',\n 'WebOb >= 1.3.1', # request.domain and CookieProfile\n 'repoze.lru >= 0.4', # py3 compat\n 'zope.interface >= 3.8.0', # has zope.interface.registry\n 'zope.deprecation >= 3.5.0', # py3 compat\n 'venusian >= 1.0a3', # ``ignore``\n 'translationstring >= 0.4', # py3 compat\n 'PasteDeploy >= 1.5.0', # py3 compat\n ]\n\ntests_require = [\n 'WebTest >= 1.3.1', # py3 compat\n ]\n\nif not PY3:\n tests_require.append('zope.component>=3.11.0')\n\ndocs_extras = [\n 'Sphinx >= 1.3.1',\n 'docutils',\n 'repoze.sphinx.autointerface',\n 'pylons_sphinx_latesturl',\n 'pylons-sphinx-themes',\n 'sphinxcontrib-programoutput',\n ]\n\ntesting_extras = tests_require + [\n 'nose',\n 'coverage',\n 'virtualenv', # for scaffolding tests\n ]\n\nsetup(name='pyramid',\n version='1.6',\n description='The Pyramid Web Framework, a Pylons project',\n long_description=README + '\\n\\n' + CHANGES,\n classifiers=[\n \"Development Status :: 6 - Mature\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.2\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Framework :: Pyramid\",\n 
\"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI\",\n \"License :: Repoze Public License\",\n ],\n keywords='web wsgi pylons pyramid',\n author=\"Chris McDonough, Agendaless Consulting\",\n author_email=\"[email protected]\",\n url=\"http://docs.pylonsproject.org/en/latest/docs/pyramid.html\",\n license=\"BSD-derived (http://www.repoze.org/LICENSE.txt)\",\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n install_requires = install_requires,\n extras_require = {\n 'testing':testing_extras,\n 'docs':docs_extras,\n },\n tests_require = tests_require,\n test_suite=\"pyramid.tests\",\n entry_points = \"\"\"\\\n [pyramid.scaffold]\n starter=pyramid.scaffolds:StarterProjectTemplate\n zodb=pyramid.scaffolds:ZODBProjectTemplate\n alchemy=pyramid.scaffolds:AlchemyProjectTemplate\n [pyramid.pshell_runner]\n python=pyramid.scripts.pshell:python_shell_runner\n [console_scripts]\n pcreate = pyramid.scripts.pcreate:main\n pserve = pyramid.scripts.pserve:main\n pshell = pyramid.scripts.pshell:main\n proutes = pyramid.scripts.proutes:main\n pviews = pyramid.scripts.pviews:main\n ptweens = pyramid.scripts.ptweens:main\n prequest = pyramid.scripts.prequest:main\n pdistreport = pyramid.scripts.pdistreport:main\n [paste.server_runner]\n wsgiref = pyramid.scripts.pserve:wsgiref_server_runner\n cherrypy = pyramid.scripts.pserve:cherrypy_server_runner\n \"\"\"\n )\n\n", "path": "setup.py"}], "after_files": [{"content": "##############################################################################\n#\n# Copyright (c) 2008-2013 Agendaless Consulting and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the BSD-like license at\n# http://www.repoze.org/LICENSE.txt. A copy of the license should accompany\n# this distribution. 
THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL\n# EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND\n# FITNESS FOR A PARTICULAR PURPOSE\n#\n##############################################################################\n\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\n\npy_version = sys.version_info[:2]\n\nPY3 = py_version[0] == 3\n\nif PY3:\n if py_version < (3, 2):\n raise RuntimeError('On Python 3, Pyramid requires Python 3.2 or better')\nelse:\n if py_version < (2, 6):\n raise RuntimeError('On Python 2, Pyramid requires Python 2.6 or better')\n\nhere = os.path.abspath(os.path.dirname(__file__))\ntry:\n with open(os.path.join(here, 'README.rst')) as f:\n README = f.read()\n with open(os.path.join(here, 'CHANGES.txt')) as f:\n CHANGES = f.read()\nexcept IOError:\n README = CHANGES = ''\n\ninstall_requires=[\n 'setuptools',\n 'WebOb >= 1.3.1', # request.domain and CookieProfile\n 'repoze.lru >= 0.4', # py3 compat\n 'zope.interface >= 3.8.0', # has zope.interface.registry\n 'zope.deprecation >= 3.5.0', # py3 compat\n 'venusian >= 1.0a3', # ``ignore``\n 'translationstring >= 0.4', # py3 compat\n 'PasteDeploy >= 1.5.0', # py3 compat\n ]\n\ntests_require = [\n 'WebTest >= 1.3.1', # py3 compat\n ]\n\nif not PY3:\n tests_require.append('zope.component>=3.11.0')\n\ndocs_extras = [\n 'Sphinx >= 1.3.4',\n 'docutils',\n 'repoze.sphinx.autointerface',\n 'pylons_sphinx_latesturl',\n 'pylons-sphinx-themes',\n 'sphinxcontrib-programoutput',\n ]\n\ntesting_extras = tests_require + [\n 'nose',\n 'coverage',\n 'virtualenv', # for scaffolding tests\n ]\n\nsetup(name='pyramid',\n version='1.6',\n description='The Pyramid Web Framework, a Pylons project',\n long_description=README + '\\n\\n' + CHANGES,\n classifiers=[\n \"Development Status :: 6 - Mature\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.2\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Framework :: Pyramid\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI\",\n \"License :: Repoze Public License\",\n ],\n keywords='web wsgi pylons pyramid',\n author=\"Chris McDonough, Agendaless Consulting\",\n author_email=\"[email protected]\",\n url=\"http://docs.pylonsproject.org/en/latest/docs/pyramid.html\",\n license=\"BSD-derived (http://www.repoze.org/LICENSE.txt)\",\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n install_requires = install_requires,\n extras_require = {\n 'testing':testing_extras,\n 'docs':docs_extras,\n },\n tests_require = tests_require,\n test_suite=\"pyramid.tests\",\n entry_points = \"\"\"\\\n [pyramid.scaffold]\n starter=pyramid.scaffolds:StarterProjectTemplate\n zodb=pyramid.scaffolds:ZODBProjectTemplate\n alchemy=pyramid.scaffolds:AlchemyProjectTemplate\n [pyramid.pshell_runner]\n python=pyramid.scripts.pshell:python_shell_runner\n [console_scripts]\n pcreate = pyramid.scripts.pcreate:main\n pserve = pyramid.scripts.pserve:main\n pshell = pyramid.scripts.pshell:main\n proutes = pyramid.scripts.proutes:main\n 
pviews = pyramid.scripts.pviews:main\n ptweens = pyramid.scripts.ptweens:main\n prequest = pyramid.scripts.prequest:main\n pdistreport = pyramid.scripts.pdistreport:main\n [paste.server_runner]\n wsgiref = pyramid.scripts.pserve:wsgiref_server_runner\n cherrypy = pyramid.scripts.pserve:cherrypy_server_runner\n \"\"\"\n )\n\n", "path": "setup.py"}]} | 1,821 | 106 |
gh_patches_debug_5381 | rasdani/github-patches | git_diff | ManimCommunity__manim-1053 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Broken source links on the stable version of the documentation
## Description of bug / unexpected behavior
Source links on the stable version of documentation does not work. It links to something like this: https://github.com/ManimCommunity/manim/blob/stable/manim/mobject/changing.py which is a 404 error.
## Expected behavior
Source links should link to a file containing source code for the stable version.
## How to reproduce the issue
On the documentation website, switch the version to stable. Navigate to and click the source link of any class.
## Additional comments
Perhaps this is an access rights issue, which explains why it evaded detection from community devs for so long?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/source/conf.py`
Content:
```
1 # Configuration file for the Sphinx documentation builder.
2 #
3 # This file only contains a selection of the most common options. For a full
4 # list see the documentation:
5 # https://www.sphinx-doc.org/en/master/usage/configuration.html
6
7 # -- Path setup --------------------------------------------------------------
8
9 # If extensions (or modules to document with autodoc) are in another directory,
10 # add these directories to sys.path here. If the directory is relative to the
11 # documentation root, use os.path.abspath to make it absolute, like shown here.
12
13 import os
14 import sys
15 from distutils.sysconfig import get_python_lib
16 from pathlib import Path
17
18 sys.path.insert(0, os.path.abspath("."))
19
20
21 if os.environ.get("READTHEDOCS") == "True":
22 site_path = get_python_lib()
23 # we need to add ffmpeg to the path
24 ffmpeg_path = os.path.join(site_path, "imageio_ffmpeg", "binaries")
25 # the included binary is named ffmpeg-linux..., create a symlink
26 [ffmpeg_bin] = [
27 file for file in os.listdir(ffmpeg_path) if file.startswith("ffmpeg-")
28 ]
29 os.symlink(
30 os.path.join(ffmpeg_path, ffmpeg_bin), os.path.join(ffmpeg_path, "ffmpeg")
31 )
32 os.environ["PATH"] += os.pathsep + ffmpeg_path
33
34
35 # -- Project information -----------------------------------------------------
36
37 project = "Manim"
38 copyright = "2020, The Manim Community Dev Team"
39 author = "The Manim Community Dev Team"
40
41
42 # -- General configuration ---------------------------------------------------
43
44 # Add any Sphinx extension module names here, as strings. They can be
45 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
46 # ones.
47 extensions = [
48 "sphinx.ext.autodoc",
49 "recommonmark",
50 "sphinx_copybutton",
51 "sphinx.ext.napoleon",
52 "sphinx.ext.autosummary",
53 "sphinx.ext.doctest",
54 "sphinx.ext.extlinks",
55 "sphinx.ext.linkcode",
56 "sphinxext.opengraph",
57 "manim_directive",
58 ]
59
60 # Automatically generate stub pages when using the .. autosummary directive
61 autosummary_generate = True
62
63 # generate documentation from type hints
64 autodoc_typehints = "description"
65 autoclass_content = "both"
66
67 # controls whether functions documented by the autofunction directive
68 # appear with their full module names
69 add_module_names = False
70
71 # Add any paths that contain templates here, relative to this directory.
72 templates_path = ["_templates"]
73
74 # Custom section headings in our documentation
75 napoleon_custom_sections = ["Tests", ("Test", "Tests")]
76
77 # List of patterns, relative to source directory, that match files and
78 # directories to ignore when looking for source files.
79 # This pattern also affects html_static_path and html_extra_path.
80 exclude_patterns = []
81
82
83 # -- Options for HTML output -------------------------------------------------
84
85 # The theme to use for HTML and HTML Help pages. See the documentation for
86 # a list of builtin themes.
87 #
88 import guzzle_sphinx_theme
89
90 html_theme_path = guzzle_sphinx_theme.html_theme_path()
91 html_theme = "guzzle_sphinx_theme"
92 html_favicon = str(Path("_static/favicon.ico"))
93
94 # There's a standing issue with Sphinx's new-style sidebars. This is a
95 # workaround. Taken from
96 # https://github.com/guzzle/guzzle_sphinx_theme/issues/33#issuecomment-637081826
97 html_sidebars = {"**": ["logo-text.html", "globaltoc.html", "searchbox.html"]}
98
99 # Register the theme as an extension to generate a sitemap.xml
100 extensions.append("guzzle_sphinx_theme")
101
102 # Add any paths that contain custom static files (such as style sheets) here,
103 # relative to this directory. They are copied after the builtin static files,
104 # so a file named "default.css" will overwrite the builtin "default.css".
105 html_static_path = ["_static"]
106
107 # This specifies any additional css files that will override the theme's
108 html_css_files = ["custom.css"]
109
110 # source links to github
111 def linkcode_resolve(domain, info):
112 if domain != "py":
113 return None
114 if not info["module"]:
115 return None
116 filename = info["module"].replace(".", "/")
117 version = os.getenv("READTHEDOCS_VERSION", "master")
118 if version == "latest":
119 version = "master"
120 return f"https://github.com/ManimCommunity/manim/blob/{version}/{filename}.py"
121
122
123 # external links
124 extlinks = {
125 "issue": ("https://github.com/ManimCommunity/manim/issues/%s", "issue "),
126 "pr": ("https://github.com/ManimCommunity/manim/pull/%s", "pull request "),
127 }
128
129 # opengraph settings
130 ogp_image = "https://www.manim.community/logo.png"
131 ogp_site_name = "Manim Community | Documentation"
132 ogp_site_url = "https://docs.manim.community/"
133
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/source/conf.py b/docs/source/conf.py
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -115,7 +115,7 @@
return None
filename = info["module"].replace(".", "/")
version = os.getenv("READTHEDOCS_VERSION", "master")
- if version == "latest":
+ if version == "latest" or version == "stable":
version = "master"
return f"https://github.com/ManimCommunity/manim/blob/{version}/{filename}.py"
| {"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -115,7 +115,7 @@\n return None\n filename = info[\"module\"].replace(\".\", \"/\")\n version = os.getenv(\"READTHEDOCS_VERSION\", \"master\")\n- if version == \"latest\":\n+ if version == \"latest\" or version == \"stable\":\n version = \"master\"\n return f\"https://github.com/ManimCommunity/manim/blob/{version}/{filename}.py\"\n", "issue": "Broken source links on the stable version of the documentation\n## Description of bug / unexpected behavior\nSource links on the stable version of documentation does not work. It links to something like this: https://github.com/ManimCommunity/manim/blob/stable/manim/mobject/changing.py which is a 404 error. \n\n## Expected behavior\nSource links should link to a file containing source code for the stable version. \n\n## How to reproduce the issue\nOn the documentation website, switch the version to stable. Navigate to and click the source link of any class. \n\n## Additional comments\nPerhaps this is an access rights issue, which explains why it evaded detection from community devs for so long?\n\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nimport os\nimport sys\nfrom distutils.sysconfig import get_python_lib\nfrom pathlib import Path\n\nsys.path.insert(0, os.path.abspath(\".\"))\n\n\nif os.environ.get(\"READTHEDOCS\") == \"True\":\n site_path = get_python_lib()\n # we need to add ffmpeg to the path\n ffmpeg_path = os.path.join(site_path, \"imageio_ffmpeg\", \"binaries\")\n # the included binary is named ffmpeg-linux..., create a symlink\n [ffmpeg_bin] = [\n file for file in os.listdir(ffmpeg_path) if file.startswith(\"ffmpeg-\")\n ]\n os.symlink(\n os.path.join(ffmpeg_path, ffmpeg_bin), os.path.join(ffmpeg_path, \"ffmpeg\")\n )\n os.environ[\"PATH\"] += os.pathsep + ffmpeg_path\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Manim\"\ncopyright = \"2020, The Manim Community Dev Team\"\nauthor = \"The Manim Community Dev Team\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"recommonmark\",\n \"sphinx_copybutton\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.extlinks\",\n \"sphinx.ext.linkcode\",\n \"sphinxext.opengraph\",\n \"manim_directive\",\n]\n\n# Automatically generate stub pages when using the .. 
autosummary directive\nautosummary_generate = True\n\n# generate documentation from type hints\nautodoc_typehints = \"description\"\nautoclass_content = \"both\"\n\n# controls whether functions documented by the autofunction directive\n# appear with their full module names\nadd_module_names = False\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# Custom section headings in our documentation\nnapoleon_custom_sections = [\"Tests\", (\"Test\", \"Tests\")]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nimport guzzle_sphinx_theme\n\nhtml_theme_path = guzzle_sphinx_theme.html_theme_path()\nhtml_theme = \"guzzle_sphinx_theme\"\nhtml_favicon = str(Path(\"_static/favicon.ico\"))\n\n# There's a standing issue with Sphinx's new-style sidebars. This is a\n# workaround. Taken from\n# https://github.com/guzzle/guzzle_sphinx_theme/issues/33#issuecomment-637081826\nhtml_sidebars = {\"**\": [\"logo-text.html\", \"globaltoc.html\", \"searchbox.html\"]}\n\n# Register the theme as an extension to generate a sitemap.xml\nextensions.append(\"guzzle_sphinx_theme\")\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# This specifies any additional css files that will override the theme's\nhtml_css_files = [\"custom.css\"]\n\n# source links to github\ndef linkcode_resolve(domain, info):\n if domain != \"py\":\n return None\n if not info[\"module\"]:\n return None\n filename = info[\"module\"].replace(\".\", \"/\")\n version = os.getenv(\"READTHEDOCS_VERSION\", \"master\")\n if version == \"latest\":\n version = \"master\"\n return f\"https://github.com/ManimCommunity/manim/blob/{version}/{filename}.py\"\n\n\n# external links\nextlinks = {\n \"issue\": (\"https://github.com/ManimCommunity/manim/issues/%s\", \"issue \"),\n \"pr\": (\"https://github.com/ManimCommunity/manim/pull/%s\", \"pull request \"),\n}\n\n# opengraph settings\nogp_image = \"https://www.manim.community/logo.png\"\nogp_site_name = \"Manim Community | Documentation\"\nogp_site_url = \"https://docs.manim.community/\"\n", "path": "docs/source/conf.py"}], "after_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. 
If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nimport os\nimport sys\nfrom distutils.sysconfig import get_python_lib\nfrom pathlib import Path\n\nsys.path.insert(0, os.path.abspath(\".\"))\n\n\nif os.environ.get(\"READTHEDOCS\") == \"True\":\n site_path = get_python_lib()\n # we need to add ffmpeg to the path\n ffmpeg_path = os.path.join(site_path, \"imageio_ffmpeg\", \"binaries\")\n # the included binary is named ffmpeg-linux..., create a symlink\n [ffmpeg_bin] = [\n file for file in os.listdir(ffmpeg_path) if file.startswith(\"ffmpeg-\")\n ]\n os.symlink(\n os.path.join(ffmpeg_path, ffmpeg_bin), os.path.join(ffmpeg_path, \"ffmpeg\")\n )\n os.environ[\"PATH\"] += os.pathsep + ffmpeg_path\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Manim\"\ncopyright = \"2020, The Manim Community Dev Team\"\nauthor = \"The Manim Community Dev Team\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"recommonmark\",\n \"sphinx_copybutton\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.extlinks\",\n \"sphinx.ext.linkcode\",\n \"sphinxext.opengraph\",\n \"manim_directive\",\n]\n\n# Automatically generate stub pages when using the .. autosummary directive\nautosummary_generate = True\n\n# generate documentation from type hints\nautodoc_typehints = \"description\"\nautoclass_content = \"both\"\n\n# controls whether functions documented by the autofunction directive\n# appear with their full module names\nadd_module_names = False\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# Custom section headings in our documentation\nnapoleon_custom_sections = [\"Tests\", (\"Test\", \"Tests\")]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nimport guzzle_sphinx_theme\n\nhtml_theme_path = guzzle_sphinx_theme.html_theme_path()\nhtml_theme = \"guzzle_sphinx_theme\"\nhtml_favicon = str(Path(\"_static/favicon.ico\"))\n\n# There's a standing issue with Sphinx's new-style sidebars. This is a\n# workaround. Taken from\n# https://github.com/guzzle/guzzle_sphinx_theme/issues/33#issuecomment-637081826\nhtml_sidebars = {\"**\": [\"logo-text.html\", \"globaltoc.html\", \"searchbox.html\"]}\n\n# Register the theme as an extension to generate a sitemap.xml\nextensions.append(\"guzzle_sphinx_theme\")\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# This specifies any additional css files that will override the theme's\nhtml_css_files = [\"custom.css\"]\n\n# source links to github\ndef linkcode_resolve(domain, info):\n if domain != \"py\":\n return None\n if not info[\"module\"]:\n return None\n filename = info[\"module\"].replace(\".\", \"/\")\n version = os.getenv(\"READTHEDOCS_VERSION\", \"master\")\n if version == \"latest\" or version == \"stable\":\n version = \"master\"\n return f\"https://github.com/ManimCommunity/manim/blob/{version}/{filename}.py\"\n\n\n# external links\nextlinks = {\n \"issue\": (\"https://github.com/ManimCommunity/manim/issues/%s\", \"issue \"),\n \"pr\": (\"https://github.com/ManimCommunity/manim/pull/%s\", \"pull request \"),\n}\n\n# opengraph settings\nogp_image = \"https://www.manim.community/logo.png\"\nogp_site_name = \"Manim Community | Documentation\"\nogp_site_url = \"https://docs.manim.community/\"\n", "path": "docs/source/conf.py"}]} | 1,767 | 122 |
gh_patches_debug_11888 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-3162 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Additional context for `Mixed types x and y in attribute value sequence` warnings
**Is your feature request related to a problem?**
I'm getting a whole lot of warnings like
```
opentelemetry.attributes.Mixed types str and int in attribute value sequence
```
But they're missing context in order to resolve them. I'd like to know what attribute it is that has mixed types.
**Describe the solution you'd like**
Include `key` value in the message to add some more context and give a better chance to resolve the warning.
**Describe alternatives you've considered**
None
**Additional context**
Log is emitted from the lines below
https://github.com/open-telemetry/opentelemetry-python/blob/e0e6a3a940c16c1df6493e258ccfbc57ac38cf96/opentelemetry-api/src/opentelemetry/attributes/__init__.py#L86-L91
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opentelemetry-api/src/opentelemetry/attributes/__init__.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # type: ignore
15
16 import logging
17 import threading
18 from collections import OrderedDict
19 from collections.abc import MutableMapping
20 from typing import Optional, Sequence, Union
21
22 from opentelemetry.util import types
23
24 # bytes are accepted as a user supplied value for attributes but
25 # decoded to strings internally.
26 _VALID_ATTR_VALUE_TYPES = (bool, str, bytes, int, float)
27
28
29 _logger = logging.getLogger(__name__)
30
31
32 def _clean_attribute(
33 key: str, value: types.AttributeValue, max_len: Optional[int]
34 ) -> Optional[types.AttributeValue]:
35 """Checks if attribute value is valid and cleans it if required.
36
37 The function returns the cleaned value or None if the value is not valid.
38
39 An attribute value is valid if it is either:
40 - A primitive type: string, boolean, double precision floating
41 point (IEEE 754-1985) or integer.
42 - An array of primitive type values. The array MUST be homogeneous,
43 i.e. it MUST NOT contain values of different types.
44
45 An attribute needs cleansing if:
46 - Its length is greater than the maximum allowed length.
47 - It needs to be encoded/decoded e.g, bytes to strings.
48 """
49
50 if not (key and isinstance(key, str)):
51 _logger.warning("invalid key `%s`. must be non-empty string.", key)
52 return None
53
54 if isinstance(value, _VALID_ATTR_VALUE_TYPES):
55 return _clean_attribute_value(value, max_len)
56
57 if isinstance(value, Sequence):
58 sequence_first_valid_type = None
59 cleaned_seq = []
60
61 for element in value:
62 element = _clean_attribute_value(element, max_len)
63 if element is None:
64 cleaned_seq.append(element)
65 continue
66
67 element_type = type(element)
68 # Reject attribute value if sequence contains a value with an incompatible type.
69 if element_type not in _VALID_ATTR_VALUE_TYPES:
70 _logger.warning(
71 "Invalid type %s in attribute value sequence. Expected one of "
72 "%s or None",
73 element_type.__name__,
74 [
75 valid_type.__name__
76 for valid_type in _VALID_ATTR_VALUE_TYPES
77 ],
78 )
79 return None
80
81 # The type of the sequence must be homogeneous. The first non-None
82 # element determines the type of the sequence
83 if sequence_first_valid_type is None:
84 sequence_first_valid_type = element_type
85 # use equality instead of isinstance as isinstance(True, int) evaluates to True
86 elif element_type != sequence_first_valid_type:
87 _logger.warning(
88 "Mixed types %s and %s in attribute value sequence",
89 sequence_first_valid_type.__name__,
90 type(element).__name__,
91 )
92 return None
93
94 cleaned_seq.append(element)
95
96 # Freeze mutable sequences defensively
97 return tuple(cleaned_seq)
98
99 _logger.warning(
100 "Invalid type %s for attribute '%s' value. Expected one of %s or a "
101 "sequence of those types",
102 type(value).__name__,
103 key,
104 [valid_type.__name__ for valid_type in _VALID_ATTR_VALUE_TYPES],
105 )
106 return None
107
108
109 def _clean_attribute_value(
110 value: types.AttributeValue, limit: Optional[int]
111 ) -> Union[types.AttributeValue, None]:
112 if value is None:
113 return None
114
115 if isinstance(value, bytes):
116 try:
117 value = value.decode()
118 except UnicodeDecodeError:
119 _logger.warning("Byte attribute could not be decoded.")
120 return None
121
122 if limit is not None and isinstance(value, str):
123 value = value[:limit]
124 return value
125
126
127 class BoundedAttributes(MutableMapping):
128 """An ordered dict with a fixed max capacity.
129
130 Oldest elements are dropped when the dict is full and a new element is
131 added.
132 """
133
134 def __init__(
135 self,
136 maxlen: Optional[int] = None,
137 attributes: types.Attributes = None,
138 immutable: bool = True,
139 max_value_len: Optional[int] = None,
140 ):
141 if maxlen is not None:
142 if not isinstance(maxlen, int) or maxlen < 0:
143 raise ValueError(
144 "maxlen must be valid int greater or equal to 0"
145 )
146 self.maxlen = maxlen
147 self.dropped = 0
148 self.max_value_len = max_value_len
149 self._dict = OrderedDict() # type: OrderedDict
150 self._lock = threading.Lock() # type: threading.Lock
151 if attributes:
152 for key, value in attributes.items():
153 self[key] = value
154 self._immutable = immutable
155
156 def __repr__(self):
157 return (
158 f"{type(self).__name__}({dict(self._dict)}, maxlen={self.maxlen})"
159 )
160
161 def __getitem__(self, key):
162 return self._dict[key]
163
164 def __setitem__(self, key, value):
165 if getattr(self, "_immutable", False):
166 raise TypeError
167 with self._lock:
168 if self.maxlen is not None and self.maxlen == 0:
169 self.dropped += 1
170 return
171
172 value = _clean_attribute(key, value, self.max_value_len)
173 if value is not None:
174 if key in self._dict:
175 del self._dict[key]
176 elif (
177 self.maxlen is not None and len(self._dict) == self.maxlen
178 ):
179 self._dict.popitem(last=False)
180 self.dropped += 1
181
182 self._dict[key] = value
183
184 def __delitem__(self, key):
185 if getattr(self, "_immutable", False):
186 raise TypeError
187 with self._lock:
188 del self._dict[key]
189
190 def __iter__(self):
191 with self._lock:
192 return iter(self._dict.copy())
193
194 def __len__(self):
195 return len(self._dict)
196
197 def copy(self):
198 return self._dict.copy()
199
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opentelemetry-api/src/opentelemetry/attributes/__init__.py b/opentelemetry-api/src/opentelemetry/attributes/__init__.py
--- a/opentelemetry-api/src/opentelemetry/attributes/__init__.py
+++ b/opentelemetry-api/src/opentelemetry/attributes/__init__.py
@@ -85,7 +85,8 @@
# use equality instead of isinstance as isinstance(True, int) evaluates to True
elif element_type != sequence_first_valid_type:
_logger.warning(
- "Mixed types %s and %s in attribute value sequence",
+ "Attribute %r mixes types %s and %s in attribute value sequence",
+ key,
sequence_first_valid_type.__name__,
type(element).__name__,
)
| {"golden_diff": "diff --git a/opentelemetry-api/src/opentelemetry/attributes/__init__.py b/opentelemetry-api/src/opentelemetry/attributes/__init__.py\n--- a/opentelemetry-api/src/opentelemetry/attributes/__init__.py\n+++ b/opentelemetry-api/src/opentelemetry/attributes/__init__.py\n@@ -85,7 +85,8 @@\n # use equality instead of isinstance as isinstance(True, int) evaluates to True\n elif element_type != sequence_first_valid_type:\n _logger.warning(\n- \"Mixed types %s and %s in attribute value sequence\",\n+ \"Attribute %r mixes types %s and %s in attribute value sequence\",\n+ key,\n sequence_first_valid_type.__name__,\n type(element).__name__,\n )\n", "issue": "Additional context for `Mixed types x and y in attribute value sequence` warnings\n**Is your feature request related to a problem?**\r\nI'm getting a whole lot of warnings like\r\n\r\n```\r\nopentelemetry.attributes.Mixed types str and int in attribute value sequence\r\n```\r\n\r\nBut they're missing context in order to resolve them. I'd like to know what attribute it is that has mixed types.\r\n\r\n**Describe the solution you'd like**\r\nInclude `key` value in the message to add some more context and give a better chance to resolve the warning.\r\n\r\n**Describe alternatives you've considered**\r\n\r\nNone\r\n\r\n**Additional context**\r\n\r\nLog is emitted from the lines below\r\n\r\nhttps://github.com/open-telemetry/opentelemetry-python/blob/e0e6a3a940c16c1df6493e258ccfbc57ac38cf96/opentelemetry-api/src/opentelemetry/attributes/__init__.py#L86-L91\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# type: ignore\n\nimport logging\nimport threading\nfrom collections import OrderedDict\nfrom collections.abc import MutableMapping\nfrom typing import Optional, Sequence, Union\n\nfrom opentelemetry.util import types\n\n# bytes are accepted as a user supplied value for attributes but\n# decoded to strings internally.\n_VALID_ATTR_VALUE_TYPES = (bool, str, bytes, int, float)\n\n\n_logger = logging.getLogger(__name__)\n\n\ndef _clean_attribute(\n key: str, value: types.AttributeValue, max_len: Optional[int]\n) -> Optional[types.AttributeValue]:\n \"\"\"Checks if attribute value is valid and cleans it if required.\n\n The function returns the cleaned value or None if the value is not valid.\n\n An attribute value is valid if it is either:\n - A primitive type: string, boolean, double precision floating\n point (IEEE 754-1985) or integer.\n - An array of primitive type values. The array MUST be homogeneous,\n i.e. it MUST NOT contain values of different types.\n\n An attribute needs cleansing if:\n - Its length is greater than the maximum allowed length.\n - It needs to be encoded/decoded e.g, bytes to strings.\n \"\"\"\n\n if not (key and isinstance(key, str)):\n _logger.warning(\"invalid key `%s`. 
must be non-empty string.\", key)\n return None\n\n if isinstance(value, _VALID_ATTR_VALUE_TYPES):\n return _clean_attribute_value(value, max_len)\n\n if isinstance(value, Sequence):\n sequence_first_valid_type = None\n cleaned_seq = []\n\n for element in value:\n element = _clean_attribute_value(element, max_len)\n if element is None:\n cleaned_seq.append(element)\n continue\n\n element_type = type(element)\n # Reject attribute value if sequence contains a value with an incompatible type.\n if element_type not in _VALID_ATTR_VALUE_TYPES:\n _logger.warning(\n \"Invalid type %s in attribute value sequence. Expected one of \"\n \"%s or None\",\n element_type.__name__,\n [\n valid_type.__name__\n for valid_type in _VALID_ATTR_VALUE_TYPES\n ],\n )\n return None\n\n # The type of the sequence must be homogeneous. The first non-None\n # element determines the type of the sequence\n if sequence_first_valid_type is None:\n sequence_first_valid_type = element_type\n # use equality instead of isinstance as isinstance(True, int) evaluates to True\n elif element_type != sequence_first_valid_type:\n _logger.warning(\n \"Mixed types %s and %s in attribute value sequence\",\n sequence_first_valid_type.__name__,\n type(element).__name__,\n )\n return None\n\n cleaned_seq.append(element)\n\n # Freeze mutable sequences defensively\n return tuple(cleaned_seq)\n\n _logger.warning(\n \"Invalid type %s for attribute '%s' value. Expected one of %s or a \"\n \"sequence of those types\",\n type(value).__name__,\n key,\n [valid_type.__name__ for valid_type in _VALID_ATTR_VALUE_TYPES],\n )\n return None\n\n\ndef _clean_attribute_value(\n value: types.AttributeValue, limit: Optional[int]\n) -> Union[types.AttributeValue, None]:\n if value is None:\n return None\n\n if isinstance(value, bytes):\n try:\n value = value.decode()\n except UnicodeDecodeError:\n _logger.warning(\"Byte attribute could not be decoded.\")\n return None\n\n if limit is not None and isinstance(value, str):\n value = value[:limit]\n return value\n\n\nclass BoundedAttributes(MutableMapping):\n \"\"\"An ordered dict with a fixed max capacity.\n\n Oldest elements are dropped when the dict is full and a new element is\n added.\n \"\"\"\n\n def __init__(\n self,\n maxlen: Optional[int] = None,\n attributes: types.Attributes = None,\n immutable: bool = True,\n max_value_len: Optional[int] = None,\n ):\n if maxlen is not None:\n if not isinstance(maxlen, int) or maxlen < 0:\n raise ValueError(\n \"maxlen must be valid int greater or equal to 0\"\n )\n self.maxlen = maxlen\n self.dropped = 0\n self.max_value_len = max_value_len\n self._dict = OrderedDict() # type: OrderedDict\n self._lock = threading.Lock() # type: threading.Lock\n if attributes:\n for key, value in attributes.items():\n self[key] = value\n self._immutable = immutable\n\n def __repr__(self):\n return (\n f\"{type(self).__name__}({dict(self._dict)}, maxlen={self.maxlen})\"\n )\n\n def __getitem__(self, key):\n return self._dict[key]\n\n def __setitem__(self, key, value):\n if getattr(self, \"_immutable\", False):\n raise TypeError\n with self._lock:\n if self.maxlen is not None and self.maxlen == 0:\n self.dropped += 1\n return\n\n value = _clean_attribute(key, value, self.max_value_len)\n if value is not None:\n if key in self._dict:\n del self._dict[key]\n elif (\n self.maxlen is not None and len(self._dict) == self.maxlen\n ):\n self._dict.popitem(last=False)\n self.dropped += 1\n\n self._dict[key] = value\n\n def __delitem__(self, key):\n if getattr(self, \"_immutable\", False):\n 
raise TypeError\n with self._lock:\n del self._dict[key]\n\n def __iter__(self):\n with self._lock:\n return iter(self._dict.copy())\n\n def __len__(self):\n return len(self._dict)\n\n def copy(self):\n return self._dict.copy()\n", "path": "opentelemetry-api/src/opentelemetry/attributes/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# type: ignore\n\nimport logging\nimport threading\nfrom collections import OrderedDict\nfrom collections.abc import MutableMapping\nfrom typing import Optional, Sequence, Union\n\nfrom opentelemetry.util import types\n\n# bytes are accepted as a user supplied value for attributes but\n# decoded to strings internally.\n_VALID_ATTR_VALUE_TYPES = (bool, str, bytes, int, float)\n\n\n_logger = logging.getLogger(__name__)\n\n\ndef _clean_attribute(\n key: str, value: types.AttributeValue, max_len: Optional[int]\n) -> Optional[types.AttributeValue]:\n \"\"\"Checks if attribute value is valid and cleans it if required.\n\n The function returns the cleaned value or None if the value is not valid.\n\n An attribute value is valid if it is either:\n - A primitive type: string, boolean, double precision floating\n point (IEEE 754-1985) or integer.\n - An array of primitive type values. The array MUST be homogeneous,\n i.e. it MUST NOT contain values of different types.\n\n An attribute needs cleansing if:\n - Its length is greater than the maximum allowed length.\n - It needs to be encoded/decoded e.g, bytes to strings.\n \"\"\"\n\n if not (key and isinstance(key, str)):\n _logger.warning(\"invalid key `%s`. must be non-empty string.\", key)\n return None\n\n if isinstance(value, _VALID_ATTR_VALUE_TYPES):\n return _clean_attribute_value(value, max_len)\n\n if isinstance(value, Sequence):\n sequence_first_valid_type = None\n cleaned_seq = []\n\n for element in value:\n element = _clean_attribute_value(element, max_len)\n if element is None:\n cleaned_seq.append(element)\n continue\n\n element_type = type(element)\n # Reject attribute value if sequence contains a value with an incompatible type.\n if element_type not in _VALID_ATTR_VALUE_TYPES:\n _logger.warning(\n \"Invalid type %s in attribute value sequence. Expected one of \"\n \"%s or None\",\n element_type.__name__,\n [\n valid_type.__name__\n for valid_type in _VALID_ATTR_VALUE_TYPES\n ],\n )\n return None\n\n # The type of the sequence must be homogeneous. 
The first non-None\n # element determines the type of the sequence\n if sequence_first_valid_type is None:\n sequence_first_valid_type = element_type\n # use equality instead of isinstance as isinstance(True, int) evaluates to True\n elif element_type != sequence_first_valid_type:\n _logger.warning(\n \"Attribute %r mixes types %s and %s in attribute value sequence\",\n key,\n sequence_first_valid_type.__name__,\n type(element).__name__,\n )\n return None\n\n cleaned_seq.append(element)\n\n # Freeze mutable sequences defensively\n return tuple(cleaned_seq)\n\n _logger.warning(\n \"Invalid type %s for attribute '%s' value. Expected one of %s or a \"\n \"sequence of those types\",\n type(value).__name__,\n key,\n [valid_type.__name__ for valid_type in _VALID_ATTR_VALUE_TYPES],\n )\n return None\n\n\ndef _clean_attribute_value(\n value: types.AttributeValue, limit: Optional[int]\n) -> Union[types.AttributeValue, None]:\n if value is None:\n return None\n\n if isinstance(value, bytes):\n try:\n value = value.decode()\n except UnicodeDecodeError:\n _logger.warning(\"Byte attribute could not be decoded.\")\n return None\n\n if limit is not None and isinstance(value, str):\n value = value[:limit]\n return value\n\n\nclass BoundedAttributes(MutableMapping):\n \"\"\"An ordered dict with a fixed max capacity.\n\n Oldest elements are dropped when the dict is full and a new element is\n added.\n \"\"\"\n\n def __init__(\n self,\n maxlen: Optional[int] = None,\n attributes: types.Attributes = None,\n immutable: bool = True,\n max_value_len: Optional[int] = None,\n ):\n if maxlen is not None:\n if not isinstance(maxlen, int) or maxlen < 0:\n raise ValueError(\n \"maxlen must be valid int greater or equal to 0\"\n )\n self.maxlen = maxlen\n self.dropped = 0\n self.max_value_len = max_value_len\n self._dict = OrderedDict() # type: OrderedDict\n self._lock = threading.Lock() # type: threading.Lock\n if attributes:\n for key, value in attributes.items():\n self[key] = value\n self._immutable = immutable\n\n def __repr__(self):\n return (\n f\"{type(self).__name__}({dict(self._dict)}, maxlen={self.maxlen})\"\n )\n\n def __getitem__(self, key):\n return self._dict[key]\n\n def __setitem__(self, key, value):\n if getattr(self, \"_immutable\", False):\n raise TypeError\n with self._lock:\n if self.maxlen is not None and self.maxlen == 0:\n self.dropped += 1\n return\n\n value = _clean_attribute(key, value, self.max_value_len)\n if value is not None:\n if key in self._dict:\n del self._dict[key]\n elif (\n self.maxlen is not None and len(self._dict) == self.maxlen\n ):\n self._dict.popitem(last=False)\n self.dropped += 1\n\n self._dict[key] = value\n\n def __delitem__(self, key):\n if getattr(self, \"_immutable\", False):\n raise TypeError\n with self._lock:\n del self._dict[key]\n\n def __iter__(self):\n with self._lock:\n return iter(self._dict.copy())\n\n def __len__(self):\n return len(self._dict)\n\n def copy(self):\n return self._dict.copy()\n", "path": "opentelemetry-api/src/opentelemetry/attributes/__init__.py"}]} | 2,360 | 165 |
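The whole fix above amounts to passing the attribute key into the mixed-type warning. A minimal sketch of the corrected call, using only the standard library and a made-up attribute key (the logger name and message text follow the diff; everything else is illustrative):

```python
# Sketch only: reproduces the corrected warning from the golden diff above.
import logging

logging.basicConfig(level=logging.WARNING)
_logger = logging.getLogger("opentelemetry.attributes")


def _warn_mixed_types(key, first_valid_type, element):
    # The fix adds `key` so operators can tell which attribute mixes types.
    _logger.warning(
        "Attribute %r mixes types %s and %s in attribute value sequence",
        key,
        first_valid_type.__name__,
        type(element).__name__,
    )


_warn_mixed_types("http.request.header.accept", str, 42)
# Logs: Attribute 'http.request.header.accept' mixes types str and int in attribute value sequence
```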
gh_patches_debug_7772 | rasdani/github-patches | git_diff | OctoPrint__OctoPrint-3054 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Software update fails on Debian testing/unstable
Tested with OctoPrint 1.3.10
Right now, the software update will not work on Debian testing and unstable. The problem is that Debian decided to name its Python version `2.7.15+` (yes, the '+' is part of the version string returned by `python --version`). OctoPrint's version comparison cannot cope with this and treats it as < 2.7.9, which leads to a very confusing message from the software update component about why it refuses to update: `Python: 2.7.9 (you have: 2.7.15+)`. It took me some time to figure out what was actually going on.
There is already a bug report for Debian's python2.7 package: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=914072
Sadly there is no feedback from the Debian maintainers on why they named it this way or whether this might change again in the future.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/octoprint/util/version.py`
Content:
```
1 # coding=utf-8
2 """
3 This module provides a bunch of utility methods and helpers for version handling.
4 """
5 from __future__ import absolute_import, division, print_function
6
7 __license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
8
9 import pkg_resources
10 import logging
11
12 from octoprint import __version__
13
14
15 def get_octoprint_version_string():
16 return __version__
17
18
19 def get_octoprint_version(base=False):
20 octoprint_version_string = get_octoprint_version_string()
21 return get_comparable_version(octoprint_version_string, base=base)
22
23
24 def is_released_octoprint_version(version=None):
25 """
26 >>> import pkg_resources
27 >>> is_released_octoprint_version(version=pkg_resources.parse_version("1.3.6rc3"))
28 True
29 >>> is_released_octoprint_version(version=pkg_resources.parse_version("1.3.6rc3.dev2+g1234"))
30 False
31 >>> is_released_octoprint_version(version=pkg_resources.parse_version("1.3.6"))
32 True
33 >>> is_released_octoprint_version(version=pkg_resources.parse_version("1.3.6.post1+g1234"))
34 True
35 >>> is_released_octoprint_version(version=pkg_resources.parse_version("1.3.6.post1.dev0+g1234"))
36 False
37 >>> is_released_octoprint_version(version=pkg_resources.parse_version("1.3.7.dev123+g23545"))
38 False
39 """
40
41 if version is None:
42 version = get_octoprint_version()
43
44 if isinstance(version, tuple):
45 # old setuptools
46 return "*@" not in version
47 else:
48 # new setuptools
49 return "dev" not in version.public
50
51
52 def is_stable_octoprint_version(version=None):
53 """
54 >>> import pkg_resources
55 >>> is_stable_octoprint_version(version=pkg_resources.parse_version("1.3.6rc3"))
56 False
57 >>> is_stable_octoprint_version(version=pkg_resources.parse_version("1.3.6rc3.dev2+g1234"))
58 False
59 >>> is_stable_octoprint_version(version=pkg_resources.parse_version("1.3.6"))
60 True
61 >>> is_stable_octoprint_version(version=pkg_resources.parse_version("1.3.6.post1+g1234"))
62 True
63 >>> is_stable_octoprint_version(version=pkg_resources.parse_version("1.3.6.post1.dev0+g1234"))
64 False
65 >>> is_stable_octoprint_version(version=pkg_resources.parse_version("1.3.7.dev123+g23545"))
66 False
67 """
68
69 if version is None:
70 version = get_octoprint_version()
71
72 if not is_released_octoprint_version(version=version):
73 return False
74
75 if isinstance(version, tuple):
76 return "*a" not in version and "*b" not in version and "*c" not in version
77 else:
78 return not version.is_prerelease
79
80
81 def is_octoprint_compatible(*compatibility_entries, **kwargs):
82 """
83 Tests if the current ``octoprint_version`` is compatible to any of the provided ``compatibility_entries``.
84
85 Arguments:
86 compatibility_entries (str): compatibility string(s) to test against, result will be `True` if any match
87 is found
88 octoprint_version (tuple or SetuptoolsVersion): optional OctoPrint version to match against, if not current
89 base version will be determined via :func:`get_octoprint_version`.
90
91 Returns:
92 (bool) ``True`` if any of the provided compatibility entries matches or there are no entries, else ``False``
93 """
94
95 logger = logging.getLogger(__name__)
96
97 if not compatibility_entries:
98 return True
99
100 octoprint_version = kwargs.get("octoprint_version")
101 if octoprint_version is None:
102 octoprint_version = get_octoprint_version(base=True)
103
104 for octo_compat in compatibility_entries:
105 try:
106 if not any(octo_compat.startswith(c) for c in ("<", "<=", "!=", "==", ">=", ">", "~=", "===")):
107 octo_compat = ">={}".format(octo_compat)
108
109 s = pkg_resources.Requirement.parse("OctoPrint" + octo_compat)
110 if octoprint_version in s:
111 break
112 except:
113 logger.exception("Something is wrong with this compatibility string for OctoPrint: {}".format(octo_compat))
114 else:
115 return False
116
117 return True
118
119
120 def get_comparable_version(version_string, base=False):
121 if "-" in version_string:
122 version_string = version_string[:version_string.find("-")]
123
124 version = pkg_resources.parse_version(version_string)
125
126 # A leading v is common in github release tags and old setuptools doesn't remove it.
127 if version and isinstance(version, tuple) and version[0].lower() == "*v":
128 version = version[1:]
129
130 if base:
131 if isinstance(version, tuple):
132 # old setuptools
133 base_version = []
134 for part in version:
135 if part.startswith("*"):
136 break
137 base_version.append(part)
138 base_version.append("*final")
139 version = tuple(base_version)
140 else:
141 # new setuptools
142 version = pkg_resources.parse_version(version.base_version)
143 return version
144
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/octoprint/util/version.py b/src/octoprint/util/version.py
--- a/src/octoprint/util/version.py
+++ b/src/octoprint/util/version.py
@@ -121,6 +121,10 @@
if "-" in version_string:
version_string = version_string[:version_string.find("-")]
+ # Debian has the python version set to 2.7.15+ which is not PEP440 compliant (bug 914072)
+ if version_string.endswith("+"):
+ version_string = version_string[:-1]
+
version = pkg_resources.parse_version(version_string)
# A leading v is common in github release tags and old setuptools doesn't remove it.
| {"golden_diff": "diff --git a/src/octoprint/util/version.py b/src/octoprint/util/version.py\n--- a/src/octoprint/util/version.py\n+++ b/src/octoprint/util/version.py\n@@ -121,6 +121,10 @@\n \tif \"-\" in version_string:\n \t\tversion_string = version_string[:version_string.find(\"-\")]\n \n+\t# Debian has the python version set to 2.7.15+ which is not PEP440 compliant (bug 914072)\n+\tif version_string.endswith(\"+\"):\n+\t\tversion_string = version_string[:-1]\n+\n \tversion = pkg_resources.parse_version(version_string)\n \n \t# A leading v is common in github release tags and old setuptools doesn't remove it.\n", "issue": "Software update fails on Debian testing/unstable\nTested with Octoprint 1.3.10\r\n\r\nRight now, the software update will not work in Debian testing and unstable. The problem here is that Debian decided to name its python version `2.7.15+` (yes the '+' is part of the version string returned by `python --version`. Octoprint's version compare cannot cope with this and sees this as < 2.7.9 (which leads to a very confusing output of the software update component telling you why it doesn't want to update: `Python: 2.7.9 (you have: 2.7.15+)` ... took me some time to figure out what is actually going on)\r\n\r\nThere is a bug report for Debian's python2.7 package already here https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=914072\r\nSadly there is no feedback from the Debian maintainers on why they named it this way and if this might be changed again in the future.\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"\nThis module provides a bunch of utility methods and helpers for version handling.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\n__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'\n\nimport pkg_resources\nimport logging\n\nfrom octoprint import __version__\n\n\ndef get_octoprint_version_string():\n\treturn __version__\n\n\ndef get_octoprint_version(base=False):\n\toctoprint_version_string = get_octoprint_version_string()\n\treturn get_comparable_version(octoprint_version_string, base=base)\n\n\ndef is_released_octoprint_version(version=None):\n\t\"\"\"\n\t>>> import pkg_resources\n\t>>> is_released_octoprint_version(version=pkg_resources.parse_version(\"1.3.6rc3\"))\n\tTrue\n\t>>> is_released_octoprint_version(version=pkg_resources.parse_version(\"1.3.6rc3.dev2+g1234\"))\n\tFalse\n\t>>> is_released_octoprint_version(version=pkg_resources.parse_version(\"1.3.6\"))\n\tTrue\n\t>>> is_released_octoprint_version(version=pkg_resources.parse_version(\"1.3.6.post1+g1234\"))\n\tTrue\n\t>>> is_released_octoprint_version(version=pkg_resources.parse_version(\"1.3.6.post1.dev0+g1234\"))\n\tFalse\n\t>>> is_released_octoprint_version(version=pkg_resources.parse_version(\"1.3.7.dev123+g23545\"))\n\tFalse\n\t\"\"\"\n\n\tif version is None:\n\t\tversion = get_octoprint_version()\n\n\tif isinstance(version, tuple):\n\t\t# old setuptools\n\t\treturn \"*@\" not in version\n\telse:\n\t\t# new setuptools\n\t\treturn \"dev\" not in version.public\n\n\ndef is_stable_octoprint_version(version=None):\n\t\"\"\"\n\t>>> import pkg_resources\n\t>>> is_stable_octoprint_version(version=pkg_resources.parse_version(\"1.3.6rc3\"))\n\tFalse\n\t>>> is_stable_octoprint_version(version=pkg_resources.parse_version(\"1.3.6rc3.dev2+g1234\"))\n\tFalse\n\t>>> is_stable_octoprint_version(version=pkg_resources.parse_version(\"1.3.6\"))\n\tTrue\n\t>>> 
is_stable_octoprint_version(version=pkg_resources.parse_version(\"1.3.6.post1+g1234\"))\n\tTrue\n\t>>> is_stable_octoprint_version(version=pkg_resources.parse_version(\"1.3.6.post1.dev0+g1234\"))\n\tFalse\n\t>>> is_stable_octoprint_version(version=pkg_resources.parse_version(\"1.3.7.dev123+g23545\"))\n\tFalse\n\t\"\"\"\n\n\tif version is None:\n\t\tversion = get_octoprint_version()\n\n\tif not is_released_octoprint_version(version=version):\n\t\treturn False\n\n\tif isinstance(version, tuple):\n\t\treturn \"*a\" not in version and \"*b\" not in version and \"*c\" not in version\n\telse:\n\t\treturn not version.is_prerelease\n\n\ndef is_octoprint_compatible(*compatibility_entries, **kwargs):\n\t\"\"\"\n\tTests if the current ``octoprint_version`` is compatible to any of the provided ``compatibility_entries``.\n\n\tArguments:\n\t\tcompatibility_entries (str): compatibility string(s) to test against, result will be `True` if any match\n\t\t\tis found\n\t\toctoprint_version (tuple or SetuptoolsVersion): optional OctoPrint version to match against, if not current\n\t\t\tbase version will be determined via :func:`get_octoprint_version`.\n\n\tReturns:\n\t\t(bool) ``True`` if any of the provided compatibility entries matches or there are no entries, else ``False``\n\t\"\"\"\n\n\tlogger = logging.getLogger(__name__)\n\n\tif not compatibility_entries:\n\t\treturn True\n\n\toctoprint_version = kwargs.get(\"octoprint_version\")\n\tif octoprint_version is None:\n\t\toctoprint_version = get_octoprint_version(base=True)\n\n\tfor octo_compat in compatibility_entries:\n\t\ttry:\n\t\t\tif not any(octo_compat.startswith(c) for c in (\"<\", \"<=\", \"!=\", \"==\", \">=\", \">\", \"~=\", \"===\")):\n\t\t\t\tocto_compat = \">={}\".format(octo_compat)\n\n\t\t\ts = pkg_resources.Requirement.parse(\"OctoPrint\" + octo_compat)\n\t\t\tif octoprint_version in s:\n\t\t\t\tbreak\n\t\texcept:\n\t\t\tlogger.exception(\"Something is wrong with this compatibility string for OctoPrint: {}\".format(octo_compat))\n\telse:\n\t\treturn False\n\n\treturn True\n\n\ndef get_comparable_version(version_string, base=False):\n\tif \"-\" in version_string:\n\t\tversion_string = version_string[:version_string.find(\"-\")]\n\n\tversion = pkg_resources.parse_version(version_string)\n\n\t# A leading v is common in github release tags and old setuptools doesn't remove it.\n\tif version and isinstance(version, tuple) and version[0].lower() == \"*v\":\n\t\tversion = version[1:]\n\n\tif base:\n\t\tif isinstance(version, tuple):\n\t\t\t# old setuptools\n\t\t\tbase_version = []\n\t\t\tfor part in version:\n\t\t\t\tif part.startswith(\"*\"):\n\t\t\t\t\tbreak\n\t\t\t\tbase_version.append(part)\n\t\t\tbase_version.append(\"*final\")\n\t\t\tversion = tuple(base_version)\n\t\telse:\n\t\t\t# new setuptools\n\t\t\tversion = pkg_resources.parse_version(version.base_version)\n\treturn version\n", "path": "src/octoprint/util/version.py"}], "after_files": [{"content": "# coding=utf-8\n\"\"\"\nThis module provides a bunch of utility methods and helpers for version handling.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\n__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'\n\nimport pkg_resources\nimport logging\n\nfrom octoprint import __version__\n\n\ndef get_octoprint_version_string():\n\treturn __version__\n\n\ndef get_octoprint_version(base=False):\n\toctoprint_version_string = get_octoprint_version_string()\n\treturn get_comparable_version(octoprint_version_string, base=base)\n\n\ndef 
is_released_octoprint_version(version=None):\n\t\"\"\"\n\t>>> import pkg_resources\n\t>>> is_released_octoprint_version(version=pkg_resources.parse_version(\"1.3.6rc3\"))\n\tTrue\n\t>>> is_released_octoprint_version(version=pkg_resources.parse_version(\"1.3.6rc3.dev2+g1234\"))\n\tFalse\n\t>>> is_released_octoprint_version(version=pkg_resources.parse_version(\"1.3.6\"))\n\tTrue\n\t>>> is_released_octoprint_version(version=pkg_resources.parse_version(\"1.3.6.post1+g1234\"))\n\tTrue\n\t>>> is_released_octoprint_version(version=pkg_resources.parse_version(\"1.3.6.post1.dev0+g1234\"))\n\tFalse\n\t>>> is_released_octoprint_version(version=pkg_resources.parse_version(\"1.3.7.dev123+g23545\"))\n\tFalse\n\t\"\"\"\n\n\tif version is None:\n\t\tversion = get_octoprint_version()\n\n\tif isinstance(version, tuple):\n\t\t# old setuptools\n\t\treturn \"*@\" not in version\n\telse:\n\t\t# new setuptools\n\t\treturn \"dev\" not in version.public\n\n\ndef is_stable_octoprint_version(version=None):\n\t\"\"\"\n\t>>> import pkg_resources\n\t>>> is_stable_octoprint_version(version=pkg_resources.parse_version(\"1.3.6rc3\"))\n\tFalse\n\t>>> is_stable_octoprint_version(version=pkg_resources.parse_version(\"1.3.6rc3.dev2+g1234\"))\n\tFalse\n\t>>> is_stable_octoprint_version(version=pkg_resources.parse_version(\"1.3.6\"))\n\tTrue\n\t>>> is_stable_octoprint_version(version=pkg_resources.parse_version(\"1.3.6.post1+g1234\"))\n\tTrue\n\t>>> is_stable_octoprint_version(version=pkg_resources.parse_version(\"1.3.6.post1.dev0+g1234\"))\n\tFalse\n\t>>> is_stable_octoprint_version(version=pkg_resources.parse_version(\"1.3.7.dev123+g23545\"))\n\tFalse\n\t\"\"\"\n\n\tif version is None:\n\t\tversion = get_octoprint_version()\n\n\tif not is_released_octoprint_version(version=version):\n\t\treturn False\n\n\tif isinstance(version, tuple):\n\t\treturn \"*a\" not in version and \"*b\" not in version and \"*c\" not in version\n\telse:\n\t\treturn not version.is_prerelease\n\n\ndef is_octoprint_compatible(*compatibility_entries, **kwargs):\n\t\"\"\"\n\tTests if the current ``octoprint_version`` is compatible to any of the provided ``compatibility_entries``.\n\n\tArguments:\n\t\tcompatibility_entries (str): compatibility string(s) to test against, result will be `True` if any match\n\t\t\tis found\n\t\toctoprint_version (tuple or SetuptoolsVersion): optional OctoPrint version to match against, if not current\n\t\t\tbase version will be determined via :func:`get_octoprint_version`.\n\n\tReturns:\n\t\t(bool) ``True`` if any of the provided compatibility entries matches or there are no entries, else ``False``\n\t\"\"\"\n\n\tlogger = logging.getLogger(__name__)\n\n\tif not compatibility_entries:\n\t\treturn True\n\n\toctoprint_version = kwargs.get(\"octoprint_version\")\n\tif octoprint_version is None:\n\t\toctoprint_version = get_octoprint_version(base=True)\n\n\tfor octo_compat in compatibility_entries:\n\t\ttry:\n\t\t\tif not any(octo_compat.startswith(c) for c in (\"<\", \"<=\", \"!=\", \"==\", \">=\", \">\", \"~=\", \"===\")):\n\t\t\t\tocto_compat = \">={}\".format(octo_compat)\n\n\t\t\ts = pkg_resources.Requirement.parse(\"OctoPrint\" + octo_compat)\n\t\t\tif octoprint_version in s:\n\t\t\t\tbreak\n\t\texcept:\n\t\t\tlogger.exception(\"Something is wrong with this compatibility string for OctoPrint: {}\".format(octo_compat))\n\telse:\n\t\treturn False\n\n\treturn True\n\n\ndef get_comparable_version(version_string, base=False):\n\tif \"-\" in version_string:\n\t\tversion_string = version_string[:version_string.find(\"-\")]\n\n\t# 
Debian has the python version set to 2.7.15+ which is not PEP440 compliant (bug 914072)\n\tif version_string.endswith(\"+\"):\n\t\tversion_string = version_string[:-1]\n\n\tversion = pkg_resources.parse_version(version_string)\n\n\t# A leading v is common in github release tags and old setuptools doesn't remove it.\n\tif version and isinstance(version, tuple) and version[0].lower() == \"*v\":\n\t\tversion = version[1:]\n\n\tif base:\n\t\tif isinstance(version, tuple):\n\t\t\t# old setuptools\n\t\t\tbase_version = []\n\t\t\tfor part in version:\n\t\t\t\tif part.startswith(\"*\"):\n\t\t\t\t\tbreak\n\t\t\t\tbase_version.append(part)\n\t\t\tbase_version.append(\"*final\")\n\t\t\tversion = tuple(base_version)\n\t\telse:\n\t\t\t# new setuptools\n\t\t\tversion = pkg_resources.parse_version(version.base_version)\n\treturn version\n", "path": "src/octoprint/util/version.py"}]} | 2,044 | 161 |
gh_patches_debug_28067 | rasdani/github-patches | git_diff | mne-tools__mne-bids-pipeline-773 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add command line option for deriv_root
As I develop a Docker app using mne-bids-pipeline, I want a straightforward way to override the bids_root and deriv_root values defined in the configuration file from the command line. This enhancement aims to simplify Docker volume mapping.
Currently, an option to override `bids_root` is already available ([source](https://github.com/mne-tools/mne-bids-pipeline/blob/ea95979fdb0ef807b3cd262da409cf010ada3da3/mne_bids_pipeline/_main.py#L56)), but there is no similar option for `deriv_root`.
I suggest adding a command-line option to the mne-bids-pipeline tool that allows users to specify the deriv_root directory. This behavior is similar to other BIDS Apps such as `FMRIprep` and `MRIQC`, where command-line arguments override configuration files.
If you are interested, I would be happy to contribute by taking care of the pull request.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mne_bids_pipeline/_main.py`
Content:
```
1 import argparse
2 import pathlib
3 from textwrap import dedent
4 import time
5 from typing import List
6 from types import ModuleType, SimpleNamespace
7
8 import numpy as np
9
10 from ._config_utils import _get_step_modules
11 from ._config_import import _import_config
12 from ._config_template import create_template_config
13 from ._logging import logger, gen_log_kwargs
14 from ._parallel import get_parallel_backend
15 from ._run import _short_step_path
16
17
18 def main():
19 from . import __version__
20
21 parser = argparse.ArgumentParser()
22 parser.add_argument(
23 "--version", action="version", version=f"%(prog)s {__version__}"
24 )
25 parser.add_argument("config", nargs="?", default=None)
26 parser.add_argument(
27 "--config",
28 dest="config_switch",
29 default=None,
30 metavar="FILE",
31 help="The path of the pipeline configuration file to use.",
32 )
33 parser.add_argument(
34 "--create-config",
35 dest="create_config",
36 default=None,
37 metavar="FILE",
38 help="Create a template configuration file with the specified name. "
39 "If specified, all other parameters will be ignored.",
40 ),
41 parser.add_argument(
42 "--steps",
43 dest="steps",
44 default="all",
45 help=dedent(
46 """\
47 The processing steps to run.
48 Can either be one of the processing groups 'preprocessing', sensor',
49 'source', 'report', or 'all', or the name of a processing group plus
50 the desired step sans the step number and
51 filename extension, separated by a '/'. For example, to run ICA, you
52 would pass 'sensor/run_ica`. If unspecified, will run all processing
53 steps. Can also be a tuple of steps."""
54 ),
55 )
56 parser.add_argument(
57 "--root-dir",
58 dest="root_dir",
59 default=None,
60 help="BIDS root directory of the data to process.",
61 )
62 parser.add_argument(
63 "--subject", dest="subject", default=None, help="The subject to process."
64 )
65 parser.add_argument(
66 "--session", dest="session", default=None, help="The session to process."
67 )
68 parser.add_argument(
69 "--task", dest="task", default=None, help="The task to process."
70 )
71 parser.add_argument("--run", dest="run", default=None, help="The run to process.")
72 parser.add_argument(
73 "--n_jobs",
74 dest="n_jobs",
75 type=int,
76 default=None,
77 help="The number of parallel processes to execute.",
78 )
79 parser.add_argument(
80 "--interactive",
81 dest="interactive",
82 action="store_true",
83 help="Enable interactive mode.",
84 )
85 parser.add_argument(
86 "--debug", dest="debug", action="store_true", help="Enable debugging on error."
87 )
88 parser.add_argument(
89 "--no-cache",
90 dest="no_cache",
91 action="store_true",
92 help="Disable caching of intermediate results.",
93 )
94 options = parser.parse_args()
95
96 if options.create_config is not None:
97 target_path = pathlib.Path(options.create_config)
98 create_template_config(target_path=target_path, overwrite=False)
99 return
100
101 config = options.config
102 config_switch = options.config_switch
103 bad = False
104 if config is None:
105 if config_switch is None:
106 bad = "neither was provided"
107 else:
108 config = config_switch
109 elif config_switch is not None:
110 bad = "both were provided"
111 if bad:
112 parser.error(
113 "❌ You must specify a configuration file either as a single "
114 f"argument or with --config, but {bad}."
115 )
116 steps = options.steps
117 root_dir = options.root_dir
118 subject, session = options.subject, options.session
119 task, run = options.task, options.run
120 n_jobs = options.n_jobs
121 interactive, debug = options.interactive, options.debug
122 cache = not options.no_cache
123
124 if isinstance(steps, str) and "," in steps:
125 # Work around limitation in Fire: --steps=foo,bar/baz won't produce a
126 # tuple ('foo', 'bar/baz'), but a string 'foo,bar/baz'.
127 steps = tuple(steps.split(","))
128 elif isinstance(steps, str):
129 steps = (steps,)
130
131 on_error = "debug" if debug else None
132 cache = "1" if cache else "0"
133
134 processing_stages = []
135 processing_steps = []
136 for steps_ in steps:
137 if "/" in steps_:
138 stage, step = steps_.split("/")
139 processing_stages.append(stage)
140 processing_steps.append(step)
141 else:
142 # User specified "sensor", "preprocessing" or similar, but without
143 # any further grouping.
144 processing_stages.append(steps_)
145 processing_steps.append(None)
146
147 config_path = pathlib.Path(config).expanduser().resolve(strict=True)
148 overrides = SimpleNamespace()
149 if root_dir:
150 overrides.bids_root = pathlib.Path(root_dir).expanduser().resolve(strict=True)
151 if subject:
152 overrides.subjects = [subject]
153 if session:
154 overrides.sessions = [session]
155 if task:
156 overrides.task = task
157 if run:
158 overrides.runs = run
159 if interactive:
160 overrides.interactive = interactive
161 if n_jobs:
162 overrides.n_jobs = int(n_jobs)
163 if on_error:
164 overrides.on_error = on_error
165 if not cache:
166 overrides.memory_location = False
167
168 step_modules: List[ModuleType] = []
169 STEP_MODULES = _get_step_modules()
170 for stage, step in zip(processing_stages, processing_steps):
171 if stage not in STEP_MODULES.keys():
172 raise ValueError(
173 f"Invalid step requested: '{stage}'. "
174 f"It should be one of {list(STEP_MODULES.keys())}."
175 )
176
177 if step is None:
178 # User specified `sensors`, `source`, or similar
179 step_modules.extend(STEP_MODULES[stage])
180 else:
181 # User specified 'stage/step'
182 for step_module in STEP_MODULES[stage]:
183 step_name = pathlib.Path(step_module.__file__).name
184 if step in step_name:
185 step_modules.append(step_module)
186 break
187 else:
188 # We've iterated over all steps, but none matched!
189 raise ValueError(f"Invalid steps requested: {stage}/{step}")
190
191 if processing_stages[0] != "all":
192 # Always run the directory initialization steps, but skip for 'all',
193 # because it already includes them – and we want to avoid running
194 # them twice.
195 step_modules = [*STEP_MODULES["init"], *step_modules]
196
197 logger.title("Welcome aboard MNE-BIDS-Pipeline! 👋")
198 msg = f"Using configuration: {config}"
199 __mne_bids_pipeline_step__ = pathlib.Path(__file__) # used for logging
200 logger.info(**gen_log_kwargs(message=msg, emoji="📝"))
201 config_imported = _import_config(
202 config_path=config_path,
203 overrides=overrides,
204 )
205 # Initialize dask now
206 with get_parallel_backend(config_imported.exec_params):
207 pass
208 del __mne_bids_pipeline_step__
209 logger.end()
210
211 for step_module in step_modules:
212 start = time.time()
213 step = _short_step_path(pathlib.Path(step_module.__file__))
214 logger.title(title=f"{step}")
215 step_module.main(config=config_imported)
216 elapsed = time.time() - start
217 hours, remainder = divmod(elapsed, 3600)
218 hours = int(hours)
219 minutes, seconds = divmod(remainder, 60)
220 minutes = int(minutes)
221 seconds = int(np.ceil(seconds)) # always take full seconds
222 elapsed = f"{seconds}s"
223 if minutes:
224 elapsed = f"{minutes}m {elapsed}"
225 if hours:
226 elapsed = f"{hours}h {elapsed}"
227 logger.end(f"done ({elapsed})")
228
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mne_bids_pipeline/_main.py b/mne_bids_pipeline/_main.py
--- a/mne_bids_pipeline/_main.py
+++ b/mne_bids_pipeline/_main.py
@@ -59,6 +59,18 @@
default=None,
help="BIDS root directory of the data to process.",
)
+ parser.add_argument(
+ "--deriv_root",
+ dest="deriv_root",
+ default=None,
+ help=dedent(
+ """\
+ The root of the derivatives directory
+ in which the pipeline will store the processing results.
+ If unspecified, this will be derivatives/mne-bids-pipeline
+ inside the BIDS root."""
+ ),
+ ),
parser.add_argument(
"--subject", dest="subject", default=None, help="The subject to process."
)
@@ -115,6 +127,7 @@
)
steps = options.steps
root_dir = options.root_dir
+ deriv_root = options.deriv_root
subject, session = options.subject, options.session
task, run = options.task, options.run
n_jobs = options.n_jobs
@@ -148,6 +161,10 @@
overrides = SimpleNamespace()
if root_dir:
overrides.bids_root = pathlib.Path(root_dir).expanduser().resolve(strict=True)
+ if deriv_root:
+ overrides.deriv_root = (
+ pathlib.Path(deriv_root).expanduser().resolve(strict=False)
+ )
if subject:
overrides.subjects = [subject]
if session:
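Reduced to its essence (a standalone sketch, not the pipeline code itself; the example paths are made up), the change is one extra `argparse` option whose value, when present, overrides `deriv_root` from the configuration file:

```python
# Sketch only: stripped-down version of the --deriv_root handling added above.
import argparse
import pathlib
from types import SimpleNamespace

parser = argparse.ArgumentParser()
parser.add_argument("--root-dir", dest="root_dir", default=None)
parser.add_argument(
    "--deriv_root",
    dest="deriv_root",
    default=None,
    help="Root of the derivatives directory for the processing results "
    "(defaults to derivatives/mne-bids-pipeline inside the BIDS root).",
)
options = parser.parse_args(["--root-dir", "/data/bids", "--deriv_root", "/data/derivatives"])

overrides = SimpleNamespace()
if options.root_dir:
    # The real pipeline resolves bids_root with strict=True (it must already exist);
    # strict=False keeps this sketch runnable with made-up paths.
    overrides.bids_root = pathlib.Path(options.root_dir).expanduser().resolve(strict=False)
if options.deriv_root:
    # strict=False, because the derivatives folder may not exist yet.
    overrides.deriv_root = pathlib.Path(options.deriv_root).expanduser().resolve(strict=False)

print(vars(overrides))
```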
| {"golden_diff": "diff --git a/mne_bids_pipeline/_main.py b/mne_bids_pipeline/_main.py\n--- a/mne_bids_pipeline/_main.py\n+++ b/mne_bids_pipeline/_main.py\n@@ -59,6 +59,18 @@\n default=None,\n help=\"BIDS root directory of the data to process.\",\n )\n+ parser.add_argument(\n+ \"--deriv_root\",\n+ dest=\"deriv_root\",\n+ default=None,\n+ help=dedent(\n+ \"\"\"\\\n+ The root of the derivatives directory\n+ in which the pipeline will store the processing results.\n+ If unspecified, this will be derivatives/mne-bids-pipeline\n+ inside the BIDS root.\"\"\"\n+ ),\n+ ),\n parser.add_argument(\n \"--subject\", dest=\"subject\", default=None, help=\"The subject to process.\"\n )\n@@ -115,6 +127,7 @@\n )\n steps = options.steps\n root_dir = options.root_dir\n+ deriv_root = options.deriv_root\n subject, session = options.subject, options.session\n task, run = options.task, options.run\n n_jobs = options.n_jobs\n@@ -148,6 +161,10 @@\n overrides = SimpleNamespace()\n if root_dir:\n overrides.bids_root = pathlib.Path(root_dir).expanduser().resolve(strict=True)\n+ if deriv_root:\n+ overrides.deriv_root = (\n+ pathlib.Path(deriv_root).expanduser().resolve(strict=False)\n+ )\n if subject:\n overrides.subjects = [subject]\n if session:\n", "issue": "Add command line option for deriv_root\nAs I develop a Docker App using mne-bids-pipeline, I want a straightforward way to overwrite the bids_root and deriv_root values defined in the configuration file from the command line. This enhancement aims to simplify Docker volume mapping.\r\n\r\nCurrently, the option to override `bids_root `is already available ([source](https://github.com/mne-tools/mne-bids-pipeline/blob/ea95979fdb0ef807b3cd262da409cf010ada3da3/mne_bids_pipeline/_main.py#L56)), but there is no similar option for `deriv_root`.\r\n\r\nI suggest adding a command-line option to the mne-bids-pipeline tool to allow users to specify the deriv_root directory. This behavior is similar to other BidsApps such as `FMRIprep `and `MRIQC`, where command line arguments overwrite configuration files.\r\n\r\nIf you are interested, I would be happy to contribute by taking care of the pull request.\n", "before_files": [{"content": "import argparse\nimport pathlib\nfrom textwrap import dedent\nimport time\nfrom typing import List\nfrom types import ModuleType, SimpleNamespace\n\nimport numpy as np\n\nfrom ._config_utils import _get_step_modules\nfrom ._config_import import _import_config\nfrom ._config_template import create_template_config\nfrom ._logging import logger, gen_log_kwargs\nfrom ._parallel import get_parallel_backend\nfrom ._run import _short_step_path\n\n\ndef main():\n from . import __version__\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--version\", action=\"version\", version=f\"%(prog)s {__version__}\"\n )\n parser.add_argument(\"config\", nargs=\"?\", default=None)\n parser.add_argument(\n \"--config\",\n dest=\"config_switch\",\n default=None,\n metavar=\"FILE\",\n help=\"The path of the pipeline configuration file to use.\",\n )\n parser.add_argument(\n \"--create-config\",\n dest=\"create_config\",\n default=None,\n metavar=\"FILE\",\n help=\"Create a template configuration file with the specified name. 
\"\n \"If specified, all other parameters will be ignored.\",\n ),\n parser.add_argument(\n \"--steps\",\n dest=\"steps\",\n default=\"all\",\n help=dedent(\n \"\"\"\\\n The processing steps to run.\n Can either be one of the processing groups 'preprocessing', sensor',\n 'source', 'report', or 'all', or the name of a processing group plus\n the desired step sans the step number and\n filename extension, separated by a '/'. For example, to run ICA, you\n would pass 'sensor/run_ica`. If unspecified, will run all processing\n steps. Can also be a tuple of steps.\"\"\"\n ),\n )\n parser.add_argument(\n \"--root-dir\",\n dest=\"root_dir\",\n default=None,\n help=\"BIDS root directory of the data to process.\",\n )\n parser.add_argument(\n \"--subject\", dest=\"subject\", default=None, help=\"The subject to process.\"\n )\n parser.add_argument(\n \"--session\", dest=\"session\", default=None, help=\"The session to process.\"\n )\n parser.add_argument(\n \"--task\", dest=\"task\", default=None, help=\"The task to process.\"\n )\n parser.add_argument(\"--run\", dest=\"run\", default=None, help=\"The run to process.\")\n parser.add_argument(\n \"--n_jobs\",\n dest=\"n_jobs\",\n type=int,\n default=None,\n help=\"The number of parallel processes to execute.\",\n )\n parser.add_argument(\n \"--interactive\",\n dest=\"interactive\",\n action=\"store_true\",\n help=\"Enable interactive mode.\",\n )\n parser.add_argument(\n \"--debug\", dest=\"debug\", action=\"store_true\", help=\"Enable debugging on error.\"\n )\n parser.add_argument(\n \"--no-cache\",\n dest=\"no_cache\",\n action=\"store_true\",\n help=\"Disable caching of intermediate results.\",\n )\n options = parser.parse_args()\n\n if options.create_config is not None:\n target_path = pathlib.Path(options.create_config)\n create_template_config(target_path=target_path, overwrite=False)\n return\n\n config = options.config\n config_switch = options.config_switch\n bad = False\n if config is None:\n if config_switch is None:\n bad = \"neither was provided\"\n else:\n config = config_switch\n elif config_switch is not None:\n bad = \"both were provided\"\n if bad:\n parser.error(\n \"\u274c You must specify a configuration file either as a single \"\n f\"argument or with --config, but {bad}.\"\n )\n steps = options.steps\n root_dir = options.root_dir\n subject, session = options.subject, options.session\n task, run = options.task, options.run\n n_jobs = options.n_jobs\n interactive, debug = options.interactive, options.debug\n cache = not options.no_cache\n\n if isinstance(steps, str) and \",\" in steps:\n # Work around limitation in Fire: --steps=foo,bar/baz won't produce a\n # tuple ('foo', 'bar/baz'), but a string 'foo,bar/baz'.\n steps = tuple(steps.split(\",\"))\n elif isinstance(steps, str):\n steps = (steps,)\n\n on_error = \"debug\" if debug else None\n cache = \"1\" if cache else \"0\"\n\n processing_stages = []\n processing_steps = []\n for steps_ in steps:\n if \"/\" in steps_:\n stage, step = steps_.split(\"/\")\n processing_stages.append(stage)\n processing_steps.append(step)\n else:\n # User specified \"sensor\", \"preprocessing\" or similar, but without\n # any further grouping.\n processing_stages.append(steps_)\n processing_steps.append(None)\n\n config_path = pathlib.Path(config).expanduser().resolve(strict=True)\n overrides = SimpleNamespace()\n if root_dir:\n overrides.bids_root = pathlib.Path(root_dir).expanduser().resolve(strict=True)\n if subject:\n overrides.subjects = [subject]\n if session:\n overrides.sessions = 
[session]\n if task:\n overrides.task = task\n if run:\n overrides.runs = run\n if interactive:\n overrides.interactive = interactive\n if n_jobs:\n overrides.n_jobs = int(n_jobs)\n if on_error:\n overrides.on_error = on_error\n if not cache:\n overrides.memory_location = False\n\n step_modules: List[ModuleType] = []\n STEP_MODULES = _get_step_modules()\n for stage, step in zip(processing_stages, processing_steps):\n if stage not in STEP_MODULES.keys():\n raise ValueError(\n f\"Invalid step requested: '{stage}'. \"\n f\"It should be one of {list(STEP_MODULES.keys())}.\"\n )\n\n if step is None:\n # User specified `sensors`, `source`, or similar\n step_modules.extend(STEP_MODULES[stage])\n else:\n # User specified 'stage/step'\n for step_module in STEP_MODULES[stage]:\n step_name = pathlib.Path(step_module.__file__).name\n if step in step_name:\n step_modules.append(step_module)\n break\n else:\n # We've iterated over all steps, but none matched!\n raise ValueError(f\"Invalid steps requested: {stage}/{step}\")\n\n if processing_stages[0] != \"all\":\n # Always run the directory initialization steps, but skip for 'all',\n # because it already includes them \u2013 and we want to avoid running\n # them twice.\n step_modules = [*STEP_MODULES[\"init\"], *step_modules]\n\n logger.title(\"Welcome aboard MNE-BIDS-Pipeline! \ud83d\udc4b\")\n msg = f\"Using configuration: {config}\"\n __mne_bids_pipeline_step__ = pathlib.Path(__file__) # used for logging\n logger.info(**gen_log_kwargs(message=msg, emoji=\"\ud83d\udcdd\"))\n config_imported = _import_config(\n config_path=config_path,\n overrides=overrides,\n )\n # Initialize dask now\n with get_parallel_backend(config_imported.exec_params):\n pass\n del __mne_bids_pipeline_step__\n logger.end()\n\n for step_module in step_modules:\n start = time.time()\n step = _short_step_path(pathlib.Path(step_module.__file__))\n logger.title(title=f\"{step}\")\n step_module.main(config=config_imported)\n elapsed = time.time() - start\n hours, remainder = divmod(elapsed, 3600)\n hours = int(hours)\n minutes, seconds = divmod(remainder, 60)\n minutes = int(minutes)\n seconds = int(np.ceil(seconds)) # always take full seconds\n elapsed = f\"{seconds}s\"\n if minutes:\n elapsed = f\"{minutes}m {elapsed}\"\n if hours:\n elapsed = f\"{hours}h {elapsed}\"\n logger.end(f\"done ({elapsed})\")\n", "path": "mne_bids_pipeline/_main.py"}], "after_files": [{"content": "import argparse\nimport pathlib\nfrom textwrap import dedent\nimport time\nfrom typing import List\nfrom types import ModuleType, SimpleNamespace\n\nimport numpy as np\n\nfrom ._config_utils import _get_step_modules\nfrom ._config_import import _import_config\nfrom ._config_template import create_template_config\nfrom ._logging import logger, gen_log_kwargs\nfrom ._parallel import get_parallel_backend\nfrom ._run import _short_step_path\n\n\ndef main():\n from . import __version__\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--version\", action=\"version\", version=f\"%(prog)s {__version__}\"\n )\n parser.add_argument(\"config\", nargs=\"?\", default=None)\n parser.add_argument(\n \"--config\",\n dest=\"config_switch\",\n default=None,\n metavar=\"FILE\",\n help=\"The path of the pipeline configuration file to use.\",\n )\n parser.add_argument(\n \"--create-config\",\n dest=\"create_config\",\n default=None,\n metavar=\"FILE\",\n help=\"Create a template configuration file with the specified name. 
\"\n \"If specified, all other parameters will be ignored.\",\n ),\n parser.add_argument(\n \"--steps\",\n dest=\"steps\",\n default=\"all\",\n help=dedent(\n \"\"\"\\\n The processing steps to run.\n Can either be one of the processing groups 'preprocessing', sensor',\n 'source', 'report', or 'all', or the name of a processing group plus\n the desired step sans the step number and\n filename extension, separated by a '/'. For example, to run ICA, you\n would pass 'sensor/run_ica`. If unspecified, will run all processing\n steps. Can also be a tuple of steps.\"\"\"\n ),\n )\n parser.add_argument(\n \"--root-dir\",\n dest=\"root_dir\",\n default=None,\n help=\"BIDS root directory of the data to process.\",\n )\n parser.add_argument(\n \"--deriv_root\",\n dest=\"deriv_root\",\n default=None,\n help=dedent(\n \"\"\"\\\n The root of the derivatives directory\n in which the pipeline will store the processing results.\n If unspecified, this will be derivatives/mne-bids-pipeline\n inside the BIDS root.\"\"\"\n ),\n ),\n parser.add_argument(\n \"--subject\", dest=\"subject\", default=None, help=\"The subject to process.\"\n )\n parser.add_argument(\n \"--session\", dest=\"session\", default=None, help=\"The session to process.\"\n )\n parser.add_argument(\n \"--task\", dest=\"task\", default=None, help=\"The task to process.\"\n )\n parser.add_argument(\"--run\", dest=\"run\", default=None, help=\"The run to process.\")\n parser.add_argument(\n \"--n_jobs\",\n dest=\"n_jobs\",\n type=int,\n default=None,\n help=\"The number of parallel processes to execute.\",\n )\n parser.add_argument(\n \"--interactive\",\n dest=\"interactive\",\n action=\"store_true\",\n help=\"Enable interactive mode.\",\n )\n parser.add_argument(\n \"--debug\", dest=\"debug\", action=\"store_true\", help=\"Enable debugging on error.\"\n )\n parser.add_argument(\n \"--no-cache\",\n dest=\"no_cache\",\n action=\"store_true\",\n help=\"Disable caching of intermediate results.\",\n )\n options = parser.parse_args()\n\n if options.create_config is not None:\n target_path = pathlib.Path(options.create_config)\n create_template_config(target_path=target_path, overwrite=False)\n return\n\n config = options.config\n config_switch = options.config_switch\n bad = False\n if config is None:\n if config_switch is None:\n bad = \"neither was provided\"\n else:\n config = config_switch\n elif config_switch is not None:\n bad = \"both were provided\"\n if bad:\n parser.error(\n \"\u274c You must specify a configuration file either as a single \"\n f\"argument or with --config, but {bad}.\"\n )\n steps = options.steps\n root_dir = options.root_dir\n deriv_root = options.deriv_root\n subject, session = options.subject, options.session\n task, run = options.task, options.run\n n_jobs = options.n_jobs\n interactive, debug = options.interactive, options.debug\n cache = not options.no_cache\n\n if isinstance(steps, str) and \",\" in steps:\n # Work around limitation in Fire: --steps=foo,bar/baz won't produce a\n # tuple ('foo', 'bar/baz'), but a string 'foo,bar/baz'.\n steps = tuple(steps.split(\",\"))\n elif isinstance(steps, str):\n steps = (steps,)\n\n on_error = \"debug\" if debug else None\n cache = \"1\" if cache else \"0\"\n\n processing_stages = []\n processing_steps = []\n for steps_ in steps:\n if \"/\" in steps_:\n stage, step = steps_.split(\"/\")\n processing_stages.append(stage)\n processing_steps.append(step)\n else:\n # User specified \"sensor\", \"preprocessing\" or similar, but without\n # any further grouping.\n 
processing_stages.append(steps_)\n processing_steps.append(None)\n\n config_path = pathlib.Path(config).expanduser().resolve(strict=True)\n overrides = SimpleNamespace()\n if root_dir:\n overrides.bids_root = pathlib.Path(root_dir).expanduser().resolve(strict=True)\n if deriv_root:\n overrides.deriv_root = (\n pathlib.Path(deriv_root).expanduser().resolve(strict=False)\n )\n if subject:\n overrides.subjects = [subject]\n if session:\n overrides.sessions = [session]\n if task:\n overrides.task = task\n if run:\n overrides.runs = run\n if interactive:\n overrides.interactive = interactive\n if n_jobs:\n overrides.n_jobs = int(n_jobs)\n if on_error:\n overrides.on_error = on_error\n if not cache:\n overrides.memory_location = False\n\n step_modules: List[ModuleType] = []\n STEP_MODULES = _get_step_modules()\n for stage, step in zip(processing_stages, processing_steps):\n if stage not in STEP_MODULES.keys():\n raise ValueError(\n f\"Invalid step requested: '{stage}'. \"\n f\"It should be one of {list(STEP_MODULES.keys())}.\"\n )\n\n if step is None:\n # User specified `sensors`, `source`, or similar\n step_modules.extend(STEP_MODULES[stage])\n else:\n # User specified 'stage/step'\n for step_module in STEP_MODULES[stage]:\n step_name = pathlib.Path(step_module.__file__).name\n if step in step_name:\n step_modules.append(step_module)\n break\n else:\n # We've iterated over all steps, but none matched!\n raise ValueError(f\"Invalid steps requested: {stage}/{step}\")\n\n if processing_stages[0] != \"all\":\n # Always run the directory initialization steps, but skip for 'all',\n # because it already includes them \u2013 and we want to avoid running\n # them twice.\n step_modules = [*STEP_MODULES[\"init\"], *step_modules]\n\n logger.title(\"Welcome aboard MNE-BIDS-Pipeline! \ud83d\udc4b\")\n msg = f\"Using configuration: {config}\"\n __mne_bids_pipeline_step__ = pathlib.Path(__file__) # used for logging\n logger.info(**gen_log_kwargs(message=msg, emoji=\"\ud83d\udcdd\"))\n config_imported = _import_config(\n config_path=config_path,\n overrides=overrides,\n )\n # Initialize dask now\n with get_parallel_backend(config_imported.exec_params):\n pass\n del __mne_bids_pipeline_step__\n logger.end()\n\n for step_module in step_modules:\n start = time.time()\n step = _short_step_path(pathlib.Path(step_module.__file__))\n logger.title(title=f\"{step}\")\n step_module.main(config=config_imported)\n elapsed = time.time() - start\n hours, remainder = divmod(elapsed, 3600)\n hours = int(hours)\n minutes, seconds = divmod(remainder, 60)\n minutes = int(minutes)\n seconds = int(np.ceil(seconds)) # always take full seconds\n elapsed = f\"{seconds}s\"\n if minutes:\n elapsed = f\"{minutes}m {elapsed}\"\n if hours:\n elapsed = f\"{hours}h {elapsed}\"\n logger.end(f\"done ({elapsed})\")\n", "path": "mne_bids_pipeline/_main.py"}]} | 2,758 | 349 |
gh_patches_debug_4319 | rasdani/github-patches | git_diff | electricitymaps__electricitymaps-contrib-5651 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CA-SK production parser down
## Description
This is an automatic error report generated for Canada Saskatchewan (CA-SK).
Issues:
- No recent data found for `production` parser
- No recent data found for `consumption` parser
## Suggestions
- Try running the parser locally using the command `poetry run test_parser CA-SK production`
- <a href="https://storage.googleapis.com/electricitymap-parser-logs/CA-SK.html">Explore the runtime logs</a>
You can see an overview of all parser issues [here](https://github.com/tmrowco/electricitymap-contrib/wiki/Parser-issues).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsers/CA_SK.py`
Content:
```
1 from datetime import datetime, timedelta
2 from logging import Logger, getLogger
3 from typing import List, Optional
4
5 from pytz import timezone
6 from requests import Response, Session
7
8 from parsers.lib.exceptions import ParserException
9
10 TIMEZONE = timezone("America/Regina")
11
12 # URLs for the different endpoints.
13 PRODUCTION_URL = (
14 "https://www.saskpower.com/ignitionapi/PowerUseDashboard/GetPowerUseDashboardData"
15 )
16 CONSUMPTION_URL = "https://www.saskpower.com/ignitionapi/Content/GetNetLoad"
17
18 PRODUCTION_MAPPING = {
19 "Hydro": "hydro",
20 "Wind": "wind",
21 "Solar": "solar",
22 "Natural Gas": "gas",
23 "Coal": "coal",
24 "Other": "unknown", # This is internal consumption, losses, heat recovery facilities and small independent power producers.
25 }
26
27 USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36"
28
29
30 def validate_zone_key(zone_key: str) -> None:
31 if zone_key != "CA-SK":
32 raise ParserException(
33 "CA_SK.py",
34 f"CA_SK.py is not designed to parse zone_key: {zone_key}.",
35 zone_key,
36 )
37
38
39 def validate_no_datetime(target_datetime: Optional[datetime], zone_key) -> None:
40 if target_datetime:
41 raise ParserException(
42 "CA_SK.py",
43 "This parser is unable to fetch historical data.",
44 zone_key,
45 )
46
47
48 def fetch_production(
49 zone_key: str = "CA-SK",
50 session: Optional[Session] = None,
51 target_datetime: Optional[datetime] = None,
52 logger: Logger = getLogger(__name__),
53 ):
54 """This parser function will currently return the daily average of the day in question as hourly data.
55 This is because the API only returns daily data but the backend expects hourly values.
56 This is in order to facilitate the estimation of the hourly values from the daily average.
57 """
58 # Validate that the zone key is equal to CA-SK.
59 validate_zone_key(zone_key)
60 # Validate that the target_datetime is None as this parser is unable to fetch historical data.
61 validate_no_datetime(target_datetime, zone_key)
62
63 session = session or Session()
64
65 # Set the headers to mimic a user browser as the API will return a 403 if not.
66 headers = {"user-agent": USER_AGENT}
67 response: Response = session.get(PRODUCTION_URL, headers=headers)
68
69 if not response.ok:
70 raise ParserException(
71 "CA_SK.py",
72 f"Failed to fetch production data. Response Code: {response.status_code}\nError:\n{response.text}",
73 zone_key,
74 )
75
76 raw_data = response.json()
77 # Date is in the format "Jan 01, 2020"
78 raw_date = raw_data["SupplyDataText"]
79 date = datetime.strptime(raw_date, "%b %d, %Y")
80 production_data = {}
81
82 for value in raw_data["PowerCacheData"]["generationByType"]:
83 production_data[PRODUCTION_MAPPING[value["type"]]] = value[
84 "totalGenerationForType"
85 ]
86
87 data_list: List[dict] = []
88 # Hack to return hourly data from daily data for the backend as it expects hourly data.
89 for hour in range(0, 24):
90 data_list.append(
91 {
92 "zoneKey": zone_key,
93 "datetime": date.replace(hour=hour, tzinfo=TIMEZONE),
94 "production": production_data,
95 "source": "saskpower.com",
96 }
97 )
98
99 return data_list
100
101
102 def fetch_consumption(
103 zone_key: str = "CA-SK",
104 session: Optional[Session] = None,
105 target_datetime: Optional[datetime] = None,
106 logger: Logger = getLogger(__name__),
107 ):
108 # Validate that the zone key is equal to CA-SK.
109 validate_zone_key(zone_key)
110 # Validate that the target_datetime is None as this parser is unable to fetch historical data.
111 validate_no_datetime(target_datetime, zone_key)
112
113 session = session or Session()
114
115 # Set the headers to mimic a user browser as the API will return a 403 if not.
116 headers = {"user-agent": USER_AGENT}
117
118 response: Response = session.get(CONSUMPTION_URL) # , headers=headers)
119
120 if not response.ok:
121 raise ParserException(
122 "CA_SK.py",
123 f"Failed to fetch consumption data. Response Code: {response.status_code}\nError:\n{response.text}",
124 zone_key,
125 )
126
127 raw_data = response.json()
128
129 now = datetime.now(TIMEZONE)
130
131 # Data is updated every 5 minutes so we assume the data is from a multiple of 5 minutes and has a 5 minute delay from that multiple.
132 assumed_datetime = now.replace(second=0, microsecond=0) - timedelta(
133 minutes=(now.minute % 5) + 5
134 )
135
136 return [
137 {
138 "zoneKey": zone_key,
139 "datetime": assumed_datetime,
140 "consumption": int(raw_data),
141 "source": "saskpower.com",
142 }
143 ]
144
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parsers/CA_SK.py b/parsers/CA_SK.py
--- a/parsers/CA_SK.py
+++ b/parsers/CA_SK.py
@@ -115,7 +115,7 @@
# Set the headers to mimic a user browser as the API will return a 403 if not.
headers = {"user-agent": USER_AGENT}
- response: Response = session.get(CONSUMPTION_URL) # , headers=headers)
+ response: Response = session.get(CONSUMPTION_URL, headers=headers)
if not response.ok:
raise ParserException(
| {"golden_diff": "diff --git a/parsers/CA_SK.py b/parsers/CA_SK.py\n--- a/parsers/CA_SK.py\n+++ b/parsers/CA_SK.py\n@@ -115,7 +115,7 @@\n # Set the headers to mimic a user browser as the API will return a 403 if not.\n headers = {\"user-agent\": USER_AGENT}\n \n- response: Response = session.get(CONSUMPTION_URL) # , headers=headers)\n+ response: Response = session.get(CONSUMPTION_URL, headers=headers)\n \n if not response.ok:\n raise ParserException(\n", "issue": "CA-SK production parser down\n## Description\n\nThis is an automatic error report generated for Canada Saskatchewan (CA-SK).\n\nIssues:\n- No recent data found for `production` parser\n- No recent data found for `consumption` parser\n\n## Suggestions\n- Try running the parser locally using the command `poetry run test_parser CA-SK production`\n- <a href=\"https://storage.googleapis.com/electricitymap-parser-logs/CA-SK.html\">Explore the runtime logs</a>\n\nYou can see an overview of all parser issues [here](https://github.com/tmrowco/electricitymap-contrib/wiki/Parser-issues).\n\n", "before_files": [{"content": "from datetime import datetime, timedelta\nfrom logging import Logger, getLogger\nfrom typing import List, Optional\n\nfrom pytz import timezone\nfrom requests import Response, Session\n\nfrom parsers.lib.exceptions import ParserException\n\nTIMEZONE = timezone(\"America/Regina\")\n\n# URLs for the different endpoints.\nPRODUCTION_URL = (\n \"https://www.saskpower.com/ignitionapi/PowerUseDashboard/GetPowerUseDashboardData\"\n)\nCONSUMPTION_URL = \"https://www.saskpower.com/ignitionapi/Content/GetNetLoad\"\n\nPRODUCTION_MAPPING = {\n \"Hydro\": \"hydro\",\n \"Wind\": \"wind\",\n \"Solar\": \"solar\",\n \"Natural Gas\": \"gas\",\n \"Coal\": \"coal\",\n \"Other\": \"unknown\", # This is internal consumption, losses, heat recovery facilities and small independent power producers.\n}\n\nUSER_AGENT = \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36\"\n\n\ndef validate_zone_key(zone_key: str) -> None:\n if zone_key != \"CA-SK\":\n raise ParserException(\n \"CA_SK.py\",\n f\"CA_SK.py is not designed to parse zone_key: {zone_key}.\",\n zone_key,\n )\n\n\ndef validate_no_datetime(target_datetime: Optional[datetime], zone_key) -> None:\n if target_datetime:\n raise ParserException(\n \"CA_SK.py\",\n \"This parser is unable to fetch historical data.\",\n zone_key,\n )\n\n\ndef fetch_production(\n zone_key: str = \"CA-SK\",\n session: Optional[Session] = None,\n target_datetime: Optional[datetime] = None,\n logger: Logger = getLogger(__name__),\n):\n \"\"\"This parser function will currently return the daily average of the day in question as hourly data.\n This is because the API only returns daily data but the backend expects hourly values.\n This is in order to facilitate the estimation of the hourly values from the daily average.\n \"\"\"\n # Validate that the zone key is equal to CA-SK.\n validate_zone_key(zone_key)\n # Validate that the target_datetime is None as this parser is unable to fetch historical data.\n validate_no_datetime(target_datetime, zone_key)\n\n session = session or Session()\n\n # Set the headers to mimic a user browser as the API will return a 403 if not.\n headers = {\"user-agent\": USER_AGENT}\n response: Response = session.get(PRODUCTION_URL, headers=headers)\n\n if not response.ok:\n raise ParserException(\n \"CA_SK.py\",\n f\"Failed to fetch production data. 
Response Code: {response.status_code}\\nError:\\n{response.text}\",\n zone_key,\n )\n\n raw_data = response.json()\n # Date is in the format \"Jan 01, 2020\"\n raw_date = raw_data[\"SupplyDataText\"]\n date = datetime.strptime(raw_date, \"%b %d, %Y\")\n production_data = {}\n\n for value in raw_data[\"PowerCacheData\"][\"generationByType\"]:\n production_data[PRODUCTION_MAPPING[value[\"type\"]]] = value[\n \"totalGenerationForType\"\n ]\n\n data_list: List[dict] = []\n # Hack to return hourly data from daily data for the backend as it expects hourly data.\n for hour in range(0, 24):\n data_list.append(\n {\n \"zoneKey\": zone_key,\n \"datetime\": date.replace(hour=hour, tzinfo=TIMEZONE),\n \"production\": production_data,\n \"source\": \"saskpower.com\",\n }\n )\n\n return data_list\n\n\ndef fetch_consumption(\n zone_key: str = \"CA-SK\",\n session: Optional[Session] = None,\n target_datetime: Optional[datetime] = None,\n logger: Logger = getLogger(__name__),\n):\n # Validate that the zone key is equal to CA-SK.\n validate_zone_key(zone_key)\n # Validate that the target_datetime is None as this parser is unable to fetch historical data.\n validate_no_datetime(target_datetime, zone_key)\n\n session = session or Session()\n\n # Set the headers to mimic a user browser as the API will return a 403 if not.\n headers = {\"user-agent\": USER_AGENT}\n\n response: Response = session.get(CONSUMPTION_URL) # , headers=headers)\n\n if not response.ok:\n raise ParserException(\n \"CA_SK.py\",\n f\"Failed to fetch consumption data. Response Code: {response.status_code}\\nError:\\n{response.text}\",\n zone_key,\n )\n\n raw_data = response.json()\n\n now = datetime.now(TIMEZONE)\n\n # Data is updated every 5 minutes so we assume the data is from a multiple of 5 minutes and has a 5 minute delay from that multiple.\n assumed_datetime = now.replace(second=0, microsecond=0) - timedelta(\n minutes=(now.minute % 5) + 5\n )\n\n return [\n {\n \"zoneKey\": zone_key,\n \"datetime\": assumed_datetime,\n \"consumption\": int(raw_data),\n \"source\": \"saskpower.com\",\n }\n ]\n", "path": "parsers/CA_SK.py"}], "after_files": [{"content": "from datetime import datetime, timedelta\nfrom logging import Logger, getLogger\nfrom typing import List, Optional\n\nfrom pytz import timezone\nfrom requests import Response, Session\n\nfrom parsers.lib.exceptions import ParserException\n\nTIMEZONE = timezone(\"America/Regina\")\n\n# URLs for the different endpoints.\nPRODUCTION_URL = (\n \"https://www.saskpower.com/ignitionapi/PowerUseDashboard/GetPowerUseDashboardData\"\n)\nCONSUMPTION_URL = \"https://www.saskpower.com/ignitionapi/Content/GetNetLoad\"\n\nPRODUCTION_MAPPING = {\n \"Hydro\": \"hydro\",\n \"Wind\": \"wind\",\n \"Solar\": \"solar\",\n \"Natural Gas\": \"gas\",\n \"Coal\": \"coal\",\n \"Other\": \"unknown\", # This is internal consumption, losses, heat recovery facilities and small independent power producers.\n}\n\nUSER_AGENT = \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36\"\n\n\ndef validate_zone_key(zone_key: str) -> None:\n if zone_key != \"CA-SK\":\n raise ParserException(\n \"CA_SK.py\",\n f\"CA_SK.py is not designed to parse zone_key: {zone_key}.\",\n zone_key,\n )\n\n\ndef validate_no_datetime(target_datetime: Optional[datetime], zone_key) -> None:\n if target_datetime:\n raise ParserException(\n \"CA_SK.py\",\n \"This parser is unable to fetch historical data.\",\n zone_key,\n )\n\n\ndef fetch_production(\n zone_key: str = \"CA-SK\",\n 
session: Optional[Session] = None,\n target_datetime: Optional[datetime] = None,\n logger: Logger = getLogger(__name__),\n):\n \"\"\"This parser function will currently return the daily average of the day in question as hourly data.\n This is because the API only returns daily data but the backend expects hourly values.\n This is in order to facilitate the estimation of the hourly values from the daily average.\n \"\"\"\n # Validate that the zone key is equal to CA-SK.\n validate_zone_key(zone_key)\n # Validate that the target_datetime is None as this parser is unable to fetch historical data.\n validate_no_datetime(target_datetime, zone_key)\n\n session = session or Session()\n\n # Set the headers to mimic a user browser as the API will return a 403 if not.\n headers = {\"user-agent\": USER_AGENT}\n response: Response = session.get(PRODUCTION_URL, headers=headers)\n\n if not response.ok:\n raise ParserException(\n \"CA_SK.py\",\n f\"Failed to fetch production data. Response Code: {response.status_code}\\nError:\\n{response.text}\",\n zone_key,\n )\n\n raw_data = response.json()\n # Date is in the format \"Jan 01, 2020\"\n raw_date = raw_data[\"SupplyDataText\"]\n date = datetime.strptime(raw_date, \"%b %d, %Y\")\n production_data = {}\n\n for value in raw_data[\"PowerCacheData\"][\"generationByType\"]:\n production_data[PRODUCTION_MAPPING[value[\"type\"]]] = value[\n \"totalGenerationForType\"\n ]\n\n data_list: List[dict] = []\n # Hack to return hourly data from daily data for the backend as it expects hourly data.\n for hour in range(0, 24):\n data_list.append(\n {\n \"zoneKey\": zone_key,\n \"datetime\": date.replace(hour=hour, tzinfo=TIMEZONE),\n \"production\": production_data,\n \"source\": \"saskpower.com\",\n }\n )\n\n return data_list\n\n\ndef fetch_consumption(\n zone_key: str = \"CA-SK\",\n session: Optional[Session] = None,\n target_datetime: Optional[datetime] = None,\n logger: Logger = getLogger(__name__),\n):\n # Validate that the zone key is equal to CA-SK.\n validate_zone_key(zone_key)\n # Validate that the target_datetime is None as this parser is unable to fetch historical data.\n validate_no_datetime(target_datetime, zone_key)\n\n session = session or Session()\n\n # Set the headers to mimic a user browser as the API will return a 403 if not.\n headers = {\"user-agent\": USER_AGENT}\n\n response: Response = session.get(CONSUMPTION_URL, headers=headers)\n\n if not response.ok:\n raise ParserException(\n \"CA_SK.py\",\n f\"Failed to fetch consumption data. Response Code: {response.status_code}\\nError:\\n{response.text}\",\n zone_key,\n )\n\n raw_data = response.json()\n\n now = datetime.now(TIMEZONE)\n\n # Data is updated every 5 minutes so we assume the data is from a multiple of 5 minutes and has a 5 minute delay from that multiple.\n assumed_datetime = now.replace(second=0, microsecond=0) - timedelta(\n minutes=(now.minute % 5) + 5\n )\n\n return [\n {\n \"zoneKey\": zone_key,\n \"datetime\": assumed_datetime,\n \"consumption\": int(raw_data),\n \"source\": \"saskpower.com\",\n }\n ]\n", "path": "parsers/CA_SK.py"}]} | 1,880 | 131 |
gh_patches_debug_18454 | rasdani/github-patches | git_diff | napalm-automation__napalm-514 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[2.0] "pip3 install napalm" doesn't install requirements
Debian 9.2 (Stretch) with python3 v3.5.3, pip3 v9.0.1
With v1.2.0, `pip3 install napalm==1.2.0` also installs the required modules (MarkupSafe, jinja2, netaddr, pyYAML, pyeapi, future, pynacl, bcrypt, paramiko, pyFG, scp, netmiko, lxml, pyIOSXR, ncclient, pyserial, junos-eznc, urllib3, idna, certifi, chardet, requests, pynxos, pan-python, requests-toolbelt, xmltodict, pyPluribus, chainmap, librouteros, vyattaconfparser).
With Napalm v2.0.0, none of the required modules are installed by `pip3 install napalm`, so napalm won't work.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 """setup.py file."""
2 import uuid
3 import os
4
5 from distutils.core import Command
6 from setuptools import setup, find_packages
7 from setuptools.command import install
8
9
10 from pip.req import parse_requirements
11
12 import pip
13 import sys
14
15 __author__ = 'David Barroso <[email protected]>'
16
17 # Read SUPPORTED_DRIVERS from file (without importing)
18 _locals = {}
19 filename = os.path.join('napalm', '_SUPPORTED_DRIVERS.py')
20 with open(filename) as supported:
21 exec(supported.read(), None, _locals)
22 SUPPORTED_DRIVERS = _locals['SUPPORTED_DRIVERS']
23
24
25 def process_requirements(dep):
26 print("PROCESSING DEPENDENCIES FOR {}".format(dep))
27 u = uuid.uuid1()
28 iter_reqs = parse_requirements("requirements/{}".format(dep), session=u)
29 [pip.main(['install', (str(ir.req))]) for ir in iter_reqs]
30
31
32 def custom_command_driver(driver):
33 class CustomCommand(Command):
34 """A custom command to run Pylint on all Python source files."""
35 user_options = []
36
37 def initialize_options(self):
38 pass
39
40 def finalize_options(self):
41 pass
42
43 def run(self):
44 """Run command."""
45 process_requirements(driver)
46
47 return CustomCommand
48
49
50 class CustomInstall(install.install):
51 """A custom command to run Pylint on all Python source files."""
52
53 def run(self):
54 """Run command."""
55 if any([d in sys.argv for d in SUPPORTED_DRIVERS]):
56 process_requirements('base')
57 else:
58 process_requirements('all')
59 install.install.run(self)
60
61
62 custom_commands = {d: custom_command_driver(d) for d in SUPPORTED_DRIVERS}
63 custom_commands['install'] = CustomInstall
64
65 setup(
66 cmdclass=custom_commands,
67 name="napalm",
68 version='2.0.0',
69 packages=find_packages(exclude=("test*", )),
70 test_suite='test_base',
71 author="David Barroso, Kirk Byers, Mircea Ulinic",
72 author_email="[email protected], [email protected], [email protected]",
73 description="Network Automation and Programmability Abstraction Layer with Multivendor support",
74 classifiers=[
75 'Topic :: Utilities',
76 'Programming Language :: Python',
77 'Programming Language :: Python :: 2',
78 'Programming Language :: Python :: 2.7',
79 'Programming Language :: Python :: 3',
80 'Programming Language :: Python :: 3.4',
81 'Programming Language :: Python :: 3.5',
82 'Programming Language :: Python :: 3.6',
83 'Operating System :: POSIX :: Linux',
84 'Operating System :: MacOS',
85 ],
86 url="https://github.com/napalm-automation/napalm",
87 include_package_data=True,
88 install_requires=[],
89 entry_points={
90 'console_scripts': [
91 'cl_napalm_configure=napalm.base.clitools.cl_napalm_configure:main',
92 'cl_napalm_test=napalm.base.clitools.cl_napalm_test:main',
93 'cl_napalm_validate=napalm.base.clitools.cl_napalm_validate:main',
94 'napalm=napalm.base.clitools.cl_napalm:main',
95 ],
96 }
97 )
98
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -5,11 +5,12 @@
from distutils.core import Command
from setuptools import setup, find_packages
from setuptools.command import install
+from subprocess import check_call
from pip.req import parse_requirements
-import pip
+import pip # noqa: test pip is installed
import sys
__author__ = 'David Barroso <[email protected]>'
@@ -26,7 +27,9 @@
print("PROCESSING DEPENDENCIES FOR {}".format(dep))
u = uuid.uuid1()
iter_reqs = parse_requirements("requirements/{}".format(dep), session=u)
- [pip.main(['install', (str(ir.req))]) for ir in iter_reqs]
+
+ for ir in iter_reqs:
+ check_call([sys.executable, '-m', 'pip', 'install', str(ir.req)])
def custom_command_driver(driver):
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -5,11 +5,12 @@\n from distutils.core import Command\n from setuptools import setup, find_packages\n from setuptools.command import install\n+from subprocess import check_call\n \n \n from pip.req import parse_requirements\n \n-import pip\n+import pip # noqa: test pip is installed\n import sys\n \n __author__ = 'David Barroso <[email protected]>'\n@@ -26,7 +27,9 @@\n print(\"PROCESSING DEPENDENCIES FOR {}\".format(dep))\n u = uuid.uuid1()\n iter_reqs = parse_requirements(\"requirements/{}\".format(dep), session=u)\n- [pip.main(['install', (str(ir.req))]) for ir in iter_reqs]\n+\n+ for ir in iter_reqs:\n+ check_call([sys.executable, '-m', 'pip', 'install', str(ir.req)])\n \n \n def custom_command_driver(driver):\n", "issue": "[2.0] \"pip3 install napalm\" doesn't install requirements\nDebian 9.2 (Stretch) with python3 v3.5.3, pip3 v9.0.1\r\n\r\nWith v1.2.0 a `pip3 install napalm==1.2.0` installs also the required modules (MarkupSafe, jinja2, netaddr, pyYAML, pyeapi, future, pynacl, bcrypt, paramiko, pyFG, scp, netmiko, lxml, pyIOSXR, ncclient, pyserial, junos-eznc, urllib3, idna, certifi, chardet, requests, pynxos, pan-python, requests-toolbelt, xmltodict, pyPluribus, chainmap, librouteros, vyattaconfparser).\r\n\r\nWith Napalm v2.0.0 no required module is installed with `pip3 install napalm`, so napalm won't work.\n", "before_files": [{"content": "\"\"\"setup.py file.\"\"\"\nimport uuid\nimport os\n\nfrom distutils.core import Command\nfrom setuptools import setup, find_packages\nfrom setuptools.command import install\n\n\nfrom pip.req import parse_requirements\n\nimport pip\nimport sys\n\n__author__ = 'David Barroso <[email protected]>'\n\n# Read SUPPORTED_DRIVERS from file (without importing)\n_locals = {}\nfilename = os.path.join('napalm', '_SUPPORTED_DRIVERS.py')\nwith open(filename) as supported:\n exec(supported.read(), None, _locals)\n SUPPORTED_DRIVERS = _locals['SUPPORTED_DRIVERS']\n\n\ndef process_requirements(dep):\n print(\"PROCESSING DEPENDENCIES FOR {}\".format(dep))\n u = uuid.uuid1()\n iter_reqs = parse_requirements(\"requirements/{}\".format(dep), session=u)\n [pip.main(['install', (str(ir.req))]) for ir in iter_reqs]\n\n\ndef custom_command_driver(driver):\n class CustomCommand(Command):\n \"\"\"A custom command to run Pylint on all Python source files.\"\"\"\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n \"\"\"Run command.\"\"\"\n process_requirements(driver)\n\n return CustomCommand\n\n\nclass CustomInstall(install.install):\n \"\"\"A custom command to run Pylint on all Python source files.\"\"\"\n\n def run(self):\n \"\"\"Run command.\"\"\"\n if any([d in sys.argv for d in SUPPORTED_DRIVERS]):\n process_requirements('base')\n else:\n process_requirements('all')\n install.install.run(self)\n\n\ncustom_commands = {d: custom_command_driver(d) for d in SUPPORTED_DRIVERS}\ncustom_commands['install'] = CustomInstall\n\nsetup(\n cmdclass=custom_commands,\n name=\"napalm\",\n version='2.0.0',\n packages=find_packages(exclude=(\"test*\", )),\n test_suite='test_base',\n author=\"David Barroso, Kirk Byers, Mircea Ulinic\",\n author_email=\"[email protected], [email protected], [email protected]\",\n description=\"Network Automation and Programmability Abstraction Layer with Multivendor support\",\n classifiers=[\n 'Topic :: Utilities',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 
'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: MacOS',\n ],\n url=\"https://github.com/napalm-automation/napalm\",\n include_package_data=True,\n install_requires=[],\n entry_points={\n 'console_scripts': [\n 'cl_napalm_configure=napalm.base.clitools.cl_napalm_configure:main',\n 'cl_napalm_test=napalm.base.clitools.cl_napalm_test:main',\n 'cl_napalm_validate=napalm.base.clitools.cl_napalm_validate:main',\n 'napalm=napalm.base.clitools.cl_napalm:main',\n ],\n }\n)\n", "path": "setup.py"}], "after_files": [{"content": "\"\"\"setup.py file.\"\"\"\nimport uuid\nimport os\n\nfrom distutils.core import Command\nfrom setuptools import setup, find_packages\nfrom setuptools.command import install\nfrom subprocess import check_call\n\n\nfrom pip.req import parse_requirements\n\nimport pip # noqa: test pip is installed\nimport sys\n\n__author__ = 'David Barroso <[email protected]>'\n\n# Read SUPPORTED_DRIVERS from file (without importing)\n_locals = {}\nfilename = os.path.join('napalm', '_SUPPORTED_DRIVERS.py')\nwith open(filename) as supported:\n exec(supported.read(), None, _locals)\n SUPPORTED_DRIVERS = _locals['SUPPORTED_DRIVERS']\n\n\ndef process_requirements(dep):\n print(\"PROCESSING DEPENDENCIES FOR {}\".format(dep))\n u = uuid.uuid1()\n iter_reqs = parse_requirements(\"requirements/{}\".format(dep), session=u)\n\n for ir in iter_reqs:\n check_call([sys.executable, '-m', 'pip', 'install', str(ir.req)])\n\n\ndef custom_command_driver(driver):\n class CustomCommand(Command):\n \"\"\"A custom command to run Pylint on all Python source files.\"\"\"\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n \"\"\"Run command.\"\"\"\n process_requirements(driver)\n\n return CustomCommand\n\n\nclass CustomInstall(install.install):\n \"\"\"A custom command to run Pylint on all Python source files.\"\"\"\n\n def run(self):\n \"\"\"Run command.\"\"\"\n if any([d in sys.argv for d in SUPPORTED_DRIVERS]):\n process_requirements('base')\n else:\n process_requirements('all')\n install.install.run(self)\n\n\ncustom_commands = {d: custom_command_driver(d) for d in SUPPORTED_DRIVERS}\ncustom_commands['install'] = CustomInstall\n\nsetup(\n cmdclass=custom_commands,\n name=\"napalm\",\n version='2.0.0',\n packages=find_packages(exclude=(\"test*\", )),\n test_suite='test_base',\n author=\"David Barroso, Kirk Byers, Mircea Ulinic\",\n author_email=\"[email protected], [email protected], [email protected]\",\n description=\"Network Automation and Programmability Abstraction Layer with Multivendor support\",\n classifiers=[\n 'Topic :: Utilities',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: MacOS',\n ],\n url=\"https://github.com/napalm-automation/napalm\",\n include_package_data=True,\n install_requires=[],\n entry_points={\n 'console_scripts': [\n 'cl_napalm_configure=napalm.base.clitools.cl_napalm_configure:main',\n 'cl_napalm_test=napalm.base.clitools.cl_napalm_test:main',\n 
'cl_napalm_validate=napalm.base.clitools.cl_napalm_validate:main',\n 'napalm=napalm.base.clitools.cl_napalm:main',\n ],\n }\n)\n", "path": "setup.py"}]} | 1,366 | 216 |
gh_patches_debug_34856 | rasdani/github-patches | git_diff | streamlit__streamlit-1903 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Replace Exception in st.foo() with StreamlitAPIException
Main places we do this today:
graphviz_chart.py
map.py
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/streamlit/elements/graphviz_chart.py`
Content:
```
1 # Copyright 2018-2020 Streamlit Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Streamlit support for GraphViz charts."""
16
17 from streamlit import type_util
18 from streamlit.logger import get_logger
19 from streamlit.proto.GraphVizChart_pb2 import GraphVizChart as GraphVizChartProto
20
21 LOGGER = get_logger(__name__)
22
23
24 class GraphvizMixin:
25 def graphviz_chart(dg, figure_or_dot, width=0, height=0, use_container_width=False):
26 """Display a graph using the dagre-d3 library.
27
28 Parameters
29 ----------
30 figure_or_dot : graphviz.dot.Graph, graphviz.dot.Digraph, str
31 The Graphlib graph object or dot string to display
32
33 width : number
34 Deprecated. If != 0 (default), will show an alert.
35 From now on you should set the width directly in the Graphviz
36 spec. Please refer to the Graphviz documentation for details.
37
38 height : number
39 Deprecated. If != 0 (default), will show an alert.
40 From now on you should set the height directly in the Graphviz
41 spec. Please refer to the Graphviz documentation for details.
42
43 use_container_width : bool
44 If True, set the chart width to the column width. This takes
45 precedence over the figure's native `width` value.
46
47 Example
48 -------
49
50 >>> import streamlit as st
51 >>> import graphviz as graphviz
52 >>>
53 >>> # Create a graphlib graph object
54 >>> graph = graphviz.Digraph()
55 >>> graph.edge('run', 'intr')
56 >>> graph.edge('intr', 'runbl')
57 >>> graph.edge('runbl', 'run')
58 >>> graph.edge('run', 'kernel')
59 >>> graph.edge('kernel', 'zombie')
60 >>> graph.edge('kernel', 'sleep')
61 >>> graph.edge('kernel', 'runmem')
62 >>> graph.edge('sleep', 'swap')
63 >>> graph.edge('swap', 'runswap')
64 >>> graph.edge('runswap', 'new')
65 >>> graph.edge('runswap', 'runmem')
66 >>> graph.edge('new', 'runmem')
67 >>> graph.edge('sleep', 'runmem')
68 >>>
69 >>> st.graphviz_chart(graph)
70
71 Or you can render the chart from the graph using GraphViz's Dot
72 language:
73
74 >>> st.graphviz_chart('''
75 digraph {
76 run -> intr
77 intr -> runbl
78 runbl -> run
79 run -> kernel
80 kernel -> zombie
81 kernel -> sleep
82 kernel -> runmem
83 sleep -> swap
84 swap -> runswap
85 runswap -> new
86 runswap -> runmem
87 new -> runmem
88 sleep -> runmem
89 }
90 ''')
91
92 .. output::
93 https://share.streamlit.io/0.56.0-xTAd/index.html?id=GBn3GXZie5K1kXuBKe4yQL
94 height: 400px
95
96 """
97 if width != 0 and height != 0:
98 import streamlit as st
99
100 st.warning(
101 "The `width` and `height` arguments in `st.graphviz` are deprecated and will be removed on 2020-03-04"
102 )
103 elif width != 0:
104 import streamlit as st
105
106 st.warning(
107 "The `width` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04"
108 )
109 elif height != 0:
110 import streamlit as st
111
112 st.warning(
113 "The `height` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04"
114 )
115
116 graphviz_chart_proto = GraphVizChartProto()
117 marshall(graphviz_chart_proto, figure_or_dot, use_container_width)
118 return dg._enqueue("graphviz_chart", graphviz_chart_proto) # type: ignore
119
120
121 def marshall(proto, figure_or_dot, use_container_width):
122 """Construct a GraphViz chart object.
123
124 See DeltaGenerator.graphviz_chart for docs.
125 """
126
127 if type_util.is_graphviz_chart(figure_or_dot):
128 dot = figure_or_dot.source
129 elif isinstance(figure_or_dot, str):
130 dot = figure_or_dot
131 else:
132 raise Exception("Unhandled type for graphviz chart: %s" % type(figure_or_dot))
133
134 proto.spec = dot
135 proto.use_container_width = use_container_width
136
```
Path: `lib/streamlit/elements/map.py`
Content:
```
1 # Copyright 2018-2020 Streamlit Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """A wrapper for simple PyDeck scatter charts."""
16
17 import copy
18 import json
19 from typing import Any, Dict
20
21 import pandas as pd
22
23 from streamlit.proto.DeckGlJsonChart_pb2 import DeckGlJsonChart as DeckGlJsonChartProto
24 import streamlit.elements.deck_gl_json_chart as deck_gl_json_chart
25
26
27 class MapMixin:
28 def map(dg, data=None, zoom=None, use_container_width=True):
29 """Display a map with points on it.
30
31 This is a wrapper around st.pydeck_chart to quickly create scatterplot
32 charts on top of a map, with auto-centering and auto-zoom.
33
34 When using this command, we advise all users to use a personal Mapbox
35 token. This ensures the map tiles used in this chart are more
36 robust. You can do this with the mapbox.token config option.
37
38 To get a token for yourself, create an account at
39 https://mapbox.com. It's free! (for moderate usage levels) See
40 https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more
41 info on how to set config options.
42
43 Parameters
44 ----------
45 data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,
46 or None
47 The data to be plotted. Must have columns called 'lat', 'lon',
48 'latitude', or 'longitude'.
49 zoom : int
50 Zoom level as specified in
51 https://wiki.openstreetmap.org/wiki/Zoom_levels
52
53 Example
54 -------
55 >>> import pandas as pd
56 >>> import numpy as np
57 >>>
58 >>> df = pd.DataFrame(
59 ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4],
60 ... columns=['lat', 'lon'])
61 >>>
62 >>> st.map(df)
63
64 .. output::
65 https://share.streamlit.io/0.53.0-SULT/index.html?id=9gTiomqPEbvHY2huTLoQtH
66 height: 600px
67
68 """
69 map_proto = DeckGlJsonChartProto()
70 map_proto.json = to_deckgl_json(data, zoom)
71 map_proto.use_container_width = use_container_width
72 return dg._enqueue("deck_gl_json_chart", map_proto) # type: ignore
73
74
75 # Map used as the basis for st.map.
76 _DEFAULT_MAP = dict(deck_gl_json_chart.EMPTY_MAP) # type: Dict[str, Any]
77 _DEFAULT_MAP["mapStyle"] = "mapbox://styles/mapbox/light-v10"
78
79 # Other default parameters for st.map.
80 _DEFAULT_COLOR = [200, 30, 0, 160]
81 _ZOOM_LEVELS = [
82 360,
83 180,
84 90,
85 45,
86 22.5,
87 11.25,
88 5.625,
89 2.813,
90 1.406,
91 0.703,
92 0.352,
93 0.176,
94 0.088,
95 0.044,
96 0.022,
97 0.011,
98 0.005,
99 0.003,
100 0.001,
101 0.0005,
102 ]
103
104
105 def _get_zoom_level(distance):
106 """Get the zoom level for a given distance in degrees.
107
108 See https://wiki.openstreetmap.org/wiki/Zoom_levels for reference.
109
110 Parameters
111 ----------
112 distance : float
113 How many degrees of longitude should fit in the map.
114
115 Returns
116 -------
117 int
118 The zoom level, from 0 to 29.
119
120 """
121
122 for i in range(len(_ZOOM_LEVELS) - 1):
123 if _ZOOM_LEVELS[i + 1] < distance <= _ZOOM_LEVELS[i]:
124 return i
125
126
127 def to_deckgl_json(data, zoom):
128
129 if data is None or data.empty:
130 return json.dumps(_DEFAULT_MAP)
131
132 if "lat" in data:
133 lat = "lat"
134 elif "latitude" in data:
135 lat = "latitude"
136 else:
137 raise Exception('Map data must contain a column named "latitude" or "lat".')
138
139 if "lon" in data:
140 lon = "lon"
141 elif "longitude" in data:
142 lon = "longitude"
143 else:
144 raise Exception('Map data must contain a column called "longitude" or "lon".')
145
146 if data[lon].isnull().values.any() or data[lat].isnull().values.any():
147 raise Exception("Latitude and longitude data must be numeric.")
148
149 data = pd.DataFrame(data)
150
151 min_lat = data[lat].min()
152 max_lat = data[lat].max()
153 min_lon = data[lon].min()
154 max_lon = data[lon].max()
155 center_lat = (max_lat + min_lat) / 2.0
156 center_lon = (max_lon + min_lon) / 2.0
157 range_lon = abs(max_lon - min_lon)
158 range_lat = abs(max_lat - min_lat)
159
160 if zoom == None:
161 if range_lon > range_lat:
162 longitude_distance = range_lon
163 else:
164 longitude_distance = range_lat
165 zoom = _get_zoom_level(longitude_distance)
166
167 # "+1" because itertuples includes the row index.
168 lon_col_index = data.columns.get_loc(lon) + 1
169 lat_col_index = data.columns.get_loc(lat) + 1
170 final_data = []
171 for row in data.itertuples():
172 final_data.append(
173 {"lon": float(row[lon_col_index]), "lat": float(row[lat_col_index])}
174 )
175
176 default = copy.deepcopy(_DEFAULT_MAP)
177 default["initialViewState"]["latitude"] = center_lat
178 default["initialViewState"]["longitude"] = center_lon
179 default["initialViewState"]["zoom"] = zoom
180 default["layers"] = [
181 {
182 "@@type": "ScatterplotLayer",
183 "getPosition": "@@=[lon, lat]",
184 "getRadius": 10,
185 "radiusScale": 10,
186 "radiusMinPixels": 3,
187 "getFillColor": _DEFAULT_COLOR,
188 "data": final_data,
189 }
190 ]
191 return json.dumps(default)
192
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/streamlit/elements/graphviz_chart.py b/lib/streamlit/elements/graphviz_chart.py
--- a/lib/streamlit/elements/graphviz_chart.py
+++ b/lib/streamlit/elements/graphviz_chart.py
@@ -17,6 +17,7 @@
from streamlit import type_util
from streamlit.logger import get_logger
from streamlit.proto.GraphVizChart_pb2 import GraphVizChart as GraphVizChartProto
+from streamlit.errors import StreamlitAPIException
LOGGER = get_logger(__name__)
@@ -129,7 +130,9 @@
elif isinstance(figure_or_dot, str):
dot = figure_or_dot
else:
- raise Exception("Unhandled type for graphviz chart: %s" % type(figure_or_dot))
+ raise StreamlitAPIException(
+ "Unhandled type for graphviz chart: %s" % type(figure_or_dot)
+ )
proto.spec = dot
proto.use_container_width = use_container_width
diff --git a/lib/streamlit/elements/map.py b/lib/streamlit/elements/map.py
--- a/lib/streamlit/elements/map.py
+++ b/lib/streamlit/elements/map.py
@@ -22,6 +22,7 @@
from streamlit.proto.DeckGlJsonChart_pb2 import DeckGlJsonChart as DeckGlJsonChartProto
import streamlit.elements.deck_gl_json_chart as deck_gl_json_chart
+from streamlit.errors import StreamlitAPIException
class MapMixin:
@@ -134,17 +135,21 @@
elif "latitude" in data:
lat = "latitude"
else:
- raise Exception('Map data must contain a column named "latitude" or "lat".')
+ raise StreamlitAPIException(
+ 'Map data must contain a column named "latitude" or "lat".'
+ )
if "lon" in data:
lon = "lon"
elif "longitude" in data:
lon = "longitude"
else:
- raise Exception('Map data must contain a column called "longitude" or "lon".')
+ raise StreamlitAPIException(
+ 'Map data must contain a column called "longitude" or "lon".'
+ )
if data[lon].isnull().values.any() or data[lat].isnull().values.any():
- raise Exception("Latitude and longitude data must be numeric.")
+ raise StreamlitAPIException("Latitude and longitude data must be numeric.")
data = pd.DataFrame(data)
| {"golden_diff": "diff --git a/lib/streamlit/elements/graphviz_chart.py b/lib/streamlit/elements/graphviz_chart.py\n--- a/lib/streamlit/elements/graphviz_chart.py\n+++ b/lib/streamlit/elements/graphviz_chart.py\n@@ -17,6 +17,7 @@\n from streamlit import type_util\n from streamlit.logger import get_logger\n from streamlit.proto.GraphVizChart_pb2 import GraphVizChart as GraphVizChartProto\n+from streamlit.errors import StreamlitAPIException\n \n LOGGER = get_logger(__name__)\n \n@@ -129,7 +130,9 @@\n elif isinstance(figure_or_dot, str):\n dot = figure_or_dot\n else:\n- raise Exception(\"Unhandled type for graphviz chart: %s\" % type(figure_or_dot))\n+ raise StreamlitAPIException(\n+ \"Unhandled type for graphviz chart: %s\" % type(figure_or_dot)\n+ )\n \n proto.spec = dot\n proto.use_container_width = use_container_width\ndiff --git a/lib/streamlit/elements/map.py b/lib/streamlit/elements/map.py\n--- a/lib/streamlit/elements/map.py\n+++ b/lib/streamlit/elements/map.py\n@@ -22,6 +22,7 @@\n \n from streamlit.proto.DeckGlJsonChart_pb2 import DeckGlJsonChart as DeckGlJsonChartProto\n import streamlit.elements.deck_gl_json_chart as deck_gl_json_chart\n+from streamlit.errors import StreamlitAPIException\n \n \n class MapMixin:\n@@ -134,17 +135,21 @@\n elif \"latitude\" in data:\n lat = \"latitude\"\n else:\n- raise Exception('Map data must contain a column named \"latitude\" or \"lat\".')\n+ raise StreamlitAPIException(\n+ 'Map data must contain a column named \"latitude\" or \"lat\".'\n+ )\n \n if \"lon\" in data:\n lon = \"lon\"\n elif \"longitude\" in data:\n lon = \"longitude\"\n else:\n- raise Exception('Map data must contain a column called \"longitude\" or \"lon\".')\n+ raise StreamlitAPIException(\n+ 'Map data must contain a column called \"longitude\" or \"lon\".'\n+ )\n \n if data[lon].isnull().values.any() or data[lat].isnull().values.any():\n- raise Exception(\"Latitude and longitude data must be numeric.\")\n+ raise StreamlitAPIException(\"Latitude and longitude data must be numeric.\")\n \n data = pd.DataFrame(data)\n", "issue": "Replace Exception in st.foo() with StreamlitAPIException\nMain places we do this today:\r\ngraphviz_chart.py\r\nmap.py\n", "before_files": [{"content": "# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Streamlit support for GraphViz charts.\"\"\"\n\nfrom streamlit import type_util\nfrom streamlit.logger import get_logger\nfrom streamlit.proto.GraphVizChart_pb2 import GraphVizChart as GraphVizChartProto\n\nLOGGER = get_logger(__name__)\n\n\nclass GraphvizMixin:\n def graphviz_chart(dg, figure_or_dot, width=0, height=0, use_container_width=False):\n \"\"\"Display a graph using the dagre-d3 library.\n\n Parameters\n ----------\n figure_or_dot : graphviz.dot.Graph, graphviz.dot.Digraph, str\n The Graphlib graph object or dot string to display\n\n width : number\n Deprecated. If != 0 (default), will show an alert.\n From now on you should set the width directly in the Graphviz\n spec. 
Please refer to the Graphviz documentation for details.\n\n height : number\n Deprecated. If != 0 (default), will show an alert.\n From now on you should set the height directly in the Graphviz\n spec. Please refer to the Graphviz documentation for details.\n\n use_container_width : bool\n If True, set the chart width to the column width. This takes\n precedence over the figure's native `width` value.\n\n Example\n -------\n\n >>> import streamlit as st\n >>> import graphviz as graphviz\n >>>\n >>> # Create a graphlib graph object\n >>> graph = graphviz.Digraph()\n >>> graph.edge('run', 'intr')\n >>> graph.edge('intr', 'runbl')\n >>> graph.edge('runbl', 'run')\n >>> graph.edge('run', 'kernel')\n >>> graph.edge('kernel', 'zombie')\n >>> graph.edge('kernel', 'sleep')\n >>> graph.edge('kernel', 'runmem')\n >>> graph.edge('sleep', 'swap')\n >>> graph.edge('swap', 'runswap')\n >>> graph.edge('runswap', 'new')\n >>> graph.edge('runswap', 'runmem')\n >>> graph.edge('new', 'runmem')\n >>> graph.edge('sleep', 'runmem')\n >>>\n >>> st.graphviz_chart(graph)\n\n Or you can render the chart from the graph using GraphViz's Dot\n language:\n\n >>> st.graphviz_chart('''\n digraph {\n run -> intr\n intr -> runbl\n runbl -> run\n run -> kernel\n kernel -> zombie\n kernel -> sleep\n kernel -> runmem\n sleep -> swap\n swap -> runswap\n runswap -> new\n runswap -> runmem\n new -> runmem\n sleep -> runmem\n }\n ''')\n\n .. output::\n https://share.streamlit.io/0.56.0-xTAd/index.html?id=GBn3GXZie5K1kXuBKe4yQL\n height: 400px\n\n \"\"\"\n if width != 0 and height != 0:\n import streamlit as st\n\n st.warning(\n \"The `width` and `height` arguments in `st.graphviz` are deprecated and will be removed on 2020-03-04\"\n )\n elif width != 0:\n import streamlit as st\n\n st.warning(\n \"The `width` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04\"\n )\n elif height != 0:\n import streamlit as st\n\n st.warning(\n \"The `height` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04\"\n )\n\n graphviz_chart_proto = GraphVizChartProto()\n marshall(graphviz_chart_proto, figure_or_dot, use_container_width)\n return dg._enqueue(\"graphviz_chart\", graphviz_chart_proto) # type: ignore\n\n\ndef marshall(proto, figure_or_dot, use_container_width):\n \"\"\"Construct a GraphViz chart object.\n\n See DeltaGenerator.graphviz_chart for docs.\n \"\"\"\n\n if type_util.is_graphviz_chart(figure_or_dot):\n dot = figure_or_dot.source\n elif isinstance(figure_or_dot, str):\n dot = figure_or_dot\n else:\n raise Exception(\"Unhandled type for graphviz chart: %s\" % type(figure_or_dot))\n\n proto.spec = dot\n proto.use_container_width = use_container_width\n", "path": "lib/streamlit/elements/graphviz_chart.py"}, {"content": "# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A wrapper for simple PyDeck scatter charts.\"\"\"\n\nimport copy\nimport json\nfrom typing import Any, Dict\n\nimport pandas as pd\n\nfrom streamlit.proto.DeckGlJsonChart_pb2 import 
DeckGlJsonChart as DeckGlJsonChartProto\nimport streamlit.elements.deck_gl_json_chart as deck_gl_json_chart\n\n\nclass MapMixin:\n def map(dg, data=None, zoom=None, use_container_width=True):\n \"\"\"Display a map with points on it.\n\n This is a wrapper around st.pydeck_chart to quickly create scatterplot\n charts on top of a map, with auto-centering and auto-zoom.\n\n When using this command, we advise all users to use a personal Mapbox\n token. This ensures the map tiles used in this chart are more\n robust. You can do this with the mapbox.token config option.\n\n To get a token for yourself, create an account at\n https://mapbox.com. It's free! (for moderate usage levels) See\n https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more\n info on how to set config options.\n\n Parameters\n ----------\n data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,\n or None\n The data to be plotted. Must have columns called 'lat', 'lon',\n 'latitude', or 'longitude'.\n zoom : int\n Zoom level as specified in\n https://wiki.openstreetmap.org/wiki/Zoom_levels\n\n Example\n -------\n >>> import pandas as pd\n >>> import numpy as np\n >>>\n >>> df = pd.DataFrame(\n ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4],\n ... columns=['lat', 'lon'])\n >>>\n >>> st.map(df)\n\n .. output::\n https://share.streamlit.io/0.53.0-SULT/index.html?id=9gTiomqPEbvHY2huTLoQtH\n height: 600px\n\n \"\"\"\n map_proto = DeckGlJsonChartProto()\n map_proto.json = to_deckgl_json(data, zoom)\n map_proto.use_container_width = use_container_width\n return dg._enqueue(\"deck_gl_json_chart\", map_proto) # type: ignore\n\n\n# Map used as the basis for st.map.\n_DEFAULT_MAP = dict(deck_gl_json_chart.EMPTY_MAP) # type: Dict[str, Any]\n_DEFAULT_MAP[\"mapStyle\"] = \"mapbox://styles/mapbox/light-v10\"\n\n# Other default parameters for st.map.\n_DEFAULT_COLOR = [200, 30, 0, 160]\n_ZOOM_LEVELS = [\n 360,\n 180,\n 90,\n 45,\n 22.5,\n 11.25,\n 5.625,\n 2.813,\n 1.406,\n 0.703,\n 0.352,\n 0.176,\n 0.088,\n 0.044,\n 0.022,\n 0.011,\n 0.005,\n 0.003,\n 0.001,\n 0.0005,\n]\n\n\ndef _get_zoom_level(distance):\n \"\"\"Get the zoom level for a given distance in degrees.\n\n See https://wiki.openstreetmap.org/wiki/Zoom_levels for reference.\n\n Parameters\n ----------\n distance : float\n How many degrees of longitude should fit in the map.\n\n Returns\n -------\n int\n The zoom level, from 0 to 29.\n\n \"\"\"\n\n for i in range(len(_ZOOM_LEVELS) - 1):\n if _ZOOM_LEVELS[i + 1] < distance <= _ZOOM_LEVELS[i]:\n return i\n\n\ndef to_deckgl_json(data, zoom):\n\n if data is None or data.empty:\n return json.dumps(_DEFAULT_MAP)\n\n if \"lat\" in data:\n lat = \"lat\"\n elif \"latitude\" in data:\n lat = \"latitude\"\n else:\n raise Exception('Map data must contain a column named \"latitude\" or \"lat\".')\n\n if \"lon\" in data:\n lon = \"lon\"\n elif \"longitude\" in data:\n lon = \"longitude\"\n else:\n raise Exception('Map data must contain a column called \"longitude\" or \"lon\".')\n\n if data[lon].isnull().values.any() or data[lat].isnull().values.any():\n raise Exception(\"Latitude and longitude data must be numeric.\")\n\n data = pd.DataFrame(data)\n\n min_lat = data[lat].min()\n max_lat = data[lat].max()\n min_lon = data[lon].min()\n max_lon = data[lon].max()\n center_lat = (max_lat + min_lat) / 2.0\n center_lon = (max_lon + min_lon) / 2.0\n range_lon = abs(max_lon - min_lon)\n range_lat = abs(max_lat - min_lat)\n\n if zoom == None:\n if range_lon > range_lat:\n longitude_distance = range_lon\n 
else:\n longitude_distance = range_lat\n zoom = _get_zoom_level(longitude_distance)\n\n # \"+1\" because itertuples includes the row index.\n lon_col_index = data.columns.get_loc(lon) + 1\n lat_col_index = data.columns.get_loc(lat) + 1\n final_data = []\n for row in data.itertuples():\n final_data.append(\n {\"lon\": float(row[lon_col_index]), \"lat\": float(row[lat_col_index])}\n )\n\n default = copy.deepcopy(_DEFAULT_MAP)\n default[\"initialViewState\"][\"latitude\"] = center_lat\n default[\"initialViewState\"][\"longitude\"] = center_lon\n default[\"initialViewState\"][\"zoom\"] = zoom\n default[\"layers\"] = [\n {\n \"@@type\": \"ScatterplotLayer\",\n \"getPosition\": \"@@=[lon, lat]\",\n \"getRadius\": 10,\n \"radiusScale\": 10,\n \"radiusMinPixels\": 3,\n \"getFillColor\": _DEFAULT_COLOR,\n \"data\": final_data,\n }\n ]\n return json.dumps(default)\n", "path": "lib/streamlit/elements/map.py"}], "after_files": [{"content": "# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Streamlit support for GraphViz charts.\"\"\"\n\nfrom streamlit import type_util\nfrom streamlit.logger import get_logger\nfrom streamlit.proto.GraphVizChart_pb2 import GraphVizChart as GraphVizChartProto\nfrom streamlit.errors import StreamlitAPIException\n\nLOGGER = get_logger(__name__)\n\n\nclass GraphvizMixin:\n def graphviz_chart(dg, figure_or_dot, width=0, height=0, use_container_width=False):\n \"\"\"Display a graph using the dagre-d3 library.\n\n Parameters\n ----------\n figure_or_dot : graphviz.dot.Graph, graphviz.dot.Digraph, str\n The Graphlib graph object or dot string to display\n\n width : number\n Deprecated. If != 0 (default), will show an alert.\n From now on you should set the width directly in the Graphviz\n spec. Please refer to the Graphviz documentation for details.\n\n height : number\n Deprecated. If != 0 (default), will show an alert.\n From now on you should set the height directly in the Graphviz\n spec. Please refer to the Graphviz documentation for details.\n\n use_container_width : bool\n If True, set the chart width to the column width. 
This takes\n precedence over the figure's native `width` value.\n\n Example\n -------\n\n >>> import streamlit as st\n >>> import graphviz as graphviz\n >>>\n >>> # Create a graphlib graph object\n >>> graph = graphviz.Digraph()\n >>> graph.edge('run', 'intr')\n >>> graph.edge('intr', 'runbl')\n >>> graph.edge('runbl', 'run')\n >>> graph.edge('run', 'kernel')\n >>> graph.edge('kernel', 'zombie')\n >>> graph.edge('kernel', 'sleep')\n >>> graph.edge('kernel', 'runmem')\n >>> graph.edge('sleep', 'swap')\n >>> graph.edge('swap', 'runswap')\n >>> graph.edge('runswap', 'new')\n >>> graph.edge('runswap', 'runmem')\n >>> graph.edge('new', 'runmem')\n >>> graph.edge('sleep', 'runmem')\n >>>\n >>> st.graphviz_chart(graph)\n\n Or you can render the chart from the graph using GraphViz's Dot\n language:\n\n >>> st.graphviz_chart('''\n digraph {\n run -> intr\n intr -> runbl\n runbl -> run\n run -> kernel\n kernel -> zombie\n kernel -> sleep\n kernel -> runmem\n sleep -> swap\n swap -> runswap\n runswap -> new\n runswap -> runmem\n new -> runmem\n sleep -> runmem\n }\n ''')\n\n .. output::\n https://share.streamlit.io/0.56.0-xTAd/index.html?id=GBn3GXZie5K1kXuBKe4yQL\n height: 400px\n\n \"\"\"\n if width != 0 and height != 0:\n import streamlit as st\n\n st.warning(\n \"The `width` and `height` arguments in `st.graphviz` are deprecated and will be removed on 2020-03-04\"\n )\n elif width != 0:\n import streamlit as st\n\n st.warning(\n \"The `width` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04\"\n )\n elif height != 0:\n import streamlit as st\n\n st.warning(\n \"The `height` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04\"\n )\n\n graphviz_chart_proto = GraphVizChartProto()\n marshall(graphviz_chart_proto, figure_or_dot, use_container_width)\n return dg._enqueue(\"graphviz_chart\", graphviz_chart_proto) # type: ignore\n\n\ndef marshall(proto, figure_or_dot, use_container_width):\n \"\"\"Construct a GraphViz chart object.\n\n See DeltaGenerator.graphviz_chart for docs.\n \"\"\"\n\n if type_util.is_graphviz_chart(figure_or_dot):\n dot = figure_or_dot.source\n elif isinstance(figure_or_dot, str):\n dot = figure_or_dot\n else:\n raise StreamlitAPIException(\n \"Unhandled type for graphviz chart: %s\" % type(figure_or_dot)\n )\n\n proto.spec = dot\n proto.use_container_width = use_container_width\n", "path": "lib/streamlit/elements/graphviz_chart.py"}, {"content": "# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A wrapper for simple PyDeck scatter charts.\"\"\"\n\nimport copy\nimport json\nfrom typing import Any, Dict\n\nimport pandas as pd\n\nfrom streamlit.proto.DeckGlJsonChart_pb2 import DeckGlJsonChart as DeckGlJsonChartProto\nimport streamlit.elements.deck_gl_json_chart as deck_gl_json_chart\nfrom streamlit.errors import StreamlitAPIException\n\n\nclass MapMixin:\n def map(dg, data=None, zoom=None, use_container_width=True):\n \"\"\"Display a map with points on it.\n\n This is a wrapper around 
st.pydeck_chart to quickly create scatterplot\n charts on top of a map, with auto-centering and auto-zoom.\n\n When using this command, we advise all users to use a personal Mapbox\n token. This ensures the map tiles used in this chart are more\n robust. You can do this with the mapbox.token config option.\n\n To get a token for yourself, create an account at\n https://mapbox.com. It's free! (for moderate usage levels) See\n https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more\n info on how to set config options.\n\n Parameters\n ----------\n data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,\n or None\n The data to be plotted. Must have columns called 'lat', 'lon',\n 'latitude', or 'longitude'.\n zoom : int\n Zoom level as specified in\n https://wiki.openstreetmap.org/wiki/Zoom_levels\n\n Example\n -------\n >>> import pandas as pd\n >>> import numpy as np\n >>>\n >>> df = pd.DataFrame(\n ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4],\n ... columns=['lat', 'lon'])\n >>>\n >>> st.map(df)\n\n .. output::\n https://share.streamlit.io/0.53.0-SULT/index.html?id=9gTiomqPEbvHY2huTLoQtH\n height: 600px\n\n \"\"\"\n map_proto = DeckGlJsonChartProto()\n map_proto.json = to_deckgl_json(data, zoom)\n map_proto.use_container_width = use_container_width\n return dg._enqueue(\"deck_gl_json_chart\", map_proto) # type: ignore\n\n\n# Map used as the basis for st.map.\n_DEFAULT_MAP = dict(deck_gl_json_chart.EMPTY_MAP) # type: Dict[str, Any]\n_DEFAULT_MAP[\"mapStyle\"] = \"mapbox://styles/mapbox/light-v10\"\n\n# Other default parameters for st.map.\n_DEFAULT_COLOR = [200, 30, 0, 160]\n_ZOOM_LEVELS = [\n 360,\n 180,\n 90,\n 45,\n 22.5,\n 11.25,\n 5.625,\n 2.813,\n 1.406,\n 0.703,\n 0.352,\n 0.176,\n 0.088,\n 0.044,\n 0.022,\n 0.011,\n 0.005,\n 0.003,\n 0.001,\n 0.0005,\n]\n\n\ndef _get_zoom_level(distance):\n \"\"\"Get the zoom level for a given distance in degrees.\n\n See https://wiki.openstreetmap.org/wiki/Zoom_levels for reference.\n\n Parameters\n ----------\n distance : float\n How many degrees of longitude should fit in the map.\n\n Returns\n -------\n int\n The zoom level, from 0 to 29.\n\n \"\"\"\n\n for i in range(len(_ZOOM_LEVELS) - 1):\n if _ZOOM_LEVELS[i + 1] < distance <= _ZOOM_LEVELS[i]:\n return i\n\n\ndef to_deckgl_json(data, zoom):\n\n if data is None or data.empty:\n return json.dumps(_DEFAULT_MAP)\n\n if \"lat\" in data:\n lat = \"lat\"\n elif \"latitude\" in data:\n lat = \"latitude\"\n else:\n raise StreamlitAPIException(\n 'Map data must contain a column named \"latitude\" or \"lat\".'\n )\n\n if \"lon\" in data:\n lon = \"lon\"\n elif \"longitude\" in data:\n lon = \"longitude\"\n else:\n raise StreamlitAPIException(\n 'Map data must contain a column called \"longitude\" or \"lon\".'\n )\n\n if data[lon].isnull().values.any() or data[lat].isnull().values.any():\n raise StreamlitAPIException(\"Latitude and longitude data must be numeric.\")\n\n data = pd.DataFrame(data)\n\n min_lat = data[lat].min()\n max_lat = data[lat].max()\n min_lon = data[lon].min()\n max_lon = data[lon].max()\n center_lat = (max_lat + min_lat) / 2.0\n center_lon = (max_lon + min_lon) / 2.0\n range_lon = abs(max_lon - min_lon)\n range_lat = abs(max_lat - min_lat)\n\n if zoom == None:\n if range_lon > range_lat:\n longitude_distance = range_lon\n else:\n longitude_distance = range_lat\n zoom = _get_zoom_level(longitude_distance)\n\n # \"+1\" because itertuples includes the row index.\n lon_col_index = data.columns.get_loc(lon) + 1\n lat_col_index = 
data.columns.get_loc(lat) + 1\n final_data = []\n for row in data.itertuples():\n final_data.append(\n {\"lon\": float(row[lon_col_index]), \"lat\": float(row[lat_col_index])}\n )\n\n default = copy.deepcopy(_DEFAULT_MAP)\n default[\"initialViewState\"][\"latitude\"] = center_lat\n default[\"initialViewState\"][\"longitude\"] = center_lon\n default[\"initialViewState\"][\"zoom\"] = zoom\n default[\"layers\"] = [\n {\n \"@@type\": \"ScatterplotLayer\",\n \"getPosition\": \"@@=[lon, lat]\",\n \"getRadius\": 10,\n \"radiusScale\": 10,\n \"radiusMinPixels\": 3,\n \"getFillColor\": _DEFAULT_COLOR,\n \"data\": final_data,\n }\n ]\n return json.dumps(default)\n", "path": "lib/streamlit/elements/map.py"}]} | 3,742 | 550 |
gh_patches_debug_7728 | rasdani/github-patches | git_diff | comic__grand-challenge.org-838 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove unused CIRRUS environment variables
These variables are now deprecated.
https://github.com/comic/grand-challenge.org/blob/30875fd388f2ad14212cf57c0caa8b9efcba19d9/app/grandchallenge/workstations/models.py#L201
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/grandchallenge/workstations/models.py`
Content:
```
1 from datetime import timedelta, datetime
2 from urllib.parse import unquote, urljoin
3
4 from django.conf import settings
5 from django.core.validators import MaxValueValidator, RegexValidator
6 from django.db import models
7 from django_extensions.db.models import TitleSlugDescriptionModel
8 from rest_framework.authtoken.models import Token
9 from simple_history.models import HistoricalRecords
10
11 from grandchallenge.challenges.models import get_logo_path
12 from grandchallenge.container_exec.backends.docker import Service
13 from grandchallenge.container_exec.models import ContainerImageModel
14 from grandchallenge.container_exec.tasks import start_service, stop_service
15 from grandchallenge.core.models import UUIDModel
16 from grandchallenge.subdomains.utils import reverse
17
18 __doc__ = """
19 Workstations are used to view, annotate and upload images to grand challenge.
20 A `workstation admin` is able to upload a ``WorkstationImage``, which is a docker container image.
21 A ``WorkstationImage`` expose a http and, optionally, a websocket port.
22 A `workstation user` can then launch a workstation ``Session`` for a particular ``WorkstationImage``.
23
24 When a new session is started, a new container instance of the selected ``WorkstationImage`` is lauched on the docker host.
25 The connection to the container will be proxied, and only accessible to the user that created the session.
26 The proxy will map the http and websocket connections from the user to the running instance, which is mapped by the container hostname.
27 The container instance will have the users API token set in the environment, so that it is able to interact with the grand challenge API as this user.
28 The user is able to stop the container, otherwise it will be terminated after ``maxmium_duration`` is reached.
29 """
30
31
32 class Workstation(UUIDModel, TitleSlugDescriptionModel):
33 """ This model holds the title and description of a workstation. """
34
35 logo = models.ImageField(upload_to=get_logo_path)
36
37 @property
38 def latest_ready_image(self):
39 """
40 Returns
41 -------
42 The most recent container image for this workstation
43 """
44 return (
45 self.workstationimage_set.filter(ready=True)
46 .order_by("-created")
47 .first()
48 )
49
50 def __str__(self):
51 return f"Workstation {self.title}"
52
53 def get_absolute_url(self):
54 return reverse("workstations:detail", kwargs={"slug": self.slug})
55
56
57 class WorkstationImage(UUIDModel, ContainerImageModel):
58 """
59 A ``WorkstationImage`` is a docker container image of a workstation.
60
61 Parameters
62 ----------
63 workstation
64 A ``Workstation`` can have multiple ``WorkstationImage``, that
65 represent different versions of a workstation
66 http_port
67 This container will expose a http server on this port
68 websocket_port
69 This container will expose a websocket on this port. Any relative url
70 that starts with ``/mlab4d4c4142`` will be proxied to this port.
71 initial_path
72 The initial path that users will navigate to in order to load the
73 workstation
74 """
75
76 workstation = models.ForeignKey(Workstation, on_delete=models.CASCADE)
77 http_port = models.PositiveIntegerField(
78 default=8080, validators=[MaxValueValidator(2 ** 16 - 1)]
79 )
80 websocket_port = models.PositiveIntegerField(
81 default=4114, validators=[MaxValueValidator(2 ** 16 - 1)]
82 )
83 initial_path = models.CharField(
84 max_length=256,
85 default="Applications/GrandChallengeViewer/index.html",
86 blank=True,
87 validators=[
88 RegexValidator(
89 regex=r"^(?:[^/][^\s]*)\Z",
90 message="This path is invalid, it must not start with a /",
91 )
92 ],
93 )
94
95 def __str__(self):
96 return f"Workstation Image {self.pk}"
97
98 def get_absolute_url(self):
99 return reverse(
100 "workstations:image-detail",
101 kwargs={"slug": self.workstation.slug, "pk": self.pk},
102 )
103
104
105 class Session(UUIDModel):
106 """
107 Tracks who has launched workstation images. The ``WorkstationImage`` will
108 be launched as a ``Service``. The ``Session`` is responsible for starting
109 and stopping the ``Service``.
110
111 Parameters
112 ----------
113
114 status
115 Stores what has happened with the service, is it running, errored, etc?
116 creator
117 Who created the session? This is also the only user that should be able
118 to access the launched service.
119 workstation_image
120 The container image that will be launched by this ``Session``.
121 maximum_duration
122 The maximum time that the service can be active before it is terminated
123 user_finished
124 Indicates if the user has chosen to end the session early
125 history
126 The history of this Session
127 """
128
129 QUEUED = 0
130 STARTED = 1
131 RUNNING = 2
132 FAILED = 3
133 STOPPED = 4
134
135 # These should match the values in session.js
136 STATUS_CHOICES = (
137 (QUEUED, "Queued"),
138 (STARTED, "Started"),
139 (RUNNING, "Running"),
140 (FAILED, "Failed"),
141 (STOPPED, "Stopped"),
142 )
143
144 status = models.PositiveSmallIntegerField(
145 choices=STATUS_CHOICES, default=QUEUED
146 )
147 creator = models.ForeignKey(
148 settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL
149 )
150 workstation_image = models.ForeignKey(
151 WorkstationImage, on_delete=models.CASCADE
152 )
153 maximum_duration = models.DurationField(default=timedelta(minutes=10))
154 user_finished = models.BooleanField(default=False)
155 history = HistoricalRecords()
156
157 def __str__(self):
158 return f"Session {self.pk}"
159
160 @property
161 def task_kwargs(self) -> dict:
162 """
163 Returns
164 -------
165 The kwargs that need to be passed to celery to get this object
166 """
167 return {
168 "app_label": self._meta.app_label,
169 "model_name": self._meta.model_name,
170 "pk": self.pk,
171 }
172
173 @property
174 def hostname(self) -> str:
175 """
176 Returns
177 -------
178 The unique hostname for this session
179 """
180 return (
181 f"{self.pk}.{self._meta.model_name}.{self._meta.app_label}".lower()
182 )
183
184 @property
185 def expires_at(self) -> datetime:
186 """
187 Returns
188 -------
189 The time when this session expires.
190 """
191 return self.created + self.maximum_duration
192
193 @property
194 def environment(self) -> dict:
195 """
196 Returns
197 -------
198 The environment variables that should be set on the container.
199 """
200 env = {
201 "GRAND_CHALLENGE_PROXY_URL_MAPPINGS": "",
202 "GRAND_CHALLENGE_QUERY_IMAGE_URL": unquote(
203 reverse("api:image-detail", kwargs={"pk": "{key}"})
204 ),
205 }
206
207 if self.creator:
208 env.update(
209 {
210 "GRAND_CHALLENGE_AUTHORIZATION": f"TOKEN {Token.objects.get_or_create(user=self.creator)[0].key}"
211 }
212 )
213
214 if settings.DEBUG:
215 # Allow the container to communicate with the dev environment
216 env.update({"GRAND_CHALLENGE_UNSAFE": "True"})
217
218 return env
219
220 @property
221 def service(self) -> Service:
222 """
223 Returns
224 -------
225 The service for this session, could be active or inactive.
226 """
227 return Service(
228 job_id=self.pk,
229 job_model=f"{self._meta.app_label}-{self._meta.model_name}",
230 exec_image=self.workstation_image.image,
231 exec_image_sha256=self.workstation_image.image_sha256,
232 )
233
234 @property
235 def workstation_url(self) -> str:
236 """
237 Returns
238 -------
239 The url that users will use to access the workstation instance.
240 """
241 return urljoin(
242 self.get_absolute_url(), self.workstation_image.initial_path
243 )
244
245 def start(self) -> None:
246 """
247 Starts the service for this session, ensuring that the
248 ``workstation_image`` is ready to be used and that
249 ``WORKSTATIONS_MAXIMUM_SESSIONS`` has not been reached.
250
251 Raises
252 ------
253 RunTimeError
254 If the service cannot be started.
255
256 Returns
257 -------
258 """
259 try:
260 if not self.workstation_image.ready:
261 raise RuntimeError("Workstation image was not ready")
262
263 if (
264 Session.objects.all()
265 .filter(status__in=[Session.RUNNING, Session.STARTED])
266 .count()
267 >= settings.WORKSTATIONS_MAXIMUM_SESSIONS
268 ):
269 raise RuntimeError("Too many sessions are running")
270
271 self.service.start(
272 http_port=self.workstation_image.http_port,
273 websocket_port=self.workstation_image.websocket_port,
274 hostname=self.hostname,
275 environment=self.environment,
276 )
277 self.update_status(status=self.STARTED)
278 except RuntimeError:
279 self.update_status(status=self.FAILED)
280
281 def stop(self) -> None:
282 """
283 Stops the service for this session, cleaning up all of the containers.
284
285 Returns
286 -------
287 """
288 self.service.stop_and_cleanup()
289 self.update_status(status=self.STOPPED)
290
291 def update_status(self, *, status: STATUS_CHOICES) -> None:
292 """
293 Updates the status of this session.
294
295 Parameters
296 ----------
297 status
298 The new status for this session.
299
300 Returns
301 -------
302 """
303 self.status = status
304 self.save()
305
306 def get_absolute_url(self):
307 return reverse(
308 "workstations:session-detail",
309 kwargs={
310 "slug": self.workstation_image.workstation.slug,
311 "pk": self.pk,
312 },
313 )
314
315 def save(self, *args, **kwargs) -> None:
316 """
317 Saves the session instance, starting or stopping the service if needed.
318
319 Returns
320 -------
321 """
322 created = self._state.adding
323
324 super().save(*args, **kwargs)
325
326 if created:
327 start_service.apply_async(kwargs=self.task_kwargs)
328 elif self.user_finished and self.status != self.STOPPED:
329 stop_service.apply_async(kwargs=self.task_kwargs)
330
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/grandchallenge/workstations/models.py b/app/grandchallenge/workstations/models.py
--- a/app/grandchallenge/workstations/models.py
+++ b/app/grandchallenge/workstations/models.py
@@ -197,12 +197,7 @@
-------
The environment variables that should be set on the container.
"""
- env = {
- "GRAND_CHALLENGE_PROXY_URL_MAPPINGS": "",
- "GRAND_CHALLENGE_QUERY_IMAGE_URL": unquote(
- reverse("api:image-detail", kwargs={"pk": "{key}"})
- ),
- }
+ env = {"GRAND_CHALLENGE_API_ROOT": unquote(reverse("api:api-root"))}
if self.creator:
env.update(
| {"golden_diff": "diff --git a/app/grandchallenge/workstations/models.py b/app/grandchallenge/workstations/models.py\n--- a/app/grandchallenge/workstations/models.py\n+++ b/app/grandchallenge/workstations/models.py\n@@ -197,12 +197,7 @@\n -------\n The environment variables that should be set on the container.\n \"\"\"\n- env = {\n- \"GRAND_CHALLENGE_PROXY_URL_MAPPINGS\": \"\",\n- \"GRAND_CHALLENGE_QUERY_IMAGE_URL\": unquote(\n- reverse(\"api:image-detail\", kwargs={\"pk\": \"{key}\"})\n- ),\n- }\n+ env = {\"GRAND_CHALLENGE_API_ROOT\": unquote(reverse(\"api:api-root\"))}\n \n if self.creator:\n env.update(\n", "issue": "Remove unused CIRRUS environment variables\nThese variables are now deprecated.\r\n\r\nhttps://github.com/comic/grand-challenge.org/blob/30875fd388f2ad14212cf57c0caa8b9efcba19d9/app/grandchallenge/workstations/models.py#L201\n", "before_files": [{"content": "from datetime import timedelta, datetime\nfrom urllib.parse import unquote, urljoin\n\nfrom django.conf import settings\nfrom django.core.validators import MaxValueValidator, RegexValidator\nfrom django.db import models\nfrom django_extensions.db.models import TitleSlugDescriptionModel\nfrom rest_framework.authtoken.models import Token\nfrom simple_history.models import HistoricalRecords\n\nfrom grandchallenge.challenges.models import get_logo_path\nfrom grandchallenge.container_exec.backends.docker import Service\nfrom grandchallenge.container_exec.models import ContainerImageModel\nfrom grandchallenge.container_exec.tasks import start_service, stop_service\nfrom grandchallenge.core.models import UUIDModel\nfrom grandchallenge.subdomains.utils import reverse\n\n__doc__ = \"\"\"\nWorkstations are used to view, annotate and upload images to grand challenge.\nA `workstation admin` is able to upload a ``WorkstationImage``, which is a docker container image.\nA ``WorkstationImage`` expose a http and, optionally, a websocket port.\nA `workstation user` can then launch a workstation ``Session`` for a particular ``WorkstationImage``.\n\nWhen a new session is started, a new container instance of the selected ``WorkstationImage`` is lauched on the docker host.\nThe connection to the container will be proxied, and only accessible to the user that created the session.\nThe proxy will map the http and websocket connections from the user to the running instance, which is mapped by the container hostname.\nThe container instance will have the users API token set in the environment, so that it is able to interact with the grand challenge API as this user.\nThe user is able to stop the container, otherwise it will be terminated after ``maxmium_duration`` is reached.\n\"\"\"\n\n\nclass Workstation(UUIDModel, TitleSlugDescriptionModel):\n \"\"\" This model holds the title and description of a workstation. 
\"\"\"\n\n logo = models.ImageField(upload_to=get_logo_path)\n\n @property\n def latest_ready_image(self):\n \"\"\"\n Returns\n -------\n The most recent container image for this workstation\n \"\"\"\n return (\n self.workstationimage_set.filter(ready=True)\n .order_by(\"-created\")\n .first()\n )\n\n def __str__(self):\n return f\"Workstation {self.title}\"\n\n def get_absolute_url(self):\n return reverse(\"workstations:detail\", kwargs={\"slug\": self.slug})\n\n\nclass WorkstationImage(UUIDModel, ContainerImageModel):\n \"\"\"\n A ``WorkstationImage`` is a docker container image of a workstation.\n\n Parameters\n ----------\n workstation\n A ``Workstation`` can have multiple ``WorkstationImage``, that\n represent different versions of a workstation\n http_port\n This container will expose a http server on this port\n websocket_port\n This container will expose a websocket on this port. Any relative url\n that starts with ``/mlab4d4c4142`` will be proxied to this port.\n initial_path\n The initial path that users will navigate to in order to load the\n workstation\n \"\"\"\n\n workstation = models.ForeignKey(Workstation, on_delete=models.CASCADE)\n http_port = models.PositiveIntegerField(\n default=8080, validators=[MaxValueValidator(2 ** 16 - 1)]\n )\n websocket_port = models.PositiveIntegerField(\n default=4114, validators=[MaxValueValidator(2 ** 16 - 1)]\n )\n initial_path = models.CharField(\n max_length=256,\n default=\"Applications/GrandChallengeViewer/index.html\",\n blank=True,\n validators=[\n RegexValidator(\n regex=r\"^(?:[^/][^\\s]*)\\Z\",\n message=\"This path is invalid, it must not start with a /\",\n )\n ],\n )\n\n def __str__(self):\n return f\"Workstation Image {self.pk}\"\n\n def get_absolute_url(self):\n return reverse(\n \"workstations:image-detail\",\n kwargs={\"slug\": self.workstation.slug, \"pk\": self.pk},\n )\n\n\nclass Session(UUIDModel):\n \"\"\"\n Tracks who has launched workstation images. The ``WorkstationImage`` will\n be launched as a ``Service``. The ``Session`` is responsible for starting\n and stopping the ``Service``.\n\n Parameters\n ----------\n\n status\n Stores what has happened with the service, is it running, errored, etc?\n creator\n Who created the session? 
This is also the only user that should be able\n to access the launched service.\n workstation_image\n The container image that will be launched by this ``Session``.\n maximum_duration\n The maximum time that the service can be active before it is terminated\n user_finished\n Indicates if the user has chosen to end the session early\n history\n The history of this Session\n \"\"\"\n\n QUEUED = 0\n STARTED = 1\n RUNNING = 2\n FAILED = 3\n STOPPED = 4\n\n # These should match the values in session.js\n STATUS_CHOICES = (\n (QUEUED, \"Queued\"),\n (STARTED, \"Started\"),\n (RUNNING, \"Running\"),\n (FAILED, \"Failed\"),\n (STOPPED, \"Stopped\"),\n )\n\n status = models.PositiveSmallIntegerField(\n choices=STATUS_CHOICES, default=QUEUED\n )\n creator = models.ForeignKey(\n settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL\n )\n workstation_image = models.ForeignKey(\n WorkstationImage, on_delete=models.CASCADE\n )\n maximum_duration = models.DurationField(default=timedelta(minutes=10))\n user_finished = models.BooleanField(default=False)\n history = HistoricalRecords()\n\n def __str__(self):\n return f\"Session {self.pk}\"\n\n @property\n def task_kwargs(self) -> dict:\n \"\"\"\n Returns\n -------\n The kwargs that need to be passed to celery to get this object\n \"\"\"\n return {\n \"app_label\": self._meta.app_label,\n \"model_name\": self._meta.model_name,\n \"pk\": self.pk,\n }\n\n @property\n def hostname(self) -> str:\n \"\"\"\n Returns\n -------\n The unique hostname for this session\n \"\"\"\n return (\n f\"{self.pk}.{self._meta.model_name}.{self._meta.app_label}\".lower()\n )\n\n @property\n def expires_at(self) -> datetime:\n \"\"\"\n Returns\n -------\n The time when this session expires.\n \"\"\"\n return self.created + self.maximum_duration\n\n @property\n def environment(self) -> dict:\n \"\"\"\n Returns\n -------\n The environment variables that should be set on the container.\n \"\"\"\n env = {\n \"GRAND_CHALLENGE_PROXY_URL_MAPPINGS\": \"\",\n \"GRAND_CHALLENGE_QUERY_IMAGE_URL\": unquote(\n reverse(\"api:image-detail\", kwargs={\"pk\": \"{key}\"})\n ),\n }\n\n if self.creator:\n env.update(\n {\n \"GRAND_CHALLENGE_AUTHORIZATION\": f\"TOKEN {Token.objects.get_or_create(user=self.creator)[0].key}\"\n }\n )\n\n if settings.DEBUG:\n # Allow the container to communicate with the dev environment\n env.update({\"GRAND_CHALLENGE_UNSAFE\": \"True\"})\n\n return env\n\n @property\n def service(self) -> Service:\n \"\"\"\n Returns\n -------\n The service for this session, could be active or inactive.\n \"\"\"\n return Service(\n job_id=self.pk,\n job_model=f\"{self._meta.app_label}-{self._meta.model_name}\",\n exec_image=self.workstation_image.image,\n exec_image_sha256=self.workstation_image.image_sha256,\n )\n\n @property\n def workstation_url(self) -> str:\n \"\"\"\n Returns\n -------\n The url that users will use to access the workstation instance.\n \"\"\"\n return urljoin(\n self.get_absolute_url(), self.workstation_image.initial_path\n )\n\n def start(self) -> None:\n \"\"\"\n Starts the service for this session, ensuring that the\n ``workstation_image`` is ready to be used and that\n ``WORKSTATIONS_MAXIMUM_SESSIONS`` has not been reached.\n\n Raises\n ------\n RunTimeError\n If the service cannot be started.\n\n Returns\n -------\n \"\"\"\n try:\n if not self.workstation_image.ready:\n raise RuntimeError(\"Workstation image was not ready\")\n\n if (\n Session.objects.all()\n .filter(status__in=[Session.RUNNING, Session.STARTED])\n .count()\n >= 
settings.WORKSTATIONS_MAXIMUM_SESSIONS\n ):\n raise RuntimeError(\"Too many sessions are running\")\n\n self.service.start(\n http_port=self.workstation_image.http_port,\n websocket_port=self.workstation_image.websocket_port,\n hostname=self.hostname,\n environment=self.environment,\n )\n self.update_status(status=self.STARTED)\n except RuntimeError:\n self.update_status(status=self.FAILED)\n\n def stop(self) -> None:\n \"\"\"\n Stops the service for this session, cleaning up all of the containers.\n\n Returns\n -------\n \"\"\"\n self.service.stop_and_cleanup()\n self.update_status(status=self.STOPPED)\n\n def update_status(self, *, status: STATUS_CHOICES) -> None:\n \"\"\"\n Updates the status of this session.\n\n Parameters\n ----------\n status\n The new status for this session.\n\n Returns\n -------\n \"\"\"\n self.status = status\n self.save()\n\n def get_absolute_url(self):\n return reverse(\n \"workstations:session-detail\",\n kwargs={\n \"slug\": self.workstation_image.workstation.slug,\n \"pk\": self.pk,\n },\n )\n\n def save(self, *args, **kwargs) -> None:\n \"\"\"\n Saves the session instance, starting or stopping the service if needed.\n\n Returns\n -------\n \"\"\"\n created = self._state.adding\n\n super().save(*args, **kwargs)\n\n if created:\n start_service.apply_async(kwargs=self.task_kwargs)\n elif self.user_finished and self.status != self.STOPPED:\n stop_service.apply_async(kwargs=self.task_kwargs)\n", "path": "app/grandchallenge/workstations/models.py"}], "after_files": [{"content": "from datetime import timedelta, datetime\nfrom urllib.parse import unquote, urljoin\n\nfrom django.conf import settings\nfrom django.core.validators import MaxValueValidator, RegexValidator\nfrom django.db import models\nfrom django_extensions.db.models import TitleSlugDescriptionModel\nfrom rest_framework.authtoken.models import Token\nfrom simple_history.models import HistoricalRecords\n\nfrom grandchallenge.challenges.models import get_logo_path\nfrom grandchallenge.container_exec.backends.docker import Service\nfrom grandchallenge.container_exec.models import ContainerImageModel\nfrom grandchallenge.container_exec.tasks import start_service, stop_service\nfrom grandchallenge.core.models import UUIDModel\nfrom grandchallenge.subdomains.utils import reverse\n\n__doc__ = \"\"\"\nWorkstations are used to view, annotate and upload images to grand challenge.\nA `workstation admin` is able to upload a ``WorkstationImage``, which is a docker container image.\nA ``WorkstationImage`` expose a http and, optionally, a websocket port.\nA `workstation user` can then launch a workstation ``Session`` for a particular ``WorkstationImage``.\n\nWhen a new session is started, a new container instance of the selected ``WorkstationImage`` is lauched on the docker host.\nThe connection to the container will be proxied, and only accessible to the user that created the session.\nThe proxy will map the http and websocket connections from the user to the running instance, which is mapped by the container hostname.\nThe container instance will have the users API token set in the environment, so that it is able to interact with the grand challenge API as this user.\nThe user is able to stop the container, otherwise it will be terminated after ``maxmium_duration`` is reached.\n\"\"\"\n\n\nclass Workstation(UUIDModel, TitleSlugDescriptionModel):\n \"\"\" This model holds the title and description of a workstation. 
\"\"\"\n\n logo = models.ImageField(upload_to=get_logo_path)\n\n @property\n def latest_ready_image(self):\n \"\"\"\n Returns\n -------\n The most recent container image for this workstation\n \"\"\"\n return (\n self.workstationimage_set.filter(ready=True)\n .order_by(\"-created\")\n .first()\n )\n\n def __str__(self):\n return f\"Workstation {self.title}\"\n\n def get_absolute_url(self):\n return reverse(\"workstations:detail\", kwargs={\"slug\": self.slug})\n\n\nclass WorkstationImage(UUIDModel, ContainerImageModel):\n \"\"\"\n A ``WorkstationImage`` is a docker container image of a workstation.\n\n Parameters\n ----------\n workstation\n A ``Workstation`` can have multiple ``WorkstationImage``, that\n represent different versions of a workstation\n http_port\n This container will expose a http server on this port\n websocket_port\n This container will expose a websocket on this port. Any relative url\n that starts with ``/mlab4d4c4142`` will be proxied to this port.\n initial_path\n The initial path that users will navigate to in order to load the\n workstation\n \"\"\"\n\n workstation = models.ForeignKey(Workstation, on_delete=models.CASCADE)\n http_port = models.PositiveIntegerField(\n default=8080, validators=[MaxValueValidator(2 ** 16 - 1)]\n )\n websocket_port = models.PositiveIntegerField(\n default=4114, validators=[MaxValueValidator(2 ** 16 - 1)]\n )\n initial_path = models.CharField(\n max_length=256,\n default=\"Applications/GrandChallengeViewer/index.html\",\n blank=True,\n validators=[\n RegexValidator(\n regex=r\"^(?:[^/][^\\s]*)\\Z\",\n message=\"This path is invalid, it must not start with a /\",\n )\n ],\n )\n\n def __str__(self):\n return f\"Workstation Image {self.pk}\"\n\n def get_absolute_url(self):\n return reverse(\n \"workstations:image-detail\",\n kwargs={\"slug\": self.workstation.slug, \"pk\": self.pk},\n )\n\n\nclass Session(UUIDModel):\n \"\"\"\n Tracks who has launched workstation images. The ``WorkstationImage`` will\n be launched as a ``Service``. The ``Session`` is responsible for starting\n and stopping the ``Service``.\n\n Parameters\n ----------\n\n status\n Stores what has happened with the service, is it running, errored, etc?\n creator\n Who created the session? 
This is also the only user that should be able\n to access the launched service.\n workstation_image\n The container image that will be launched by this ``Session``.\n maximum_duration\n The maximum time that the service can be active before it is terminated\n user_finished\n Indicates if the user has chosen to end the session early\n history\n The history of this Session\n \"\"\"\n\n QUEUED = 0\n STARTED = 1\n RUNNING = 2\n FAILED = 3\n STOPPED = 4\n\n # These should match the values in session.js\n STATUS_CHOICES = (\n (QUEUED, \"Queued\"),\n (STARTED, \"Started\"),\n (RUNNING, \"Running\"),\n (FAILED, \"Failed\"),\n (STOPPED, \"Stopped\"),\n )\n\n status = models.PositiveSmallIntegerField(\n choices=STATUS_CHOICES, default=QUEUED\n )\n creator = models.ForeignKey(\n settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL\n )\n workstation_image = models.ForeignKey(\n WorkstationImage, on_delete=models.CASCADE\n )\n maximum_duration = models.DurationField(default=timedelta(minutes=10))\n user_finished = models.BooleanField(default=False)\n history = HistoricalRecords()\n\n def __str__(self):\n return f\"Session {self.pk}\"\n\n @property\n def task_kwargs(self) -> dict:\n \"\"\"\n Returns\n -------\n The kwargs that need to be passed to celery to get this object\n \"\"\"\n return {\n \"app_label\": self._meta.app_label,\n \"model_name\": self._meta.model_name,\n \"pk\": self.pk,\n }\n\n @property\n def hostname(self) -> str:\n \"\"\"\n Returns\n -------\n The unique hostname for this session\n \"\"\"\n return (\n f\"{self.pk}.{self._meta.model_name}.{self._meta.app_label}\".lower()\n )\n\n @property\n def expires_at(self) -> datetime:\n \"\"\"\n Returns\n -------\n The time when this session expires.\n \"\"\"\n return self.created + self.maximum_duration\n\n @property\n def environment(self) -> dict:\n \"\"\"\n Returns\n -------\n The environment variables that should be set on the container.\n \"\"\"\n env = {\"GRAND_CHALLENGE_API_ROOT\": unquote(reverse(\"api:api-root\"))}\n\n if self.creator:\n env.update(\n {\n \"GRAND_CHALLENGE_AUTHORIZATION\": f\"TOKEN {Token.objects.get_or_create(user=self.creator)[0].key}\"\n }\n )\n\n if settings.DEBUG:\n # Allow the container to communicate with the dev environment\n env.update({\"GRAND_CHALLENGE_UNSAFE\": \"True\"})\n\n return env\n\n @property\n def service(self) -> Service:\n \"\"\"\n Returns\n -------\n The service for this session, could be active or inactive.\n \"\"\"\n return Service(\n job_id=self.pk,\n job_model=f\"{self._meta.app_label}-{self._meta.model_name}\",\n exec_image=self.workstation_image.image,\n exec_image_sha256=self.workstation_image.image_sha256,\n )\n\n @property\n def workstation_url(self) -> str:\n \"\"\"\n Returns\n -------\n The url that users will use to access the workstation instance.\n \"\"\"\n return urljoin(\n self.get_absolute_url(), self.workstation_image.initial_path\n )\n\n def start(self) -> None:\n \"\"\"\n Starts the service for this session, ensuring that the\n ``workstation_image`` is ready to be used and that\n ``WORKSTATIONS_MAXIMUM_SESSIONS`` has not been reached.\n\n Raises\n ------\n RunTimeError\n If the service cannot be started.\n\n Returns\n -------\n \"\"\"\n try:\n if not self.workstation_image.ready:\n raise RuntimeError(\"Workstation image was not ready\")\n\n if (\n Session.objects.all()\n .filter(status__in=[Session.RUNNING, Session.STARTED])\n .count()\n >= settings.WORKSTATIONS_MAXIMUM_SESSIONS\n ):\n raise RuntimeError(\"Too many sessions are running\")\n\n 
self.service.start(\n http_port=self.workstation_image.http_port,\n websocket_port=self.workstation_image.websocket_port,\n hostname=self.hostname,\n environment=self.environment,\n )\n self.update_status(status=self.STARTED)\n except RuntimeError:\n self.update_status(status=self.FAILED)\n\n def stop(self) -> None:\n \"\"\"\n Stops the service for this session, cleaning up all of the containers.\n\n Returns\n -------\n \"\"\"\n self.service.stop_and_cleanup()\n self.update_status(status=self.STOPPED)\n\n def update_status(self, *, status: STATUS_CHOICES) -> None:\n \"\"\"\n Updates the status of this session.\n\n Parameters\n ----------\n status\n The new status for this session.\n\n Returns\n -------\n \"\"\"\n self.status = status\n self.save()\n\n def get_absolute_url(self):\n return reverse(\n \"workstations:session-detail\",\n kwargs={\n \"slug\": self.workstation_image.workstation.slug,\n \"pk\": self.pk,\n },\n )\n\n def save(self, *args, **kwargs) -> None:\n \"\"\"\n Saves the session instance, starting or stopping the service if needed.\n\n Returns\n -------\n \"\"\"\n created = self._state.adding\n\n super().save(*args, **kwargs)\n\n if created:\n start_service.apply_async(kwargs=self.task_kwargs)\n elif self.user_finished and self.status != self.STOPPED:\n stop_service.apply_async(kwargs=self.task_kwargs)\n", "path": "app/grandchallenge/workstations/models.py"}]} | 3,389 | 165 |
gh_patches_debug_66489 | rasdani/github-patches | git_diff | aio-libs__aiohttp-1752 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Encoding is always UTF-8 in POST data
## Long story short
I'm doing a `POST` request via `client.post`:
```
data = aiohttp.FormData({
'FindText': name,
}, charset='windows-1251')
client.post(base_url, params={'RowFrom': offset}, data=data)
```
where `name` contains some non-Latin text (`'хан'`)
## Expected behaviour
POST data should contain: `FindText=%D5%E0%ED`
## Actual behaviour
`FindText=%D1%85%D0%B0%D0%BD'`
## Steps to reproduce
Looking through the code of `formdata.py:99`
```
urlencode(data, doseq=True).encode(charset),
```
I noticed that `data` is URL-encoded in UTF-8 first and then encoded to `windows-1251` (which has no effect on the `%D1...` sequence).
For now, I just do this manually in my code:
```
data = urlencode({
'FindText': name,
}, encoding='windows-1251')
```
And I get the string that I need.
Is it a bug? Or am I doing it wrong?
## Your environment
```
Python 3.6.0 (default, Jan 16 2017, 12:12:55)
[GCC 6.3.1 20170109] on linux
---
aiohttp==2.0.3
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `aiohttp/formdata.py`
Content:
```
1 import io
2 from urllib.parse import urlencode
3
4 from multidict import MultiDict, MultiDictProxy
5
6 from . import hdrs, multipart, payload
7 from .helpers import guess_filename
8
9 __all__ = ('FormData',)
10
11
12 class FormData:
13 """Helper class for multipart/form-data and
14 application/x-www-form-urlencoded body generation."""
15
16 def __init__(self, fields=(), quote_fields=True, charset=None):
17 self._writer = multipart.MultipartWriter('form-data')
18 self._fields = []
19 self._is_multipart = False
20 self._quote_fields = quote_fields
21 self._charset = charset
22
23 if isinstance(fields, dict):
24 fields = list(fields.items())
25 elif not isinstance(fields, (list, tuple)):
26 fields = (fields,)
27 self.add_fields(*fields)
28
29 @property
30 def is_multipart(self):
31 return self._is_multipart
32
33 def add_field(self, name, value, *, content_type=None, filename=None,
34 content_transfer_encoding=None):
35
36 if isinstance(value, io.IOBase):
37 self._is_multipart = True
38 elif isinstance(value, (bytes, bytearray, memoryview)):
39 if filename is None and content_transfer_encoding is None:
40 filename = name
41
42 type_options = MultiDict({'name': name})
43 if filename is not None and not isinstance(filename, str):
44 raise TypeError('filename must be an instance of str. '
45 'Got: %s' % filename)
46 if filename is None and isinstance(value, io.IOBase):
47 filename = guess_filename(value, name)
48 if filename is not None:
49 type_options['filename'] = filename
50 self._is_multipart = True
51
52 headers = {}
53 if content_type is not None:
54 if not isinstance(content_type, str):
55 raise TypeError('content_type must be an instance of str. '
56 'Got: %s' % content_type)
57 headers[hdrs.CONTENT_TYPE] = content_type
58 self._is_multipart = True
59 if content_transfer_encoding is not None:
60 if not isinstance(content_transfer_encoding, str):
61 raise TypeError('content_transfer_encoding must be an instance'
62 ' of str. Got: %s' % content_transfer_encoding)
63 headers[hdrs.CONTENT_TRANSFER_ENCODING] = content_transfer_encoding
64 self._is_multipart = True
65
66 self._fields.append((type_options, headers, value))
67
68 def add_fields(self, *fields):
69 to_add = list(fields)
70
71 while to_add:
72 rec = to_add.pop(0)
73
74 if isinstance(rec, io.IOBase):
75 k = guess_filename(rec, 'unknown')
76 self.add_field(k, rec)
77
78 elif isinstance(rec, (MultiDictProxy, MultiDict)):
79 to_add.extend(rec.items())
80
81 elif isinstance(rec, (list, tuple)) and len(rec) == 2:
82 k, fp = rec
83 self.add_field(k, fp)
84
85 else:
86 raise TypeError('Only io.IOBase, multidict and (name, file) '
87 'pairs allowed, use .add_field() for passing '
88 'more complex parameters, got {!r}'
89 .format(rec))
90
91 def _gen_form_urlencoded(self):
92 # form data (x-www-form-urlencoded)
93 data = []
94 for type_options, _, value in self._fields:
95 data.append((type_options['name'], value))
96
97 charset = self._charset if self._charset is not None else 'utf-8'
98 return payload.BytesPayload(
99 urlencode(data, doseq=True).encode(charset),
100 content_type='application/x-www-form-urlencoded')
101
102 def _gen_form_data(self):
103 """Encode a list of fields using the multipart/form-data MIME format"""
104 for dispparams, headers, value in self._fields:
105 try:
106 if hdrs.CONTENT_TYPE in headers:
107 part = payload.get_payload(
108 value, content_type=headers[hdrs.CONTENT_TYPE],
109 headers=headers, encoding=self._charset)
110 else:
111 part = payload.get_payload(
112 value, headers=headers, encoding=self._charset)
113 except Exception as exc:
114 raise TypeError(
115 'Can not serialize value type: %r\n '
116 'headers: %r\n value: %r' % (
117 type(value), headers, value)) from exc
118
119 if dispparams:
120 part.set_content_disposition(
121 'form-data', quote_fields=self._quote_fields, **dispparams
122 )
123 # FIXME cgi.FieldStorage doesn't likes body parts with
124 # Content-Length which were sent via chunked transfer encoding
125 part.headers.pop(hdrs.CONTENT_LENGTH, None)
126
127 self._writer.append_payload(part)
128
129 return self._writer
130
131 def __call__(self):
132 if self._is_multipart:
133 return self._gen_form_data()
134 else:
135 return self._gen_form_urlencoded()
136
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/aiohttp/formdata.py b/aiohttp/formdata.py
--- a/aiohttp/formdata.py
+++ b/aiohttp/formdata.py
@@ -96,7 +96,7 @@
charset = self._charset if self._charset is not None else 'utf-8'
return payload.BytesPayload(
- urlencode(data, doseq=True).encode(charset),
+ urlencode(data, doseq=True, encoding=charset).encode(),
content_type='application/x-www-form-urlencoded')
def _gen_form_data(self):
| {"golden_diff": "diff --git a/aiohttp/formdata.py b/aiohttp/formdata.py\n--- a/aiohttp/formdata.py\n+++ b/aiohttp/formdata.py\n@@ -96,7 +96,7 @@\n \n charset = self._charset if self._charset is not None else 'utf-8'\n return payload.BytesPayload(\n- urlencode(data, doseq=True).encode(charset),\n+ urlencode(data, doseq=True, encoding=charset).encode(),\n content_type='application/x-www-form-urlencoded')\n \n def _gen_form_data(self):\n", "issue": "Encoding is always UTF-8 in POST data\n## Long story short\r\n\r\nI'm doing a `POST` request via `client.post`:\r\n\r\n```\r\ndata = aiohttp.FormData({\r\n 'FindText': name,\r\n }, charset='windows-1251')\r\n\r\nclient.post(base_url, params={'RowFrom': offset}, data=data)\r\n```\r\n\r\nwhere `name` contains some none-latin text (`'\u0445\u0430\u043d'`)\r\n\r\n## Expected behaviour\r\n\r\nPOST data should contain: `FindText=%D5%E0%ED`\r\n\r\n## Actual behaviour\r\n\r\n`FindText=%D1%85%D0%B0%D0%BD'`\r\n\r\n## Steps to reproduce\r\n\r\nLooking through the code of `formdata.py:99`\r\n\r\n```\r\nurlencode(data, doseq=True).encode(charset),\r\n```\r\n\r\nI noticed, that `data` is urlencoded in UTF-8 first and then encoded to `windows-1251` (and that has no effect on `%D1...`).\r\n\r\nFor now, I just manually do in my code:\r\n\r\n```\r\ndata = urlencode({\r\n 'FindText': name,\r\n }, encoding='windows-1251')\r\n```\r\n\r\nAnd I get the string that I need.\r\n\r\nIs it a bug? Or am I doing it wrong?\r\n\r\n## Your environment\r\n\r\n```\r\nPython 3.6.0 (default, Jan 16 2017, 12:12:55) \r\n[GCC 6.3.1 20170109] on linux\r\n---\r\naiohttp==2.0.3\r\n```\r\n\n", "before_files": [{"content": "import io\nfrom urllib.parse import urlencode\n\nfrom multidict import MultiDict, MultiDictProxy\n\nfrom . import hdrs, multipart, payload\nfrom .helpers import guess_filename\n\n__all__ = ('FormData',)\n\n\nclass FormData:\n \"\"\"Helper class for multipart/form-data and\n application/x-www-form-urlencoded body generation.\"\"\"\n\n def __init__(self, fields=(), quote_fields=True, charset=None):\n self._writer = multipart.MultipartWriter('form-data')\n self._fields = []\n self._is_multipart = False\n self._quote_fields = quote_fields\n self._charset = charset\n\n if isinstance(fields, dict):\n fields = list(fields.items())\n elif not isinstance(fields, (list, tuple)):\n fields = (fields,)\n self.add_fields(*fields)\n\n @property\n def is_multipart(self):\n return self._is_multipart\n\n def add_field(self, name, value, *, content_type=None, filename=None,\n content_transfer_encoding=None):\n\n if isinstance(value, io.IOBase):\n self._is_multipart = True\n elif isinstance(value, (bytes, bytearray, memoryview)):\n if filename is None and content_transfer_encoding is None:\n filename = name\n\n type_options = MultiDict({'name': name})\n if filename is not None and not isinstance(filename, str):\n raise TypeError('filename must be an instance of str. '\n 'Got: %s' % filename)\n if filename is None and isinstance(value, io.IOBase):\n filename = guess_filename(value, name)\n if filename is not None:\n type_options['filename'] = filename\n self._is_multipart = True\n\n headers = {}\n if content_type is not None:\n if not isinstance(content_type, str):\n raise TypeError('content_type must be an instance of str. 
'\n 'Got: %s' % content_type)\n headers[hdrs.CONTENT_TYPE] = content_type\n self._is_multipart = True\n if content_transfer_encoding is not None:\n if not isinstance(content_transfer_encoding, str):\n raise TypeError('content_transfer_encoding must be an instance'\n ' of str. Got: %s' % content_transfer_encoding)\n headers[hdrs.CONTENT_TRANSFER_ENCODING] = content_transfer_encoding\n self._is_multipart = True\n\n self._fields.append((type_options, headers, value))\n\n def add_fields(self, *fields):\n to_add = list(fields)\n\n while to_add:\n rec = to_add.pop(0)\n\n if isinstance(rec, io.IOBase):\n k = guess_filename(rec, 'unknown')\n self.add_field(k, rec)\n\n elif isinstance(rec, (MultiDictProxy, MultiDict)):\n to_add.extend(rec.items())\n\n elif isinstance(rec, (list, tuple)) and len(rec) == 2:\n k, fp = rec\n self.add_field(k, fp)\n\n else:\n raise TypeError('Only io.IOBase, multidict and (name, file) '\n 'pairs allowed, use .add_field() for passing '\n 'more complex parameters, got {!r}'\n .format(rec))\n\n def _gen_form_urlencoded(self):\n # form data (x-www-form-urlencoded)\n data = []\n for type_options, _, value in self._fields:\n data.append((type_options['name'], value))\n\n charset = self._charset if self._charset is not None else 'utf-8'\n return payload.BytesPayload(\n urlencode(data, doseq=True).encode(charset),\n content_type='application/x-www-form-urlencoded')\n\n def _gen_form_data(self):\n \"\"\"Encode a list of fields using the multipart/form-data MIME format\"\"\"\n for dispparams, headers, value in self._fields:\n try:\n if hdrs.CONTENT_TYPE in headers:\n part = payload.get_payload(\n value, content_type=headers[hdrs.CONTENT_TYPE],\n headers=headers, encoding=self._charset)\n else:\n part = payload.get_payload(\n value, headers=headers, encoding=self._charset)\n except Exception as exc:\n raise TypeError(\n 'Can not serialize value type: %r\\n '\n 'headers: %r\\n value: %r' % (\n type(value), headers, value)) from exc\n\n if dispparams:\n part.set_content_disposition(\n 'form-data', quote_fields=self._quote_fields, **dispparams\n )\n # FIXME cgi.FieldStorage doesn't likes body parts with\n # Content-Length which were sent via chunked transfer encoding\n part.headers.pop(hdrs.CONTENT_LENGTH, None)\n\n self._writer.append_payload(part)\n\n return self._writer\n\n def __call__(self):\n if self._is_multipart:\n return self._gen_form_data()\n else:\n return self._gen_form_urlencoded()\n", "path": "aiohttp/formdata.py"}], "after_files": [{"content": "import io\nfrom urllib.parse import urlencode\n\nfrom multidict import MultiDict, MultiDictProxy\n\nfrom . 
import hdrs, multipart, payload\nfrom .helpers import guess_filename\n\n__all__ = ('FormData',)\n\n\nclass FormData:\n \"\"\"Helper class for multipart/form-data and\n application/x-www-form-urlencoded body generation.\"\"\"\n\n def __init__(self, fields=(), quote_fields=True, charset=None):\n self._writer = multipart.MultipartWriter('form-data')\n self._fields = []\n self._is_multipart = False\n self._quote_fields = quote_fields\n self._charset = charset\n\n if isinstance(fields, dict):\n fields = list(fields.items())\n elif not isinstance(fields, (list, tuple)):\n fields = (fields,)\n self.add_fields(*fields)\n\n @property\n def is_multipart(self):\n return self._is_multipart\n\n def add_field(self, name, value, *, content_type=None, filename=None,\n content_transfer_encoding=None):\n\n if isinstance(value, io.IOBase):\n self._is_multipart = True\n elif isinstance(value, (bytes, bytearray, memoryview)):\n if filename is None and content_transfer_encoding is None:\n filename = name\n\n type_options = MultiDict({'name': name})\n if filename is not None and not isinstance(filename, str):\n raise TypeError('filename must be an instance of str. '\n 'Got: %s' % filename)\n if filename is None and isinstance(value, io.IOBase):\n filename = guess_filename(value, name)\n if filename is not None:\n type_options['filename'] = filename\n self._is_multipart = True\n\n headers = {}\n if content_type is not None:\n if not isinstance(content_type, str):\n raise TypeError('content_type must be an instance of str. '\n 'Got: %s' % content_type)\n headers[hdrs.CONTENT_TYPE] = content_type\n self._is_multipart = True\n if content_transfer_encoding is not None:\n if not isinstance(content_transfer_encoding, str):\n raise TypeError('content_transfer_encoding must be an instance'\n ' of str. 
Got: %s' % content_transfer_encoding)\n headers[hdrs.CONTENT_TRANSFER_ENCODING] = content_transfer_encoding\n self._is_multipart = True\n\n self._fields.append((type_options, headers, value))\n\n def add_fields(self, *fields):\n to_add = list(fields)\n\n while to_add:\n rec = to_add.pop(0)\n\n if isinstance(rec, io.IOBase):\n k = guess_filename(rec, 'unknown')\n self.add_field(k, rec)\n\n elif isinstance(rec, (MultiDictProxy, MultiDict)):\n to_add.extend(rec.items())\n\n elif isinstance(rec, (list, tuple)) and len(rec) == 2:\n k, fp = rec\n self.add_field(k, fp)\n\n else:\n raise TypeError('Only io.IOBase, multidict and (name, file) '\n 'pairs allowed, use .add_field() for passing '\n 'more complex parameters, got {!r}'\n .format(rec))\n\n def _gen_form_urlencoded(self):\n # form data (x-www-form-urlencoded)\n data = []\n for type_options, _, value in self._fields:\n data.append((type_options['name'], value))\n\n charset = self._charset if self._charset is not None else 'utf-8'\n return payload.BytesPayload(\n urlencode(data, doseq=True, encoding=charset).encode(),\n content_type='application/x-www-form-urlencoded')\n\n def _gen_form_data(self):\n \"\"\"Encode a list of fields using the multipart/form-data MIME format\"\"\"\n for dispparams, headers, value in self._fields:\n try:\n if hdrs.CONTENT_TYPE in headers:\n part = payload.get_payload(\n value, content_type=headers[hdrs.CONTENT_TYPE],\n headers=headers, encoding=self._charset)\n else:\n part = payload.get_payload(\n value, headers=headers, encoding=self._charset)\n except Exception as exc:\n raise TypeError(\n 'Can not serialize value type: %r\\n '\n 'headers: %r\\n value: %r' % (\n type(value), headers, value)) from exc\n\n if dispparams:\n part.set_content_disposition(\n 'form-data', quote_fields=self._quote_fields, **dispparams\n )\n # FIXME cgi.FieldStorage doesn't likes body parts with\n # Content-Length which were sent via chunked transfer encoding\n part.headers.pop(hdrs.CONTENT_LENGTH, None)\n\n self._writer.append_payload(part)\n\n return self._writer\n\n def __call__(self):\n if self._is_multipart:\n return self._gen_form_data()\n else:\n return self._gen_form_urlencoded()\n", "path": "aiohttp/formdata.py"}]} | 1,943 | 121 |
gh_patches_debug_31145 | rasdani/github-patches | git_diff | archlinux__archinstall-408 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
_gfx_driver_packages not defined when choosing sway option (current master build)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `profiles/sway.py`
Content:
```
1 # A desktop environment using "Sway"
2
3 import archinstall
4
5 is_top_level_profile = False
6
7 __packages__ = ["sway", "swaylock", "swayidle", "waybar", "dmenu", "light", "grim", "slurp", "pavucontrol", "alacritty"]
8
9 def _prep_function(*args, **kwargs):
10 """
11 Magic function called by the importing installer
12 before continuing any further. It also avoids executing any
13 other code in this stage. So it's a safe way to ask the user
14 for more input before any other installer steps start.
15 """
16 if "nvidia" in _gfx_driver_packages:
17 choice = input("The proprietary Nvidia driver is not supported by Sway. It is likely that you will run into issues. Continue anyways? [y/N] ")
18 if choice.lower() in ("n", ""):
19 raise archinstall.lib.exceptions.HardwareIncompatibilityError("Sway does not support the proprietary nvidia drivers.")
20
21 __builtins__['_gfx_driver_packages'] = archinstall.select_driver()
22
23 return True
24
25 # Ensures that this code only gets executed if executed
26 # through importlib.util.spec_from_file_location("sway", "/somewhere/sway.py")
27 # or through conventional import sway
28 if __name__ == 'sway':
29 # Install the Sway packages
30 installation.add_additional_packages(__packages__)
31
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/profiles/sway.py b/profiles/sway.py
--- a/profiles/sway.py
+++ b/profiles/sway.py
@@ -4,7 +4,19 @@
is_top_level_profile = False
-__packages__ = ["sway", "swaylock", "swayidle", "waybar", "dmenu", "light", "grim", "slurp", "pavucontrol", "alacritty"]
+__packages__ = [
+ "sway",
+ "swaylock",
+ "swayidle",
+ "waybar",
+ "dmenu",
+ "light",
+ "grim",
+ "slurp",
+ "pavucontrol",
+ "alacritty",
+]
+
def _prep_function(*args, **kwargs):
"""
@@ -13,18 +25,26 @@
other code in this stage. So it's a safe way to ask the user
for more input before any other installer steps start.
"""
- if "nvidia" in _gfx_driver_packages:
- choice = input("The proprietary Nvidia driver is not supported by Sway. It is likely that you will run into issues. Continue anyways? [y/N] ")
- if choice.lower() in ("n", ""):
- raise archinstall.lib.exceptions.HardwareIncompatibilityError("Sway does not support the proprietary nvidia drivers.")
-
- __builtins__['_gfx_driver_packages'] = archinstall.select_driver()
+ __builtins__["_gfx_driver_packages"] = archinstall.select_driver()
return True
+
# Ensures that this code only gets executed if executed
# through importlib.util.spec_from_file_location("sway", "/somewhere/sway.py")
# or through conventional import sway
-if __name__ == 'sway':
+if __name__ == "sway":
+ if "nvidia" in _gfx_driver_packages:
+ choice = input(
+ "The proprietary Nvidia driver is not supported by Sway. It is likely that you will run into issues. Continue anyways? [y/N] "
+ )
+ if choice.lower() in ("n", ""):
+ raise archinstall.lib.exceptions.HardwareIncompatibilityError(
+ "Sway does not support the proprietary nvidia drivers."
+ )
+
# Install the Sway packages
installation.add_additional_packages(__packages__)
+
+ # Install the graphics driver packages
+ installation.add_additional_packages(_gfx_driver_packages)
| {"golden_diff": "diff --git a/profiles/sway.py b/profiles/sway.py\n--- a/profiles/sway.py\n+++ b/profiles/sway.py\n@@ -4,7 +4,19 @@\n \n is_top_level_profile = False\n \n-__packages__ = [\"sway\", \"swaylock\", \"swayidle\", \"waybar\", \"dmenu\", \"light\", \"grim\", \"slurp\", \"pavucontrol\", \"alacritty\"]\n+__packages__ = [\n+\t\"sway\",\n+\t\"swaylock\",\n+\t\"swayidle\",\n+\t\"waybar\",\n+\t\"dmenu\",\n+\t\"light\",\n+\t\"grim\",\n+\t\"slurp\",\n+\t\"pavucontrol\",\n+\t\"alacritty\",\n+]\n+\n \n def _prep_function(*args, **kwargs):\n \t\"\"\"\n@@ -13,18 +25,26 @@\n \tother code in this stage. So it's a safe way to ask the user\n \tfor more input before any other installer steps start.\n \t\"\"\"\n-\tif \"nvidia\" in _gfx_driver_packages:\n-\t\tchoice = input(\"The proprietary Nvidia driver is not supported by Sway. It is likely that you will run into issues. Continue anyways? [y/N] \")\n-\t\tif choice.lower() in (\"n\", \"\"):\n-\t\t\traise archinstall.lib.exceptions.HardwareIncompatibilityError(\"Sway does not support the proprietary nvidia drivers.\")\n-\n-\t__builtins__['_gfx_driver_packages'] = archinstall.select_driver()\n+\t__builtins__[\"_gfx_driver_packages\"] = archinstall.select_driver()\n \n \treturn True\n \n+\n # Ensures that this code only gets executed if executed\n # through importlib.util.spec_from_file_location(\"sway\", \"/somewhere/sway.py\")\n # or through conventional import sway\n-if __name__ == 'sway':\n+if __name__ == \"sway\":\n+\tif \"nvidia\" in _gfx_driver_packages:\n+\t\tchoice = input(\n+\t\t\t\"The proprietary Nvidia driver is not supported by Sway. It is likely that you will run into issues. Continue anyways? [y/N] \"\n+\t\t)\n+\t\tif choice.lower() in (\"n\", \"\"):\n+\t\t\traise archinstall.lib.exceptions.HardwareIncompatibilityError(\n+\t\t\t\t\"Sway does not support the proprietary nvidia drivers.\"\n+\t\t\t)\n+\n \t# Install the Sway packages\n \tinstallation.add_additional_packages(__packages__)\n+\n+\t# Install the graphics driver packages\n+\tinstallation.add_additional_packages(_gfx_driver_packages)\n", "issue": "_gfx_driver_packages not defined when choosing sway option (current master build)\n\r\n\n_gfx_driver_packages not defined when choosing sway option (current master build)\n\r\n\n_gfx_driver_packages not defined when choosing sway option (current master build)\n\r\n\n", "before_files": [{"content": "# A desktop environment using \"Sway\"\n\nimport archinstall\n\nis_top_level_profile = False\n\n__packages__ = [\"sway\", \"swaylock\", \"swayidle\", \"waybar\", \"dmenu\", \"light\", \"grim\", \"slurp\", \"pavucontrol\", \"alacritty\"]\n\ndef _prep_function(*args, **kwargs):\n\t\"\"\"\n\tMagic function called by the importing installer\n\tbefore continuing any further. It also avoids executing any\n\tother code in this stage. So it's a safe way to ask the user\n\tfor more input before any other installer steps start.\n\t\"\"\"\n\tif \"nvidia\" in _gfx_driver_packages:\n\t\tchoice = input(\"The proprietary Nvidia driver is not supported by Sway. It is likely that you will run into issues. Continue anyways? 
[y/N] \")\n\t\tif choice.lower() in (\"n\", \"\"):\n\t\t\traise archinstall.lib.exceptions.HardwareIncompatibilityError(\"Sway does not support the proprietary nvidia drivers.\")\n\n\t__builtins__['_gfx_driver_packages'] = archinstall.select_driver()\n\n\treturn True\n\n# Ensures that this code only gets executed if executed\n# through importlib.util.spec_from_file_location(\"sway\", \"/somewhere/sway.py\")\n# or through conventional import sway\nif __name__ == 'sway':\n\t# Install the Sway packages\n\tinstallation.add_additional_packages(__packages__)\n", "path": "profiles/sway.py"}], "after_files": [{"content": "# A desktop environment using \"Sway\"\n\nimport archinstall\n\nis_top_level_profile = False\n\n__packages__ = [\n\t\"sway\",\n\t\"swaylock\",\n\t\"swayidle\",\n\t\"waybar\",\n\t\"dmenu\",\n\t\"light\",\n\t\"grim\",\n\t\"slurp\",\n\t\"pavucontrol\",\n\t\"alacritty\",\n]\n\n\ndef _prep_function(*args, **kwargs):\n\t\"\"\"\n\tMagic function called by the importing installer\n\tbefore continuing any further. It also avoids executing any\n\tother code in this stage. So it's a safe way to ask the user\n\tfor more input before any other installer steps start.\n\t\"\"\"\n\t__builtins__[\"_gfx_driver_packages\"] = archinstall.select_driver()\n\n\treturn True\n\n\n# Ensures that this code only gets executed if executed\n# through importlib.util.spec_from_file_location(\"sway\", \"/somewhere/sway.py\")\n# or through conventional import sway\nif __name__ == \"sway\":\n\tif \"nvidia\" in _gfx_driver_packages:\n\t\tchoice = input(\n\t\t\t\"The proprietary Nvidia driver is not supported by Sway. It is likely that you will run into issues. Continue anyways? [y/N] \"\n\t\t)\n\t\tif choice.lower() in (\"n\", \"\"):\n\t\t\traise archinstall.lib.exceptions.HardwareIncompatibilityError(\n\t\t\t\t\"Sway does not support the proprietary nvidia drivers.\"\n\t\t\t)\n\n\t# Install the Sway packages\n\tinstallation.add_additional_packages(__packages__)\n\n\t# Install the graphics driver packages\n\tinstallation.add_additional_packages(_gfx_driver_packages)\n", "path": "profiles/sway.py"}]} | 928 | 558 |
gh_patches_debug_1625 | rasdani/github-patches | git_diff | Lightning-AI__pytorch-lightning-799 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Optional dependencies are required for deprecated logging module
🐛 Bug
There is a backwards compatibility issues coming from PR #767. Notably, if a user doesn't have any of the extra logging dependencies then they'll be an import error.
### To Reproduce
1. Remove all logging dependencies from your environment (E.g. comet)
2. Depend on the deprecated pytorch_lightning.logging package and run
### Expected behavior
We expect to maintain backwards compatibility here so optional dependencies shouldn't be required.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pytorch_lightning/logging/__init__.py`
Content:
```
1 """
2 .. warning:: `logging` package has been renamed to `loggers` since v0.6.1 and will be removed in v0.8.0
3 """
4
5 import warnings
6
7 warnings.warn("`logging` package has been renamed to `loggers` since v0.6.1"
8 " and will be removed in v0.8.0", DeprecationWarning)
9
10 from pytorch_lightning.loggers import * # noqa: F403
11 from pytorch_lightning.loggers import ( # noqa: E402
12 base, comet, mlflow, neptune, tensorboard, test_tube, wandb
13 )
14
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pytorch_lightning/logging/__init__.py b/pytorch_lightning/logging/__init__.py
--- a/pytorch_lightning/logging/__init__.py
+++ b/pytorch_lightning/logging/__init__.py
@@ -8,6 +8,3 @@
" and will be removed in v0.8.0", DeprecationWarning)
from pytorch_lightning.loggers import * # noqa: F403
-from pytorch_lightning.loggers import ( # noqa: E402
- base, comet, mlflow, neptune, tensorboard, test_tube, wandb
-)
| {"golden_diff": "diff --git a/pytorch_lightning/logging/__init__.py b/pytorch_lightning/logging/__init__.py\n--- a/pytorch_lightning/logging/__init__.py\n+++ b/pytorch_lightning/logging/__init__.py\n@@ -8,6 +8,3 @@\n \" and will be removed in v0.8.0\", DeprecationWarning)\n \n from pytorch_lightning.loggers import * # noqa: F403\n-from pytorch_lightning.loggers import ( # noqa: E402\n- base, comet, mlflow, neptune, tensorboard, test_tube, wandb\n-)\n", "issue": "Optional dependencies are required for deprecated logging module\n\ud83d\udc1b Bug\r\n\r\nThere is a backwards compatibility issues coming from PR #767. Notably, if a user doesn't have any of the extra logging dependencies then they'll be an import error.\r\n\r\n### To Reproduce\r\n\r\n1. Remove all logging dependencies from your environment (E.g. comet)\r\n2. Depend on the deprecated pytorch_lightning.logging package and run\r\n\r\n### Expected behavior\r\n\r\nWe expect to maintain backwards compatibility here so optional dependencies shouldn't be required.\n", "before_files": [{"content": "\"\"\"\n.. warning:: `logging` package has been renamed to `loggers` since v0.6.1 and will be removed in v0.8.0\n\"\"\"\n\nimport warnings\n\nwarnings.warn(\"`logging` package has been renamed to `loggers` since v0.6.1\"\n \" and will be removed in v0.8.0\", DeprecationWarning)\n\nfrom pytorch_lightning.loggers import * # noqa: F403\nfrom pytorch_lightning.loggers import ( # noqa: E402\n base, comet, mlflow, neptune, tensorboard, test_tube, wandb\n)\n", "path": "pytorch_lightning/logging/__init__.py"}], "after_files": [{"content": "\"\"\"\n.. warning:: `logging` package has been renamed to `loggers` since v0.6.1 and will be removed in v0.8.0\n\"\"\"\n\nimport warnings\n\nwarnings.warn(\"`logging` package has been renamed to `loggers` since v0.6.1\"\n \" and will be removed in v0.8.0\", DeprecationWarning)\n\nfrom pytorch_lightning.loggers import * # noqa: F403\n", "path": "pytorch_lightning/logging/__init__.py"}]} | 526 | 138 |
gh_patches_debug_6678 | rasdani/github-patches | git_diff | psychopy__psychopy-2734 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[bug] visual.Rect component does not recalculate vertices after change of size, width and height properties
Which results in inability to update width and height of Rect during main loop. I have noticed that Rect class was updated (commit from 14 october), but this update made it unusable. Fix is simple, update vertices after setting size and set self._needVertexUpdate to True to enable redrawing updated shape.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `psychopy/visual/rect.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """Creates a rectangle of given width and height as a special case of a
5 :class:`~psychopy.visual.ShapeStim`"""
6
7 # Part of the PsychoPy library
8 # Copyright (C) 2002-2018 Jonathan Peirce (C) 2019 Open Science Tools Ltd.
9 # Distributed under the terms of the GNU General Public License (GPL).
10
11 from __future__ import absolute_import, print_function
12
13 import numpy as np
14
15 import psychopy # so we can get the __path__
16 from psychopy.visual.shape import BaseShapeStim
17 from psychopy.tools.attributetools import attributeSetter, setAttribute
18
19
20 class Rect(BaseShapeStim):
21 """Creates a rectangle of given width and height as a special case of a
22 :class:`~psychopy.visual.ShapeStim`
23
24 (New in version 1.72.00)
25
26 Attributes
27 ----------
28 width, height : float or int
29 The width and height of the rectangle. Values are aliased with fields
30 in the `size` attribute. Use these values to adjust the size of the
31 rectangle in a single dimension after initialization.
32
33 """
34 def __init__(self,
35 win,
36 width=.5,
37 height=.5,
38 autoLog=None,
39 units='',
40 lineWidth=1.5,
41 lineColor='white',
42 lineColorSpace='rgb',
43 fillColor=None,
44 fillColorSpace='rgb',
45 pos=(0, 0),
46 size=None,
47 ori=0.0,
48 opacity=1.0,
49 contrast=1.0,
50 depth=0,
51 interpolate=True,
52 name=None,
53 autoDraw=False):
54 """
55 Parameters
56 ----------
57 win : `~psychopy.visual.Window`
58 Window object to be associated with this stimuli.
59 width, height : float or int
60 The width and height of the rectangle. *DEPRECATED* use `size`
61 to define the dimensions of the rectangle on initialization. If
62 `size` is specified the values of `width` and `height` are
63 ignored. This is to provide legacy compatibility for existing
64 applications.
65 size : array_like, float or int
66 Width and height of the rectangle as (w, h) or [w, h]. If a single
67 value is provided, the width and height will be set to the same
68 specified value. If `None` is specified, the `size` will be set
69 with values passed to `width` and `height`.
70
71 """
72 # width and height attributes, these are later aliased with `size`
73 self.__dict__['width'] = float(width)
74 self.__dict__['height'] = float(height)
75
76 # If the size argument was specified, override values of width and
77 # height, this is to maintain legacy compatibility. Args width and
78 # height should be deprecated in later releases.
79 if size is None:
80 size = (self.__dict__['width'],
81 self.__dict__['height'])
82
83 # vertices for rectangle, CCW winding order
84 vertices = np.array([[-.5, .5],
85 [ .5, .5],
86 [ .5, -.5],
87 [-.5, -.5]])
88
89 super(Rect, self).__init__(
90 win,
91 units=units,
92 lineWidth=lineWidth,
93 lineColor=lineColor,
94 lineColorSpace=lineColorSpace,
95 fillColor=fillColor,
96 fillColorSpace=fillColorSpace,
97 vertices=vertices,
98 closeShape=True,
99 pos=pos,
100 size=size,
101 ori=ori,
102 opacity=opacity,
103 contrast=contrast,
104 depth=depth,
105 interpolate=interpolate,
106 name=name,
107 autoLog=autoLog,
108 autoDraw=autoDraw)
109
110 @attributeSetter
111 def size(self, value):
112 """array-like.
113 Size of the rectangle (`width` and `height`).
114 """
115 # Needed to override `size` to ensure `width` and `height` attrs
116 # are updated when it changes.
117 self.__dict__['size'] = np.array(value, float)
118
119 width, height = self.__dict__['size']
120 self.__dict__['width'] = width
121 self.__dict__['height'] = height
122
123 def setSize(self, size, operation='', log=None):
124 """Usually you can use 'stim.attribute = value' syntax instead,
125 but use this method if you need to suppress the log message
126
127 :ref:`Operations <attrib-operations>` supported.
128 """
129 setAttribute(self, 'size', size, log, operation)
130
131 @attributeSetter
132 def width(self, value):
133 """int or float.
134 Width of the Rectangle (in its respective units, if specified).
135
136 :ref:`Operations <attrib-operations>` supported.
137 """
138 self.__dict__['width'] = float(value)
139 self.size = (self.__dict__['width'], self.size[1])
140
141 def setWidth(self, width, operation='', log=None):
142 """Usually you can use 'stim.attribute = value' syntax instead,
143 but use this method if you need to suppress the log message
144 """
145 setAttribute(self, 'width', width, log, operation)
146
147 @attributeSetter
148 def height(self, value):
149 """int or float.
150 Height of the Rectangle (in its respective units, if specified).
151
152 :ref:`Operations <attrib-operations>` supported.
153 """
154 self.__dict__['height'] = float(value)
155 self.size = (self.size[0], self.__dict__['height'])
156
157 def setHeight(self, height, operation='', log=None):
158 """Usually you can use 'stim.attribute = value' syntax instead,
159 but use this method if you need to suppress the log message
160 """
161 setAttribute(self, 'height', height, log, operation)
162
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/psychopy/visual/rect.py b/psychopy/visual/rect.py
--- a/psychopy/visual/rect.py
+++ b/psychopy/visual/rect.py
@@ -120,6 +120,8 @@
self.__dict__['width'] = width
self.__dict__['height'] = height
+ self._needVertexUpdate = True
+
def setSize(self, size, operation='', log=None):
"""Usually you can use 'stim.attribute = value' syntax instead,
but use this method if you need to suppress the log message
| {"golden_diff": "diff --git a/psychopy/visual/rect.py b/psychopy/visual/rect.py\n--- a/psychopy/visual/rect.py\n+++ b/psychopy/visual/rect.py\n@@ -120,6 +120,8 @@\n self.__dict__['width'] = width\n self.__dict__['height'] = height\n \n+ self._needVertexUpdate = True\n+\n def setSize(self, size, operation='', log=None):\n \"\"\"Usually you can use 'stim.attribute = value' syntax instead,\n but use this method if you need to suppress the log message\n", "issue": "[bug] visual.Rect component does not recalculate vertices after change of size, width and height properties\nWhich results in inability to update width and height of Rect during main loop. I have noticed that Rect class was updated (commit from 14 october), but this update made it unusable. Fix is simple, update vertices after setting size and set self._needVertexUpdate to True to enable redrawing updated shape.\r\n \n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Creates a rectangle of given width and height as a special case of a\n:class:`~psychopy.visual.ShapeStim`\"\"\"\n\n# Part of the PsychoPy library\n# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019 Open Science Tools Ltd.\n# Distributed under the terms of the GNU General Public License (GPL).\n\nfrom __future__ import absolute_import, print_function\n\nimport numpy as np\n\nimport psychopy # so we can get the __path__\nfrom psychopy.visual.shape import BaseShapeStim\nfrom psychopy.tools.attributetools import attributeSetter, setAttribute\n\n\nclass Rect(BaseShapeStim):\n \"\"\"Creates a rectangle of given width and height as a special case of a\n :class:`~psychopy.visual.ShapeStim`\n\n (New in version 1.72.00)\n\n Attributes\n ----------\n width, height : float or int\n The width and height of the rectangle. Values are aliased with fields\n in the `size` attribute. Use these values to adjust the size of the\n rectangle in a single dimension after initialization.\n\n \"\"\"\n def __init__(self,\n win,\n width=.5,\n height=.5,\n autoLog=None,\n units='',\n lineWidth=1.5,\n lineColor='white',\n lineColorSpace='rgb',\n fillColor=None,\n fillColorSpace='rgb',\n pos=(0, 0),\n size=None,\n ori=0.0,\n opacity=1.0,\n contrast=1.0,\n depth=0,\n interpolate=True,\n name=None,\n autoDraw=False):\n \"\"\"\n Parameters\n ----------\n win : `~psychopy.visual.Window`\n Window object to be associated with this stimuli.\n width, height : float or int\n The width and height of the rectangle. *DEPRECATED* use `size`\n to define the dimensions of the rectangle on initialization. If\n `size` is specified the values of `width` and `height` are\n ignored. This is to provide legacy compatibility for existing\n applications.\n size : array_like, float or int\n Width and height of the rectangle as (w, h) or [w, h]. If a single\n value is provided, the width and height will be set to the same\n specified value. If `None` is specified, the `size` will be set\n with values passed to `width` and `height`.\n\n \"\"\"\n # width and height attributes, these are later aliased with `size`\n self.__dict__['width'] = float(width)\n self.__dict__['height'] = float(height)\n\n # If the size argument was specified, override values of width and\n # height, this is to maintain legacy compatibility. 
Args width and\n # height should be deprecated in later releases.\n if size is None:\n size = (self.__dict__['width'],\n self.__dict__['height'])\n\n # vertices for rectangle, CCW winding order\n vertices = np.array([[-.5, .5],\n [ .5, .5],\n [ .5, -.5],\n [-.5, -.5]])\n\n super(Rect, self).__init__(\n win,\n units=units,\n lineWidth=lineWidth,\n lineColor=lineColor,\n lineColorSpace=lineColorSpace,\n fillColor=fillColor,\n fillColorSpace=fillColorSpace,\n vertices=vertices,\n closeShape=True,\n pos=pos,\n size=size,\n ori=ori,\n opacity=opacity,\n contrast=contrast,\n depth=depth,\n interpolate=interpolate,\n name=name,\n autoLog=autoLog,\n autoDraw=autoDraw)\n\n @attributeSetter\n def size(self, value):\n \"\"\"array-like.\n Size of the rectangle (`width` and `height`).\n \"\"\"\n # Needed to override `size` to ensure `width` and `height` attrs\n # are updated when it changes.\n self.__dict__['size'] = np.array(value, float)\n\n width, height = self.__dict__['size']\n self.__dict__['width'] = width\n self.__dict__['height'] = height\n\n def setSize(self, size, operation='', log=None):\n \"\"\"Usually you can use 'stim.attribute = value' syntax instead,\n but use this method if you need to suppress the log message\n\n :ref:`Operations <attrib-operations>` supported.\n \"\"\"\n setAttribute(self, 'size', size, log, operation)\n\n @attributeSetter\n def width(self, value):\n \"\"\"int or float.\n Width of the Rectangle (in its respective units, if specified).\n\n :ref:`Operations <attrib-operations>` supported.\n \"\"\"\n self.__dict__['width'] = float(value)\n self.size = (self.__dict__['width'], self.size[1])\n\n def setWidth(self, width, operation='', log=None):\n \"\"\"Usually you can use 'stim.attribute = value' syntax instead,\n but use this method if you need to suppress the log message\n \"\"\"\n setAttribute(self, 'width', width, log, operation)\n\n @attributeSetter\n def height(self, value):\n \"\"\"int or float.\n Height of the Rectangle (in its respective units, if specified).\n\n :ref:`Operations <attrib-operations>` supported.\n \"\"\"\n self.__dict__['height'] = float(value)\n self.size = (self.size[0], self.__dict__['height'])\n\n def setHeight(self, height, operation='', log=None):\n \"\"\"Usually you can use 'stim.attribute = value' syntax instead,\n but use this method if you need to suppress the log message\n \"\"\"\n setAttribute(self, 'height', height, log, operation)\n", "path": "psychopy/visual/rect.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Creates a rectangle of given width and height as a special case of a\n:class:`~psychopy.visual.ShapeStim`\"\"\"\n\n# Part of the PsychoPy library\n# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019 Open Science Tools Ltd.\n# Distributed under the terms of the GNU General Public License (GPL).\n\nfrom __future__ import absolute_import, print_function\n\nimport numpy as np\n\nimport psychopy # so we can get the __path__\nfrom psychopy.visual.shape import BaseShapeStim\nfrom psychopy.tools.attributetools import attributeSetter, setAttribute\n\n\nclass Rect(BaseShapeStim):\n \"\"\"Creates a rectangle of given width and height as a special case of a\n :class:`~psychopy.visual.ShapeStim`\n\n (New in version 1.72.00)\n\n Attributes\n ----------\n width, height : float or int\n The width and height of the rectangle. Values are aliased with fields\n in the `size` attribute. 
Use these values to adjust the size of the\n rectangle in a single dimension after initialization.\n\n \"\"\"\n def __init__(self,\n win,\n width=.5,\n height=.5,\n autoLog=None,\n units='',\n lineWidth=1.5,\n lineColor='white',\n lineColorSpace='rgb',\n fillColor=None,\n fillColorSpace='rgb',\n pos=(0, 0),\n size=None,\n ori=0.0,\n opacity=1.0,\n contrast=1.0,\n depth=0,\n interpolate=True,\n name=None,\n autoDraw=False):\n \"\"\"\n Parameters\n ----------\n win : `~psychopy.visual.Window`\n Window object to be associated with this stimuli.\n width, height : float or int\n The width and height of the rectangle. *DEPRECATED* use `size`\n to define the dimensions of the rectangle on initialization. If\n `size` is specified the values of `width` and `height` are\n ignored. This is to provide legacy compatibility for existing\n applications.\n size : array_like, float or int\n Width and height of the rectangle as (w, h) or [w, h]. If a single\n value is provided, the width and height will be set to the same\n specified value. If `None` is specified, the `size` will be set\n with values passed to `width` and `height`.\n\n \"\"\"\n # width and height attributes, these are later aliased with `size`\n self.__dict__['width'] = float(width)\n self.__dict__['height'] = float(height)\n\n # If the size argument was specified, override values of width and\n # height, this is to maintain legacy compatibility. Args width and\n # height should be deprecated in later releases.\n if size is None:\n size = (self.__dict__['width'],\n self.__dict__['height'])\n\n # vertices for rectangle, CCW winding order\n vertices = np.array([[-.5, .5],\n [ .5, .5],\n [ .5, -.5],\n [-.5, -.5]])\n\n super(Rect, self).__init__(\n win,\n units=units,\n lineWidth=lineWidth,\n lineColor=lineColor,\n lineColorSpace=lineColorSpace,\n fillColor=fillColor,\n fillColorSpace=fillColorSpace,\n vertices=vertices,\n closeShape=True,\n pos=pos,\n size=size,\n ori=ori,\n opacity=opacity,\n contrast=contrast,\n depth=depth,\n interpolate=interpolate,\n name=name,\n autoLog=autoLog,\n autoDraw=autoDraw)\n\n @attributeSetter\n def size(self, value):\n \"\"\"array-like.\n Size of the rectangle (`width` and `height`).\n \"\"\"\n # Needed to override `size` to ensure `width` and `height` attrs\n # are updated when it changes.\n self.__dict__['size'] = np.array(value, float)\n\n width, height = self.__dict__['size']\n self.__dict__['width'] = width\n self.__dict__['height'] = height\n\n self._needVertexUpdate = True\n\n def setSize(self, size, operation='', log=None):\n \"\"\"Usually you can use 'stim.attribute = value' syntax instead,\n but use this method if you need to suppress the log message\n\n :ref:`Operations <attrib-operations>` supported.\n \"\"\"\n setAttribute(self, 'size', size, log, operation)\n\n @attributeSetter\n def width(self, value):\n \"\"\"int or float.\n Width of the Rectangle (in its respective units, if specified).\n\n :ref:`Operations <attrib-operations>` supported.\n \"\"\"\n self.__dict__['width'] = float(value)\n self.size = (self.__dict__['width'], self.size[1])\n\n def setWidth(self, width, operation='', log=None):\n \"\"\"Usually you can use 'stim.attribute = value' syntax instead,\n but use this method if you need to suppress the log message\n \"\"\"\n setAttribute(self, 'width', width, log, operation)\n\n @attributeSetter\n def height(self, value):\n \"\"\"int or float.\n Height of the Rectangle (in its respective units, if specified).\n\n :ref:`Operations <attrib-operations>` supported.\n \"\"\"\n 
self.__dict__['height'] = float(value)\n self.size = (self.size[0], self.__dict__['height'])\n\n def setHeight(self, height, operation='', log=None):\n \"\"\"Usually you can use 'stim.attribute = value' syntax instead,\n but use this method if you need to suppress the log message\n \"\"\"\n setAttribute(self, 'height', height, log, operation)\n", "path": "psychopy/visual/rect.py"}]} | 2,008 | 132 |
gh_patches_debug_22721 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-825 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
.cookiecutterrc and cookiecutters_dir not working as I expected
Hi,
Here's the setup:
- I have a ~/.cookiecutterrc as talked about here: http://cookiecutter.readthedocs.org/en/latest/advanced_usage.html
- I also have a cookiecutters_dir at ~/.cookiecutters/ with the subdirectory cookiecutter-pypackage/.
When I try to run "cookiecutter cookiecutter-pypackage/" in ~/Projects/, I get the following error
```
Traceback (most recent call last):
File "/opt/anaconda/bin/cookiecutter", line 9, in <module>
load_entry_point('cookiecutter==0.9.0', 'console_scripts', 'cookiecutter')()
File "/opt/anaconda/lib/python2.7/site-packages/click-3.3-py2.7.egg/click/core.py", line 610, in __call__
return self.main(*args, **kwargs)
File "/opt/anaconda/lib/python2.7/site-packages/click-3.3-py2.7.egg/click/core.py", line 590, in main
rv = self.invoke(ctx)
File "/opt/anaconda/lib/python2.7/site-packages/click-3.3-py2.7.egg/click/core.py", line 782, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/opt/anaconda/lib/python2.7/site-packages/click-3.3-py2.7.egg/click/core.py", line 416, in invoke
return callback(*args, **kwargs)
File "/Users/alexp/Projects/cookiecutter/cookiecutter/cli.py", line 70, in main
cookiecutter(template, checkout, no_input)
File "/Users/alexp/Projects/cookiecutter/cookiecutter/main.py", line 95, in cookiecutter
extra_context=extra_context,
File "/Users/alexp/Projects/cookiecutter/cookiecutter/generate.py", line 43, in generate_context
file_handle = open(context_file)
IOError: [Errno 2] No such file or directory: u'cookiecutter-pypackage/cookiecutter.json'
```
This error shows up if I either do pip install or with the git repo locally. Naturally, it makes a bit of sense. There is no directory ~/Projects/cookiecutter-pypackage/.
However, and perhaps I'm making a poor assumption about usage, it seems to me if I clone or otherwise create a cookiecutter and it's sitting in cookiecutters_dir, it'd be nice to just refer to it as I did above. For my usage, if I create a cookiecutter, I don't particularly want it sitting around a Projects directory, especially if I have multiple project directories for personal and organizational purposes.
In order to do this, I added three lines to main.py in my fork (see below) and it seems to work. I did it as an `elif` to try to preserve the possibility of a lack of a cookiecutters_dir. I have not written a test for this and admittedly I don't really know how. I will likely just use my fork with this modificationgoing forward but I wanted to let the developer crew know about this.
Cheers.
```
# cookiecutter.main
...
# TODO: find a better way to tell if it's a repo URL
if 'git@' in template or 'https://' in template:
repo_dir = clone(
repo_url=template,
checkout=checkout,
clone_to_dir=config_dict['cookiecutters_dir'],
no_input=no_input
)
#### Added these three lines
elif 'cookiecutters_dir' in config_dict:
cookiecutters_dir = os.path.expanduser(config_dict['cookiecutters_dir'])
repo_dir = os.path.join(cookiecutters_dir,template)
else:
# If it's a local repo, no need to clone or copy to your
# cookiecutters_dir
repo_dir = template
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cookiecutter/repository.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 """Cookiecutter repository functions."""
4 from __future__ import unicode_literals
5 import os
6 import re
7
8 from .exceptions import RepositoryNotFound
9 from .vcs import clone
10
11 REPO_REGEX = re.compile(r"""
12 (?x)
13 ((((git|hg)\+)?(git|ssh|https?):(//)?) # something like git:// ssh:// etc.
14 | # or
15 (\w+@[\w\.]+) # something like user@...
16 )
17 """)
18
19
20 def is_repo_url(value):
21 """Return True if value is a repository URL."""
22 return bool(REPO_REGEX.match(value))
23
24
25 def expand_abbreviations(template, abbreviations):
26 """
27 Expand abbreviations in a template name.
28
29 :param template: The project template name.
30 :param abbreviations: Abbreviation definitions.
31 """
32 if template in abbreviations:
33 return abbreviations[template]
34
35 # Split on colon. If there is no colon, rest will be empty
36 # and prefix will be the whole template
37 prefix, sep, rest = template.partition(':')
38 if prefix in abbreviations:
39 return abbreviations[prefix].format(rest)
40
41 return template
42
43
44 def repository_has_cookiecutter_json(repo_directory):
45 """Determines if `repo_directory` contains a `cookiecutter.json` file.
46
47 :param repo_directory: The candidate repository directory.
48 :return: True if the `repo_directory` is valid, else False.
49 """
50 repo_directory_exists = os.path.isdir(repo_directory)
51
52 repo_config_exists = os.path.isfile(
53 os.path.join(repo_directory, 'cookiecutter.json')
54 )
55 return repo_directory_exists and repo_config_exists
56
57
58 def determine_repo_dir(template, abbreviations, clone_to_dir, checkout,
59 no_input):
60 """
61 Locate the repository directory from a template reference.
62
63 Applies repository abbreviations to the template reference.
64 If the template refers to a repository URL, clone it.
65 If the template is a path to a local repository, use it.
66
67 :param template: A directory containing a project template directory,
68 or a URL to a git repository.
69 :param abbreviations: A dictionary of repository abbreviation
70 definitions.
71 :param clone_to_dir: The directory to clone the repository into.
72 :param checkout: The branch, tag or commit ID to checkout after clone.
73 :param no_input: Prompt the user at command line for manual configuration?
74 :return: The cookiecutter template directory
75 :raises: `RepositoryNotFound` if a repository directory could not be found.
76 """
77 template = expand_abbreviations(template, abbreviations)
78
79 if is_repo_url(template):
80 repo_dir = clone(
81 repo_url=template,
82 checkout=checkout,
83 clone_to_dir=clone_to_dir,
84 no_input=no_input,
85 )
86 else:
87 # If it's a local repo, no need to clone or copy to your
88 # cookiecutters_dir
89 repo_dir = template
90
91 if repository_has_cookiecutter_json(repo_dir):
92 return repo_dir
93
94 raise RepositoryNotFound(
95 'The repository {} could not be located or does not contain '
96 'a "cookiecutter.json" file.'.format(repo_dir)
97 )
98
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cookiecutter/repository.py b/cookiecutter/repository.py
--- a/cookiecutter/repository.py
+++ b/cookiecutter/repository.py
@@ -77,21 +77,27 @@
template = expand_abbreviations(template, abbreviations)
if is_repo_url(template):
- repo_dir = clone(
+ cloned_repo = clone(
repo_url=template,
checkout=checkout,
clone_to_dir=clone_to_dir,
no_input=no_input,
)
+ repository_candidates = [cloned_repo]
else:
- # If it's a local repo, no need to clone or copy to your
- # cookiecutters_dir
- repo_dir = template
+ repository_candidates = [
+ template,
+ os.path.join(clone_to_dir, template)
+ ]
- if repository_has_cookiecutter_json(repo_dir):
- return repo_dir
+ for repo_candidate in repository_candidates:
+ if repository_has_cookiecutter_json(repo_candidate):
+ return repo_candidate
raise RepositoryNotFound(
- 'The repository {} could not be located or does not contain '
- 'a "cookiecutter.json" file.'.format(repo_dir)
+ 'A valid repository for "{}" could not be found in the following '
+ 'locations:\n{}'.format(
+ template,
+ '\n'.join(repository_candidates)
+ )
)
| {"golden_diff": "diff --git a/cookiecutter/repository.py b/cookiecutter/repository.py\n--- a/cookiecutter/repository.py\n+++ b/cookiecutter/repository.py\n@@ -77,21 +77,27 @@\n template = expand_abbreviations(template, abbreviations)\n \n if is_repo_url(template):\n- repo_dir = clone(\n+ cloned_repo = clone(\n repo_url=template,\n checkout=checkout,\n clone_to_dir=clone_to_dir,\n no_input=no_input,\n )\n+ repository_candidates = [cloned_repo]\n else:\n- # If it's a local repo, no need to clone or copy to your\n- # cookiecutters_dir\n- repo_dir = template\n+ repository_candidates = [\n+ template,\n+ os.path.join(clone_to_dir, template)\n+ ]\n \n- if repository_has_cookiecutter_json(repo_dir):\n- return repo_dir\n+ for repo_candidate in repository_candidates:\n+ if repository_has_cookiecutter_json(repo_candidate):\n+ return repo_candidate\n \n raise RepositoryNotFound(\n- 'The repository {} could not be located or does not contain '\n- 'a \"cookiecutter.json\" file.'.format(repo_dir)\n+ 'A valid repository for \"{}\" could not be found in the following '\n+ 'locations:\\n{}'.format(\n+ template,\n+ '\\n'.join(repository_candidates)\n+ )\n )\n", "issue": ".cookiecutterrc and cookiecutters_dir not working as I expected\nHi,\nHere's the setup:\n- I have a ~/.cookiecutterrc as talked about here: http://cookiecutter.readthedocs.org/en/latest/advanced_usage.html\n- I also have a cookiecutters_dir at ~/.cookiecutters/ with the subdirectory cookiecutter-pypackage/.\n\nWhen I try to run \"cookiecutter cookiecutter-pypackage/\" in ~/Projects/, I get the following error\n\n```\nTraceback (most recent call last):\n File \"/opt/anaconda/bin/cookiecutter\", line 9, in <module>\n load_entry_point('cookiecutter==0.9.0', 'console_scripts', 'cookiecutter')()\n File \"/opt/anaconda/lib/python2.7/site-packages/click-3.3-py2.7.egg/click/core.py\", line 610, in __call__\n return self.main(*args, **kwargs)\n File \"/opt/anaconda/lib/python2.7/site-packages/click-3.3-py2.7.egg/click/core.py\", line 590, in main\n rv = self.invoke(ctx)\n File \"/opt/anaconda/lib/python2.7/site-packages/click-3.3-py2.7.egg/click/core.py\", line 782, in invoke\n return ctx.invoke(self.callback, **ctx.params)\n File \"/opt/anaconda/lib/python2.7/site-packages/click-3.3-py2.7.egg/click/core.py\", line 416, in invoke\n return callback(*args, **kwargs)\n File \"/Users/alexp/Projects/cookiecutter/cookiecutter/cli.py\", line 70, in main\n cookiecutter(template, checkout, no_input)\n File \"/Users/alexp/Projects/cookiecutter/cookiecutter/main.py\", line 95, in cookiecutter\n extra_context=extra_context,\n File \"/Users/alexp/Projects/cookiecutter/cookiecutter/generate.py\", line 43, in generate_context\n file_handle = open(context_file)\nIOError: [Errno 2] No such file or directory: u'cookiecutter-pypackage/cookiecutter.json'\n```\n\nThis error shows up if I either do pip install or with the git repo locally. Naturally, it makes a bit of sense. There is no directory ~/Projects/cookiecutter-pypackage/. \n\nHowever, and perhaps I'm making a poor assumption about usage, it seems to me if I clone or otherwise create a cookiecutter and it's sitting in cookiecutters_dir, it'd be nice to just refer to it as I did above. For my usage, if I create a cookiecutter, I don't particularly want it sitting around a Projects directory, especially if I have multiple project directories for personal and organizational purposes.\n\nIn order to do this, I added three lines to main.py in my fork (see below) and it seems to work. 
I did it as an `elif` to try to preserve the possibility of a lack of a cookiecutters_dir. I have not written a test for this and admittedly I don't really know how. I will likely just use my fork with this modificationgoing forward but I wanted to let the developer crew know about this. \n\nCheers.\n\n```\n# cookiecutter.main\n...\n# TODO: find a better way to tell if it's a repo URL\nif 'git@' in template or 'https://' in template:\n repo_dir = clone(\n repo_url=template,\n checkout=checkout,\n clone_to_dir=config_dict['cookiecutters_dir'],\n no_input=no_input\n )\n#### Added these three lines\nelif 'cookiecutters_dir' in config_dict:\n cookiecutters_dir = os.path.expanduser(config_dict['cookiecutters_dir'])\n repo_dir = os.path.join(cookiecutters_dir,template)\nelse:\n # If it's a local repo, no need to clone or copy to your\n # cookiecutters_dir\n repo_dir = template\n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Cookiecutter repository functions.\"\"\"\nfrom __future__ import unicode_literals\nimport os\nimport re\n\nfrom .exceptions import RepositoryNotFound\nfrom .vcs import clone\n\nREPO_REGEX = re.compile(r\"\"\"\n(?x)\n((((git|hg)\\+)?(git|ssh|https?):(//)?) # something like git:// ssh:// etc.\n | # or\n (\\w+@[\\w\\.]+) # something like user@...\n)\n\"\"\")\n\n\ndef is_repo_url(value):\n \"\"\"Return True if value is a repository URL.\"\"\"\n return bool(REPO_REGEX.match(value))\n\n\ndef expand_abbreviations(template, abbreviations):\n \"\"\"\n Expand abbreviations in a template name.\n\n :param template: The project template name.\n :param abbreviations: Abbreviation definitions.\n \"\"\"\n if template in abbreviations:\n return abbreviations[template]\n\n # Split on colon. If there is no colon, rest will be empty\n # and prefix will be the whole template\n prefix, sep, rest = template.partition(':')\n if prefix in abbreviations:\n return abbreviations[prefix].format(rest)\n\n return template\n\n\ndef repository_has_cookiecutter_json(repo_directory):\n \"\"\"Determines if `repo_directory` contains a `cookiecutter.json` file.\n\n :param repo_directory: The candidate repository directory.\n :return: True if the `repo_directory` is valid, else False.\n \"\"\"\n repo_directory_exists = os.path.isdir(repo_directory)\n\n repo_config_exists = os.path.isfile(\n os.path.join(repo_directory, 'cookiecutter.json')\n )\n return repo_directory_exists and repo_config_exists\n\n\ndef determine_repo_dir(template, abbreviations, clone_to_dir, checkout,\n no_input):\n \"\"\"\n Locate the repository directory from a template reference.\n\n Applies repository abbreviations to the template reference.\n If the template refers to a repository URL, clone it.\n If the template is a path to a local repository, use it.\n\n :param template: A directory containing a project template directory,\n or a URL to a git repository.\n :param abbreviations: A dictionary of repository abbreviation\n definitions.\n :param clone_to_dir: The directory to clone the repository into.\n :param checkout: The branch, tag or commit ID to checkout after clone.\n :param no_input: Prompt the user at command line for manual configuration?\n :return: The cookiecutter template directory\n :raises: `RepositoryNotFound` if a repository directory could not be found.\n \"\"\"\n template = expand_abbreviations(template, abbreviations)\n\n if is_repo_url(template):\n repo_dir = clone(\n repo_url=template,\n checkout=checkout,\n clone_to_dir=clone_to_dir,\n no_input=no_input,\n )\n else:\n # If it's a local repo, 
no need to clone or copy to your\n # cookiecutters_dir\n repo_dir = template\n\n if repository_has_cookiecutter_json(repo_dir):\n return repo_dir\n\n raise RepositoryNotFound(\n 'The repository {} could not be located or does not contain '\n 'a \"cookiecutter.json\" file.'.format(repo_dir)\n )\n", "path": "cookiecutter/repository.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Cookiecutter repository functions.\"\"\"\nfrom __future__ import unicode_literals\nimport os\nimport re\n\nfrom .exceptions import RepositoryNotFound\nfrom .vcs import clone\n\nREPO_REGEX = re.compile(r\"\"\"\n(?x)\n((((git|hg)\\+)?(git|ssh|https?):(//)?) # something like git:// ssh:// etc.\n | # or\n (\\w+@[\\w\\.]+) # something like user@...\n)\n\"\"\")\n\n\ndef is_repo_url(value):\n \"\"\"Return True if value is a repository URL.\"\"\"\n return bool(REPO_REGEX.match(value))\n\n\ndef expand_abbreviations(template, abbreviations):\n \"\"\"\n Expand abbreviations in a template name.\n\n :param template: The project template name.\n :param abbreviations: Abbreviation definitions.\n \"\"\"\n if template in abbreviations:\n return abbreviations[template]\n\n # Split on colon. If there is no colon, rest will be empty\n # and prefix will be the whole template\n prefix, sep, rest = template.partition(':')\n if prefix in abbreviations:\n return abbreviations[prefix].format(rest)\n\n return template\n\n\ndef repository_has_cookiecutter_json(repo_directory):\n \"\"\"Determines if `repo_directory` contains a `cookiecutter.json` file.\n\n :param repo_directory: The candidate repository directory.\n :return: True if the `repo_directory` is valid, else False.\n \"\"\"\n repo_directory_exists = os.path.isdir(repo_directory)\n\n repo_config_exists = os.path.isfile(\n os.path.join(repo_directory, 'cookiecutter.json')\n )\n return repo_directory_exists and repo_config_exists\n\n\ndef determine_repo_dir(template, abbreviations, clone_to_dir, checkout,\n no_input):\n \"\"\"\n Locate the repository directory from a template reference.\n\n Applies repository abbreviations to the template reference.\n If the template refers to a repository URL, clone it.\n If the template is a path to a local repository, use it.\n\n :param template: A directory containing a project template directory,\n or a URL to a git repository.\n :param abbreviations: A dictionary of repository abbreviation\n definitions.\n :param clone_to_dir: The directory to clone the repository into.\n :param checkout: The branch, tag or commit ID to checkout after clone.\n :param no_input: Prompt the user at command line for manual configuration?\n :return: The cookiecutter template directory\n :raises: `RepositoryNotFound` if a repository directory could not be found.\n \"\"\"\n template = expand_abbreviations(template, abbreviations)\n\n if is_repo_url(template):\n cloned_repo = clone(\n repo_url=template,\n checkout=checkout,\n clone_to_dir=clone_to_dir,\n no_input=no_input,\n )\n repository_candidates = [cloned_repo]\n else:\n repository_candidates = [\n template,\n os.path.join(clone_to_dir, template)\n ]\n\n for repo_candidate in repository_candidates:\n if repository_has_cookiecutter_json(repo_candidate):\n return repo_candidate\n\n raise RepositoryNotFound(\n 'A valid repository for \"{}\" could not be found in the following '\n 'locations:\\n{}'.format(\n template,\n '\\n'.join(repository_candidates)\n )\n )\n", "path": "cookiecutter/repository.py"}]} | 2,041 | 309 |
gh_patches_debug_12348 | rasdani/github-patches | git_diff | pyodide__pyodide-74 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve parsing of result line
The parsing of the input Python to find the last line which will be evaluated (rather than executed) to provide the result is probably a little brittle in certain corner cases. We should look at what IPython does here and copy that.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pyodide.py`
Content:
```
1 """
2 A library of helper utilities for connecting Python to the browser environment.
3 """
4
5 from js import XMLHttpRequest
6
7 import io
8
9
10 def open_url(url):
11 """
12 Fetches a given *url* and returns a io.StringIO to access its contents.
13 """
14 req = XMLHttpRequest.new()
15 req.open('GET', url, False)
16 req.send(None)
17 return io.StringIO(req.response)
18
19
20 __all__ = ['open_url']
21
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/pyodide.py b/src/pyodide.py
--- a/src/pyodide.py
+++ b/src/pyodide.py
@@ -4,6 +4,7 @@
from js import XMLHttpRequest
+import ast
import io
@@ -17,4 +18,23 @@
return io.StringIO(req.response)
-__all__ = ['open_url']
+def eval_code(code, ns):
+ """
+ Runs a string of code, the last part of which may be an expression.
+ """
+ mod = ast.parse(code)
+ if isinstance(mod.body[-1], ast.Expr):
+ expr = ast.Expression(mod.body[-1].value)
+ del mod.body[-1]
+ else:
+ expr = None
+
+ if len(mod.body):
+ exec(compile(mod, '<exec>', mode='exec'), ns, ns)
+ if expr is not None:
+ return eval(compile(expr, '<eval>', mode='eval'), ns, ns)
+ else:
+ return None
+
+
+__all__ = ['open_url', 'eval_code']
| {"golden_diff": "diff --git a/src/pyodide.py b/src/pyodide.py\n--- a/src/pyodide.py\n+++ b/src/pyodide.py\n@@ -4,6 +4,7 @@\n \n from js import XMLHttpRequest\n \n+import ast\n import io\n \n \n@@ -17,4 +18,23 @@\n return io.StringIO(req.response)\n \n \n-__all__ = ['open_url']\n+def eval_code(code, ns):\n+ \"\"\"\n+ Runs a string of code, the last part of which may be an expression.\n+ \"\"\"\n+ mod = ast.parse(code)\n+ if isinstance(mod.body[-1], ast.Expr):\n+ expr = ast.Expression(mod.body[-1].value)\n+ del mod.body[-1]\n+ else:\n+ expr = None\n+\n+ if len(mod.body):\n+ exec(compile(mod, '<exec>', mode='exec'), ns, ns)\n+ if expr is not None:\n+ return eval(compile(expr, '<eval>', mode='eval'), ns, ns)\n+ else:\n+ return None\n+\n+\n+__all__ = ['open_url', 'eval_code']\n", "issue": "Improve parsing of result line\nThe parsing of the input Python to find the last line which will be evaluated (rather than executed) to provide the result is probably a little brittle in certain corner cases. We should look at what IPython does here and copy that.\n", "before_files": [{"content": "\"\"\"\nA library of helper utilities for connecting Python to the browser environment.\n\"\"\"\n\nfrom js import XMLHttpRequest\n\nimport io\n\n\ndef open_url(url):\n \"\"\"\n Fetches a given *url* and returns a io.StringIO to access its contents.\n \"\"\"\n req = XMLHttpRequest.new()\n req.open('GET', url, False)\n req.send(None)\n return io.StringIO(req.response)\n\n\n__all__ = ['open_url']\n", "path": "src/pyodide.py"}], "after_files": [{"content": "\"\"\"\nA library of helper utilities for connecting Python to the browser environment.\n\"\"\"\n\nfrom js import XMLHttpRequest\n\nimport ast\nimport io\n\n\ndef open_url(url):\n \"\"\"\n Fetches a given *url* and returns a io.StringIO to access its contents.\n \"\"\"\n req = XMLHttpRequest.new()\n req.open('GET', url, False)\n req.send(None)\n return io.StringIO(req.response)\n\n\ndef eval_code(code, ns):\n \"\"\"\n Runs a string of code, the last part of which may be an expression.\n \"\"\"\n mod = ast.parse(code)\n if isinstance(mod.body[-1], ast.Expr):\n expr = ast.Expression(mod.body[-1].value)\n del mod.body[-1]\n else:\n expr = None\n\n if len(mod.body):\n exec(compile(mod, '<exec>', mode='exec'), ns, ns)\n if expr is not None:\n return eval(compile(expr, '<eval>', mode='eval'), ns, ns)\n else:\n return None\n\n\n__all__ = ['open_url', 'eval_code']\n", "path": "src/pyodide.py"}]} | 441 | 247 |
gh_patches_debug_11998 | rasdani/github-patches | git_diff | pyodide__pyodide-931 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CLAPACK.js is missing from the dev branch
For some reason, I started to see the CLAPACK.js missing error when using pyodide from https://cdn.jsdelivr.net/pyodide/dev/full/:
```
Couldn't load package from URL https://cdn.jsdelivr.net/pyodide/dev/full/CLAPACK.js
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyodide_build/buildall.py`
Content:
```
1 #!/usr/bin/env python3
2
3 """
4 Build all of the packages in a given directory.
5 """
6
7 import argparse
8 from functools import total_ordering
9 import json
10 from pathlib import Path
11 from queue import Queue, PriorityQueue
12 import shutil
13 import subprocess
14 import sys
15 from threading import Thread
16 from time import sleep
17 from typing import Dict, Set, Optional, List
18
19 from . import common
20
21
22 @total_ordering
23 class Package:
24 def __init__(self, pkgdir: Path):
25 self.pkgdir = pkgdir
26
27 pkgpath = pkgdir / "meta.yaml"
28 if not pkgpath.is_file():
29 raise ValueError(f"Directory {pkgdir} does not contain meta.yaml")
30
31 self.meta: dict = common.parse_package(pkgpath)
32 self.name: str = self.meta["package"]["name"]
33 self.library: bool = self.meta.get("build", {}).get("library", False)
34
35 assert self.name == pkgdir.stem
36
37 self.dependencies: List[str] = self.meta.get("requirements", {}).get("run", [])
38 self.unbuilt_dependencies: Set[str] = set(self.dependencies)
39 self.dependents: Set[str] = set()
40
41 def build(self, outputdir: Path, args) -> None:
42 with open(self.pkgdir / "build.log", "w") as f:
43 if self.library:
44 p = subprocess.run(
45 ["make"],
46 cwd=self.pkgdir,
47 check=False,
48 stdout=f,
49 stderr=subprocess.STDOUT,
50 )
51 else:
52 p = subprocess.run(
53 [
54 sys.executable,
55 "-m",
56 "pyodide_build",
57 "buildpkg",
58 str(self.pkgdir / "meta.yaml"),
59 "--package_abi",
60 str(args.package_abi),
61 "--cflags",
62 args.cflags,
63 "--ldflags",
64 args.ldflags,
65 "--target",
66 args.target,
67 "--install-dir",
68 args.install_dir,
69 ],
70 check=False,
71 stdout=f,
72 stderr=subprocess.STDOUT,
73 )
74
75 with open(self.pkgdir / "build.log", "r") as f:
76 shutil.copyfileobj(f, sys.stdout)
77
78 p.check_returncode()
79
80 if not self.library:
81 shutil.copyfile(
82 self.pkgdir / "build" / (self.name + ".data"),
83 outputdir / (self.name + ".data"),
84 )
85 shutil.copyfile(
86 self.pkgdir / "build" / (self.name + ".js"),
87 outputdir / (self.name + ".js"),
88 )
89
90 # We use this in the priority queue, which pops off the smallest element.
91 # So we want the smallest element to have the largest number of dependents
92 def __lt__(self, other) -> bool:
93 return len(self.dependents) > len(other.dependents)
94
95 def __eq__(self, other) -> bool:
96 return len(self.dependents) == len(other.dependents)
97
98
99 def generate_dependency_graph(
100 packages_dir: Path, package_list: Optional[str]
101 ) -> Dict[str, Package]:
102 """
103 This generates a dependency graph for the packages listed in package_list.
104 A node in the graph is a Package object defined above, which maintains a
105 list of dependencies and also dependents. That is, each node stores both
106 incoming and outgoing edges.
107
108 The dependencies and dependents are stored via their name, and we have a
109 lookup table pkg_map: Dict[str, Package] to look up the corresponding
110 Package object. The function returns pkg_map, which contains all packages
111 in the graph as its values.
112
113 Parameters:
114 - packages_dir: directory that contains packages
115 - package_list: set of packages to build. If None, then all packages in
116 packages_dir are compiled.
117
118 Returns:
119 - pkg_map: dictionary mapping package names to Package objects
120 """
121
122 pkg_map: Dict[str, Package] = {}
123
124 packages: Optional[Set[str]] = common._parse_package_subset(package_list)
125 if packages is None:
126 packages = set(
127 str(x) for x in packages_dir.iterdir() if (x / "meta.yaml").is_file()
128 )
129
130 while packages:
131 pkgname = packages.pop()
132
133 pkg = Package(packages_dir / pkgname)
134 pkg_map[pkg.name] = pkg
135
136 for dep in pkg.dependencies:
137 if pkg_map.get(dep) is None:
138 packages.add(dep)
139
140 # Compute dependents
141 for pkg in pkg_map.values():
142 for dep in pkg.dependencies:
143 pkg_map[dep].dependents.add(pkg.name)
144
145 return pkg_map
146
147
148 def build_from_graph(pkg_map: Dict[str, Package], outputdir: Path, args) -> None:
149 """
150 This builds packages in pkg_map in parallel, building at most args.n_jobs
151 packages at once.
152
153 We have a priority queue of packages we are ready to build (build_queue),
154 where a package is ready to build if all its dependencies are built. The
155 priority is based on the number of dependents --- we prefer to build
156 packages with more dependents first.
157
158 To build packages in parallel, we use a thread pool of args.n_jobs many
159 threads listening to build_queue. When the thread is free, it takes an
160 item off build_queue and builds it. Once the package is built, it sends the
161 package to the built_queue. The main thread listens to the built_queue and
162 checks if any of the dependents are ready to be built. If so, it add the
163 package to the build queue.
164 """
165
166 # Insert packages into build_queue. We *must* do this after counting
167 # dependents, because the ordering ought not to change after insertion.
168 build_queue: PriorityQueue = PriorityQueue()
169 for pkg in pkg_map.values():
170 if len(pkg.dependencies) == 0:
171 build_queue.put(pkg)
172
173 built_queue: Queue = Queue()
174
175 def builder(n):
176 print(f"Starting thread {n}")
177 while True:
178 pkg = build_queue.get()
179 print(f"Thread {n} building {pkg.name}")
180 try:
181 pkg.build(outputdir, args)
182 except Exception as e:
183 built_queue.put(e)
184 return
185
186 print(f"Thread {n} built {pkg.name}")
187 built_queue.put(pkg)
188 # Release the GIL so new packages get queued
189 sleep(0.01)
190
191 for n in range(0, args.n_jobs):
192 Thread(target=builder, args=(n + 1,), daemon=True).start()
193
194 num_built = 0
195 while num_built < len(pkg_map):
196 pkg = built_queue.get()
197 if isinstance(pkg, Exception):
198 raise pkg
199
200 num_built += 1
201
202 for _dependent in pkg.dependents:
203 dependent = pkg_map[_dependent]
204 dependent.unbuilt_dependencies.remove(pkg.name)
205 if len(dependent.unbuilt_dependencies) == 0:
206 build_queue.put(dependent)
207
208
209 def build_packages(packages_dir: Path, outputdir: Path, args) -> None:
210 pkg_map = generate_dependency_graph(packages_dir, args.only)
211
212 build_from_graph(pkg_map, outputdir, args)
213
214 # Build package.json data. The "test" package is built in a different way,
215 # so we hardcode its existence here.
216 #
217 # This is done last so the Makefile can use it as a completion token.
218 package_data: dict = {
219 "dependencies": {"test": []},
220 "import_name_to_package_name": {},
221 }
222
223 for name, pkg in pkg_map.items():
224 if pkg.library:
225 continue
226
227 package_data["dependencies"][name] = pkg.dependencies
228 for imp in pkg.meta.get("test", {}).get("imports", [name]):
229 package_data["import_name_to_package_name"][imp] = name
230
231 with open(outputdir / "packages.json", "w") as fd:
232 json.dump(package_data, fd)
233
234
235 def make_parser(parser):
236 parser.description = (
237 "Build all of the packages in a given directory\n\n"
238 "Unless the --only option is provided"
239 )
240 parser.add_argument(
241 "dir",
242 type=str,
243 nargs=1,
244 help="Input directory containing a tree of package definitions",
245 )
246 parser.add_argument(
247 "output",
248 type=str,
249 nargs=1,
250 help="Output directory in which to put all built packages",
251 )
252 parser.add_argument(
253 "--package_abi",
254 type=int,
255 required=True,
256 help="The ABI number for the packages to be built",
257 )
258 parser.add_argument(
259 "--cflags",
260 type=str,
261 nargs="?",
262 default=common.DEFAULTCFLAGS,
263 help="Extra compiling flags",
264 )
265 parser.add_argument(
266 "--ldflags",
267 type=str,
268 nargs="?",
269 default=common.DEFAULTLDFLAGS,
270 help="Extra linking flags",
271 )
272 parser.add_argument(
273 "--target",
274 type=str,
275 nargs="?",
276 default=common.TARGETPYTHON,
277 help="The path to the target Python installation",
278 )
279 parser.add_argument(
280 "--install-dir",
281 type=str,
282 nargs="?",
283 default="",
284 help=(
285 "Directory for installing built host packages. Defaults to setup.py "
286 "default. Set to 'skip' to skip installation. Installation is "
287 "needed if you want to build other packages that depend on this one."
288 ),
289 )
290 parser.add_argument(
291 "--only",
292 type=str,
293 nargs="?",
294 default=None,
295 help=(
296 "Only build the specified packages, provided as a comma " "separated list"
297 ),
298 )
299 parser.add_argument(
300 "--n-jobs",
301 type=int,
302 nargs="?",
303 default=4,
304 help="Number of packages to build in parallel",
305 )
306 return parser
307
308
309 def main(args):
310 packages_dir = Path(args.dir[0]).resolve()
311 outputdir = Path(args.output[0]).resolve()
312 build_packages(packages_dir, outputdir, args)
313
314
315 if __name__ == "__main__":
316 parser = make_parser(argparse.ArgumentParser())
317 args = parser.parse_args()
318 main(args)
319
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pyodide_build/buildall.py b/pyodide_build/buildall.py
--- a/pyodide_build/buildall.py
+++ b/pyodide_build/buildall.py
@@ -220,11 +220,15 @@
"import_name_to_package_name": {},
}
+ libraries = [pkg.name for pkg in pkg_map.values() if pkg.library]
+
for name, pkg in pkg_map.items():
if pkg.library:
continue
- package_data["dependencies"][name] = pkg.dependencies
+ package_data["dependencies"][name] = [
+ x for x in pkg.dependencies if x not in libraries
+ ]
for imp in pkg.meta.get("test", {}).get("imports", [name]):
package_data["import_name_to_package_name"][imp] = name
| {"golden_diff": "diff --git a/pyodide_build/buildall.py b/pyodide_build/buildall.py\n--- a/pyodide_build/buildall.py\n+++ b/pyodide_build/buildall.py\n@@ -220,11 +220,15 @@\n \"import_name_to_package_name\": {},\n }\n \n+ libraries = [pkg.name for pkg in pkg_map.values() if pkg.library]\n+\n for name, pkg in pkg_map.items():\n if pkg.library:\n continue\n \n- package_data[\"dependencies\"][name] = pkg.dependencies\n+ package_data[\"dependencies\"][name] = [\n+ x for x in pkg.dependencies if x not in libraries\n+ ]\n for imp in pkg.meta.get(\"test\", {}).get(\"imports\", [name]):\n package_data[\"import_name_to_package_name\"][imp] = name\n", "issue": "CLAPACK.js is missing from the dev branch\nFor some reason, I started to see the CLAPACK.js missing error when using pyodide from https://cdn.jsdelivr.net/pyodide/dev/full/:\r\n\r\n```\r\nCouldn't load package from URL https://cdn.jsdelivr.net/pyodide/dev/full/CLAPACK.js\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n\"\"\"\nBuild all of the packages in a given directory.\n\"\"\"\n\nimport argparse\nfrom functools import total_ordering\nimport json\nfrom pathlib import Path\nfrom queue import Queue, PriorityQueue\nimport shutil\nimport subprocess\nimport sys\nfrom threading import Thread\nfrom time import sleep\nfrom typing import Dict, Set, Optional, List\n\nfrom . import common\n\n\n@total_ordering\nclass Package:\n def __init__(self, pkgdir: Path):\n self.pkgdir = pkgdir\n\n pkgpath = pkgdir / \"meta.yaml\"\n if not pkgpath.is_file():\n raise ValueError(f\"Directory {pkgdir} does not contain meta.yaml\")\n\n self.meta: dict = common.parse_package(pkgpath)\n self.name: str = self.meta[\"package\"][\"name\"]\n self.library: bool = self.meta.get(\"build\", {}).get(\"library\", False)\n\n assert self.name == pkgdir.stem\n\n self.dependencies: List[str] = self.meta.get(\"requirements\", {}).get(\"run\", [])\n self.unbuilt_dependencies: Set[str] = set(self.dependencies)\n self.dependents: Set[str] = set()\n\n def build(self, outputdir: Path, args) -> None:\n with open(self.pkgdir / \"build.log\", \"w\") as f:\n if self.library:\n p = subprocess.run(\n [\"make\"],\n cwd=self.pkgdir,\n check=False,\n stdout=f,\n stderr=subprocess.STDOUT,\n )\n else:\n p = subprocess.run(\n [\n sys.executable,\n \"-m\",\n \"pyodide_build\",\n \"buildpkg\",\n str(self.pkgdir / \"meta.yaml\"),\n \"--package_abi\",\n str(args.package_abi),\n \"--cflags\",\n args.cflags,\n \"--ldflags\",\n args.ldflags,\n \"--target\",\n args.target,\n \"--install-dir\",\n args.install_dir,\n ],\n check=False,\n stdout=f,\n stderr=subprocess.STDOUT,\n )\n\n with open(self.pkgdir / \"build.log\", \"r\") as f:\n shutil.copyfileobj(f, sys.stdout)\n\n p.check_returncode()\n\n if not self.library:\n shutil.copyfile(\n self.pkgdir / \"build\" / (self.name + \".data\"),\n outputdir / (self.name + \".data\"),\n )\n shutil.copyfile(\n self.pkgdir / \"build\" / (self.name + \".js\"),\n outputdir / (self.name + \".js\"),\n )\n\n # We use this in the priority queue, which pops off the smallest element.\n # So we want the smallest element to have the largest number of dependents\n def __lt__(self, other) -> bool:\n return len(self.dependents) > len(other.dependents)\n\n def __eq__(self, other) -> bool:\n return len(self.dependents) == len(other.dependents)\n\n\ndef generate_dependency_graph(\n packages_dir: Path, package_list: Optional[str]\n) -> Dict[str, Package]:\n \"\"\"\n This generates a dependency graph for the packages listed in package_list.\n A node in the graph is a 
Package object defined above, which maintains a\n list of dependencies and also dependents. That is, each node stores both\n incoming and outgoing edges.\n\n The dependencies and dependents are stored via their name, and we have a\n lookup table pkg_map: Dict[str, Package] to look up the corresponding\n Package object. The function returns pkg_map, which contains all packages\n in the graph as its values.\n\n Parameters:\n - packages_dir: directory that contains packages\n - package_list: set of packages to build. If None, then all packages in\n packages_dir are compiled.\n\n Returns:\n - pkg_map: dictionary mapping package names to Package objects\n \"\"\"\n\n pkg_map: Dict[str, Package] = {}\n\n packages: Optional[Set[str]] = common._parse_package_subset(package_list)\n if packages is None:\n packages = set(\n str(x) for x in packages_dir.iterdir() if (x / \"meta.yaml\").is_file()\n )\n\n while packages:\n pkgname = packages.pop()\n\n pkg = Package(packages_dir / pkgname)\n pkg_map[pkg.name] = pkg\n\n for dep in pkg.dependencies:\n if pkg_map.get(dep) is None:\n packages.add(dep)\n\n # Compute dependents\n for pkg in pkg_map.values():\n for dep in pkg.dependencies:\n pkg_map[dep].dependents.add(pkg.name)\n\n return pkg_map\n\n\ndef build_from_graph(pkg_map: Dict[str, Package], outputdir: Path, args) -> None:\n \"\"\"\n This builds packages in pkg_map in parallel, building at most args.n_jobs\n packages at once.\n\n We have a priority queue of packages we are ready to build (build_queue),\n where a package is ready to build if all its dependencies are built. The\n priority is based on the number of dependents --- we prefer to build\n packages with more dependents first.\n\n To build packages in parallel, we use a thread pool of args.n_jobs many\n threads listening to build_queue. When the thread is free, it takes an\n item off build_queue and builds it. Once the package is built, it sends the\n package to the built_queue. The main thread listens to the built_queue and\n checks if any of the dependents are ready to be built. If so, it add the\n package to the build queue.\n \"\"\"\n\n # Insert packages into build_queue. We *must* do this after counting\n # dependents, because the ordering ought not to change after insertion.\n build_queue: PriorityQueue = PriorityQueue()\n for pkg in pkg_map.values():\n if len(pkg.dependencies) == 0:\n build_queue.put(pkg)\n\n built_queue: Queue = Queue()\n\n def builder(n):\n print(f\"Starting thread {n}\")\n while True:\n pkg = build_queue.get()\n print(f\"Thread {n} building {pkg.name}\")\n try:\n pkg.build(outputdir, args)\n except Exception as e:\n built_queue.put(e)\n return\n\n print(f\"Thread {n} built {pkg.name}\")\n built_queue.put(pkg)\n # Release the GIL so new packages get queued\n sleep(0.01)\n\n for n in range(0, args.n_jobs):\n Thread(target=builder, args=(n + 1,), daemon=True).start()\n\n num_built = 0\n while num_built < len(pkg_map):\n pkg = built_queue.get()\n if isinstance(pkg, Exception):\n raise pkg\n\n num_built += 1\n\n for _dependent in pkg.dependents:\n dependent = pkg_map[_dependent]\n dependent.unbuilt_dependencies.remove(pkg.name)\n if len(dependent.unbuilt_dependencies) == 0:\n build_queue.put(dependent)\n\n\ndef build_packages(packages_dir: Path, outputdir: Path, args) -> None:\n pkg_map = generate_dependency_graph(packages_dir, args.only)\n\n build_from_graph(pkg_map, outputdir, args)\n\n # Build package.json data. 
The \"test\" package is built in a different way,\n # so we hardcode its existence here.\n #\n # This is done last so the Makefile can use it as a completion token.\n package_data: dict = {\n \"dependencies\": {\"test\": []},\n \"import_name_to_package_name\": {},\n }\n\n for name, pkg in pkg_map.items():\n if pkg.library:\n continue\n\n package_data[\"dependencies\"][name] = pkg.dependencies\n for imp in pkg.meta.get(\"test\", {}).get(\"imports\", [name]):\n package_data[\"import_name_to_package_name\"][imp] = name\n\n with open(outputdir / \"packages.json\", \"w\") as fd:\n json.dump(package_data, fd)\n\n\ndef make_parser(parser):\n parser.description = (\n \"Build all of the packages in a given directory\\n\\n\"\n \"Unless the --only option is provided\"\n )\n parser.add_argument(\n \"dir\",\n type=str,\n nargs=1,\n help=\"Input directory containing a tree of package definitions\",\n )\n parser.add_argument(\n \"output\",\n type=str,\n nargs=1,\n help=\"Output directory in which to put all built packages\",\n )\n parser.add_argument(\n \"--package_abi\",\n type=int,\n required=True,\n help=\"The ABI number for the packages to be built\",\n )\n parser.add_argument(\n \"--cflags\",\n type=str,\n nargs=\"?\",\n default=common.DEFAULTCFLAGS,\n help=\"Extra compiling flags\",\n )\n parser.add_argument(\n \"--ldflags\",\n type=str,\n nargs=\"?\",\n default=common.DEFAULTLDFLAGS,\n help=\"Extra linking flags\",\n )\n parser.add_argument(\n \"--target\",\n type=str,\n nargs=\"?\",\n default=common.TARGETPYTHON,\n help=\"The path to the target Python installation\",\n )\n parser.add_argument(\n \"--install-dir\",\n type=str,\n nargs=\"?\",\n default=\"\",\n help=(\n \"Directory for installing built host packages. Defaults to setup.py \"\n \"default. Set to 'skip' to skip installation. Installation is \"\n \"needed if you want to build other packages that depend on this one.\"\n ),\n )\n parser.add_argument(\n \"--only\",\n type=str,\n nargs=\"?\",\n default=None,\n help=(\n \"Only build the specified packages, provided as a comma \" \"separated list\"\n ),\n )\n parser.add_argument(\n \"--n-jobs\",\n type=int,\n nargs=\"?\",\n default=4,\n help=\"Number of packages to build in parallel\",\n )\n return parser\n\n\ndef main(args):\n packages_dir = Path(args.dir[0]).resolve()\n outputdir = Path(args.output[0]).resolve()\n build_packages(packages_dir, outputdir, args)\n\n\nif __name__ == \"__main__\":\n parser = make_parser(argparse.ArgumentParser())\n args = parser.parse_args()\n main(args)\n", "path": "pyodide_build/buildall.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\n\"\"\"\nBuild all of the packages in a given directory.\n\"\"\"\n\nimport argparse\nfrom functools import total_ordering\nimport json\nfrom pathlib import Path\nfrom queue import Queue, PriorityQueue\nimport shutil\nimport subprocess\nimport sys\nfrom threading import Thread\nfrom time import sleep\nfrom typing import Dict, Set, Optional, List\n\nfrom . 
import common\n\n\n@total_ordering\nclass Package:\n def __init__(self, pkgdir: Path):\n self.pkgdir = pkgdir\n\n pkgpath = pkgdir / \"meta.yaml\"\n if not pkgpath.is_file():\n raise ValueError(f\"Directory {pkgdir} does not contain meta.yaml\")\n\n self.meta: dict = common.parse_package(pkgpath)\n self.name: str = self.meta[\"package\"][\"name\"]\n self.library: bool = self.meta.get(\"build\", {}).get(\"library\", False)\n\n assert self.name == pkgdir.stem\n\n self.dependencies: List[str] = self.meta.get(\"requirements\", {}).get(\"run\", [])\n self.unbuilt_dependencies: Set[str] = set(self.dependencies)\n self.dependents: Set[str] = set()\n\n def build(self, outputdir: Path, args) -> None:\n with open(self.pkgdir / \"build.log\", \"w\") as f:\n if self.library:\n p = subprocess.run(\n [\"make\"],\n cwd=self.pkgdir,\n check=False,\n stdout=f,\n stderr=subprocess.STDOUT,\n )\n else:\n p = subprocess.run(\n [\n sys.executable,\n \"-m\",\n \"pyodide_build\",\n \"buildpkg\",\n str(self.pkgdir / \"meta.yaml\"),\n \"--package_abi\",\n str(args.package_abi),\n \"--cflags\",\n args.cflags,\n \"--ldflags\",\n args.ldflags,\n \"--target\",\n args.target,\n \"--install-dir\",\n args.install_dir,\n ],\n check=False,\n stdout=f,\n stderr=subprocess.STDOUT,\n )\n\n with open(self.pkgdir / \"build.log\", \"r\") as f:\n shutil.copyfileobj(f, sys.stdout)\n\n p.check_returncode()\n\n if not self.library:\n shutil.copyfile(\n self.pkgdir / \"build\" / (self.name + \".data\"),\n outputdir / (self.name + \".data\"),\n )\n shutil.copyfile(\n self.pkgdir / \"build\" / (self.name + \".js\"),\n outputdir / (self.name + \".js\"),\n )\n\n # We use this in the priority queue, which pops off the smallest element.\n # So we want the smallest element to have the largest number of dependents\n def __lt__(self, other) -> bool:\n return len(self.dependents) > len(other.dependents)\n\n def __eq__(self, other) -> bool:\n return len(self.dependents) == len(other.dependents)\n\n\ndef generate_dependency_graph(\n packages_dir: Path, package_list: Optional[str]\n) -> Dict[str, Package]:\n \"\"\"\n This generates a dependency graph for the packages listed in package_list.\n A node in the graph is a Package object defined above, which maintains a\n list of dependencies and also dependents. That is, each node stores both\n incoming and outgoing edges.\n\n The dependencies and dependents are stored via their name, and we have a\n lookup table pkg_map: Dict[str, Package] to look up the corresponding\n Package object. The function returns pkg_map, which contains all packages\n in the graph as its values.\n\n Parameters:\n - packages_dir: directory that contains packages\n - package_list: set of packages to build. 
If None, then all packages in\n packages_dir are compiled.\n\n Returns:\n - pkg_map: dictionary mapping package names to Package objects\n \"\"\"\n\n pkg_map: Dict[str, Package] = {}\n\n packages: Optional[Set[str]] = common._parse_package_subset(package_list)\n if packages is None:\n packages = set(\n str(x) for x in packages_dir.iterdir() if (x / \"meta.yaml\").is_file()\n )\n\n while packages:\n pkgname = packages.pop()\n\n pkg = Package(packages_dir / pkgname)\n pkg_map[pkg.name] = pkg\n\n for dep in pkg.dependencies:\n if pkg_map.get(dep) is None:\n packages.add(dep)\n\n # Compute dependents\n for pkg in pkg_map.values():\n for dep in pkg.dependencies:\n pkg_map[dep].dependents.add(pkg.name)\n\n return pkg_map\n\n\ndef build_from_graph(pkg_map: Dict[str, Package], outputdir: Path, args) -> None:\n \"\"\"\n This builds packages in pkg_map in parallel, building at most args.n_jobs\n packages at once.\n\n We have a priority queue of packages we are ready to build (build_queue),\n where a package is ready to build if all its dependencies are built. The\n priority is based on the number of dependents --- we prefer to build\n packages with more dependents first.\n\n To build packages in parallel, we use a thread pool of args.n_jobs many\n threads listening to build_queue. When the thread is free, it takes an\n item off build_queue and builds it. Once the package is built, it sends the\n package to the built_queue. The main thread listens to the built_queue and\n checks if any of the dependents are ready to be built. If so, it add the\n package to the build queue.\n \"\"\"\n\n # Insert packages into build_queue. We *must* do this after counting\n # dependents, because the ordering ought not to change after insertion.\n build_queue: PriorityQueue = PriorityQueue()\n for pkg in pkg_map.values():\n if len(pkg.dependencies) == 0:\n build_queue.put(pkg)\n\n built_queue: Queue = Queue()\n\n def builder(n):\n print(f\"Starting thread {n}\")\n while True:\n pkg = build_queue.get()\n print(f\"Thread {n} building {pkg.name}\")\n try:\n pkg.build(outputdir, args)\n except Exception as e:\n built_queue.put(e)\n return\n\n print(f\"Thread {n} built {pkg.name}\")\n built_queue.put(pkg)\n # Release the GIL so new packages get queued\n sleep(0.01)\n\n for n in range(0, args.n_jobs):\n Thread(target=builder, args=(n + 1,), daemon=True).start()\n\n num_built = 0\n while num_built < len(pkg_map):\n pkg = built_queue.get()\n if isinstance(pkg, Exception):\n raise pkg\n\n num_built += 1\n\n for _dependent in pkg.dependents:\n dependent = pkg_map[_dependent]\n dependent.unbuilt_dependencies.remove(pkg.name)\n if len(dependent.unbuilt_dependencies) == 0:\n build_queue.put(dependent)\n\n\ndef build_packages(packages_dir: Path, outputdir: Path, args) -> None:\n pkg_map = generate_dependency_graph(packages_dir, args.only)\n\n build_from_graph(pkg_map, outputdir, args)\n\n # Build package.json data. 
The \"test\" package is built in a different way,\n # so we hardcode its existence here.\n #\n # This is done last so the Makefile can use it as a completion token.\n package_data: dict = {\n \"dependencies\": {\"test\": []},\n \"import_name_to_package_name\": {},\n }\n\n libraries = [pkg.name for pkg in pkg_map.values() if pkg.library]\n\n for name, pkg in pkg_map.items():\n if pkg.library:\n continue\n\n package_data[\"dependencies\"][name] = [\n x for x in pkg.dependencies if x not in libraries\n ]\n for imp in pkg.meta.get(\"test\", {}).get(\"imports\", [name]):\n package_data[\"import_name_to_package_name\"][imp] = name\n\n with open(outputdir / \"packages.json\", \"w\") as fd:\n json.dump(package_data, fd)\n\n\ndef make_parser(parser):\n parser.description = (\n \"Build all of the packages in a given directory\\n\\n\"\n \"Unless the --only option is provided\"\n )\n parser.add_argument(\n \"dir\",\n type=str,\n nargs=1,\n help=\"Input directory containing a tree of package definitions\",\n )\n parser.add_argument(\n \"output\",\n type=str,\n nargs=1,\n help=\"Output directory in which to put all built packages\",\n )\n parser.add_argument(\n \"--package_abi\",\n type=int,\n required=True,\n help=\"The ABI number for the packages to be built\",\n )\n parser.add_argument(\n \"--cflags\",\n type=str,\n nargs=\"?\",\n default=common.DEFAULTCFLAGS,\n help=\"Extra compiling flags\",\n )\n parser.add_argument(\n \"--ldflags\",\n type=str,\n nargs=\"?\",\n default=common.DEFAULTLDFLAGS,\n help=\"Extra linking flags\",\n )\n parser.add_argument(\n \"--target\",\n type=str,\n nargs=\"?\",\n default=common.TARGETPYTHON,\n help=\"The path to the target Python installation\",\n )\n parser.add_argument(\n \"--install-dir\",\n type=str,\n nargs=\"?\",\n default=\"\",\n help=(\n \"Directory for installing built host packages. Defaults to setup.py \"\n \"default. Set to 'skip' to skip installation. Installation is \"\n \"needed if you want to build other packages that depend on this one.\"\n ),\n )\n parser.add_argument(\n \"--only\",\n type=str,\n nargs=\"?\",\n default=None,\n help=(\n \"Only build the specified packages, provided as a comma \" \"separated list\"\n ),\n )\n parser.add_argument(\n \"--n-jobs\",\n type=int,\n nargs=\"?\",\n default=4,\n help=\"Number of packages to build in parallel\",\n )\n return parser\n\n\ndef main(args):\n packages_dir = Path(args.dir[0]).resolve()\n outputdir = Path(args.output[0]).resolve()\n build_packages(packages_dir, outputdir, args)\n\n\nif __name__ == \"__main__\":\n parser = make_parser(argparse.ArgumentParser())\n args = parser.parse_args()\n main(args)\n", "path": "pyodide_build/buildall.py"}]} | 3,378 | 179 |
gh_patches_debug_31259 | rasdani/github-patches | git_diff | talonhub__community-243 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
feature request: `phrase hello slash world` -> `hello/world`
I'm not sure what the best way to go about this is, it might be simple.
some more examples/unit tests:
- `phrase hello slash world` -> `hello/world` (currently `hello /world`)
- `phrase hello slash snake name of file slash world` -> `hello/name_of_file/world` (currently `hello/name_of_file_/world`)
Similarly: `phrase file dot extension` -> `file.extension`, though this one might be complicated by the fact that I have a formatter called `dot` defined so that `dot hello world` -> `.hello_world` because I use python so much.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `code/formatters.py`
Content:
```
1 from talon import Module, Context, actions, ui, imgui
2 from talon.grammar import Phrase
3 from typing import List, Union
4 import re
5
6 ctx = Context()
7 key = actions.key
8 edit = actions.edit
9
10 words_to_keep_lowercase = "a,an,the,at,by,for,in,is,of,on,to,up,and,as,but,or,nor".split(
11 ","
12 )
13
14 # last_phrase has the last phrase spoken, WITHOUT formatting.
15 # This is needed for reformatting.
16 last_phrase = ""
17
18 # formatted_phrase_history keeps the most recent formatted phrases, WITH formatting.
19 formatted_phrase_history = []
20 formatted_phrase_history_length = 20
21
22
23 def surround(by):
24 def func(i, word, last):
25 if i == 0:
26 word = by + word
27 if last:
28 word += by
29 return word
30
31 return func
32
33
34 def format_phrase(m: Union[str, Phrase], fmtrs: str):
35 global last_phrase
36 last_phrase = m
37 words = []
38 if isinstance(m, str):
39 words = m.split(" ")
40 else:
41 if m.words[-1] == "over":
42 m.words = m.words[:-1]
43
44 words = actions.dictate.parse_words(m)
45 words = actions.dictate.replace_words(words)
46
47 result = format_phrase_no_history(words, fmtrs)
48
49 # Add result to history.
50 global formatted_phrase_history
51 formatted_phrase_history.insert(0, result)
52 formatted_phrase_history = formatted_phrase_history[
53 :formatted_phrase_history_length
54 ]
55
56 return result
57
58
59 def format_phrase_no_history(word_list, fmtrs: str):
60 fmtr_list = fmtrs.split(",")
61 words = []
62 spaces = True
63 for i, w in enumerate(word_list):
64 for name in reversed(fmtr_list):
65 smash, func = all_formatters[name]
66 w = func(i, w, i == len(word_list) - 1)
67 spaces = spaces and not smash
68 words.append(w)
69 sep = " " if spaces else ""
70 return sep.join(words)
71
72
73 NOSEP = True
74 SEP = False
75
76
77 def words_with_joiner(joiner):
78 """Pass through words unchanged, but add a separator between them."""
79
80 def formatter_function(i, word, _):
81 return word if i == 0 else joiner + word
82
83 return (NOSEP, formatter_function)
84
85
86 def first_vs_rest(first_func, rest_func=lambda w: w):
87 """Supply one or two transformer functions for the first and rest of
88 words respectively.
89
90 Leave second argument out if you want all but the first word to be passed
91 through unchanged.
92 Set first argument to None if you want the first word to be passed
93 through unchanged."""
94 if first_func is None:
95 first_func = lambda w: w
96
97 def formatter_function(i, word, _):
98 return first_func(word) if i == 0 else rest_func(word)
99
100 return formatter_function
101
102
103 def every_word(word_func):
104 """Apply one function to every word."""
105
106 def formatter_function(i, word, _):
107 return word_func(word)
108
109 return formatter_function
110
111
112 formatters_dict = {
113 "NOOP": (SEP, lambda i, word, _: word),
114 "DOUBLE_UNDERSCORE": (NOSEP, first_vs_rest(lambda w: "__%s__" % w)),
115 "PRIVATE_CAMEL_CASE": (NOSEP, first_vs_rest(lambda w: w, lambda w: w.capitalize())),
116 "PROTECTED_CAMEL_CASE": (
117 NOSEP,
118 first_vs_rest(lambda w: w, lambda w: w.capitalize()),
119 ),
120 "PUBLIC_CAMEL_CASE": (NOSEP, every_word(lambda w: w.capitalize())),
121 "SNAKE_CASE": (
122 NOSEP,
123 first_vs_rest(lambda w: w.lower(), lambda w: "_" + w.lower()),
124 ),
125 "NO_SPACES": (NOSEP, every_word(lambda w: w)),
126 "DASH_SEPARATED": words_with_joiner("-"),
127 "TERMINAL_DASH_SEPARATED": (
128 NOSEP,
129 first_vs_rest(lambda w: " --" + w.lower(), lambda w: "-" + w.lower()),
130 ),
131 "DOUBLE_COLON_SEPARATED": words_with_joiner("::"),
132 "ALL_CAPS": (SEP, every_word(lambda w: w.upper())),
133 "ALL_LOWERCASE": (SEP, every_word(lambda w: w.lower())),
134 "DOUBLE_QUOTED_STRING": (SEP, surround('"')),
135 "SINGLE_QUOTED_STRING": (SEP, surround("'")),
136 "SPACE_SURROUNDED_STRING": (SEP, surround(" ")),
137 "DOT_SEPARATED": words_with_joiner("."),
138 "DOT_SNAKE": (NOSEP, lambda i, word, _: "." + word if i == 0 else "_" + word),
139 "SLASH_SEPARATED": (NOSEP, every_word(lambda w: "/" + w)),
140 "CAPITALIZE_FIRST_WORD": (SEP, first_vs_rest(lambda w: w.capitalize())),
141 "CAPITALIZE_ALL_WORDS": (
142 SEP,
143 lambda i, word, _: word.capitalize()
144 if i == 0 or word not in words_to_keep_lowercase
145 else word,
146 ),
147 "FIRST_THREE": (NOSEP, lambda i, word, _: word[0:3]),
148 "FIRST_FOUR": (NOSEP, lambda i, word, _: word[0:4]),
149 "FIRST_FIVE": (NOSEP, lambda i, word, _: word[0:5]),
150 }
151
152 # This is the mapping from spoken phrases to formatters
153 formatters_words = {
154 "allcaps": formatters_dict["ALL_CAPS"],
155 "alldown": formatters_dict["ALL_LOWERCASE"],
156 "camel": formatters_dict["PRIVATE_CAMEL_CASE"],
157 "dotted": formatters_dict["DOT_SEPARATED"],
158 "dubstring": formatters_dict["DOUBLE_QUOTED_STRING"],
159 "dunder": formatters_dict["DOUBLE_UNDERSCORE"],
160 "hammer": formatters_dict["PUBLIC_CAMEL_CASE"],
161 "kebab": formatters_dict["DASH_SEPARATED"],
162 "packed": formatters_dict["DOUBLE_COLON_SEPARATED"],
163 "padded": formatters_dict["SPACE_SURROUNDED_STRING"],
164 # "say": formatters_dict["NOOP"],
165 "sentence": formatters_dict["CAPITALIZE_FIRST_WORD"],
166 "slasher": formatters_dict["SLASH_SEPARATED"],
167 "smash": formatters_dict["NO_SPACES"],
168 "snake": formatters_dict["SNAKE_CASE"],
169 # "speak": formatters_dict["NOOP"],
170 "string": formatters_dict["SINGLE_QUOTED_STRING"],
171 "title": formatters_dict["CAPITALIZE_ALL_WORDS"],
172 # disable a few formatters for now
173 # "tree": formatters_dict["FIRST_THREE"],
174 # "quad": formatters_dict["FIRST_FOUR"],
175 # "fiver": formatters_dict["FIRST_FIVE"],
176 }
177
178 all_formatters = {}
179 all_formatters.update(formatters_dict)
180 all_formatters.update(formatters_words)
181
182 mod = Module()
183 mod.list("formatters", desc="list of formatters")
184
185
186 @mod.capture
187 def formatters(m) -> str:
188 "Returns a comma-separated string of formatters e.g. 'SNAKE,DUBSTRING'"
189
190
191 @mod.capture
192 def format_text(m) -> str:
193 "Formats the text and returns a string"
194
195
196 @mod.action_class
197 class Actions:
198 def formatted_text(phrase: Union[str, Phrase], formatters: str) -> str:
199 """Formats a phrase according to formatters. formatters is a comma-separated string of formatters (e.g. 'CAPITALIZE_ALL_WORDS,DOUBLE_QUOTED_STRING')"""
200 return format_phrase(phrase, formatters)
201
202 def formatters_help_toggle():
203 """Lists all formatters"""
204 if gui.showing:
205 gui.hide()
206 else:
207 gui.show()
208
209 def formatters_recent_toggle():
210 """Toggles list of recent formatters"""
211 if recent_gui.showing:
212 recent_gui.hide()
213 else:
214 recent_gui.show()
215
216 def formatters_recent_select(number: int):
217 """Inserts a recent formatter"""
218 if len(formatted_phrase_history) >= number:
219 return formatted_phrase_history[number - 1]
220 return ""
221
222 def formatters_clear_last():
223 """Clears the last formatted phrase"""
224 if len(formatted_phrase_history) > 0:
225 for character in formatted_phrase_history[0]:
226 actions.edit.delete()
227
228 def formatters_reformat_last(formatters: str) -> str:
229 """Reformats last formatted phrase"""
230 global last_phrase
231 return format_phrase(last_phrase, formatters)
232
233 def formatters_reformat_selection(formatters: str) -> str:
234 """Reformats the current selection."""
235 selected = edit.selected_text()
236 unformatted = re.sub(r"[^a-zA-Z0-9]+", " ", selected).lower()
237 # TODO: Separate out camelcase & studleycase vars
238
239 # Delete separately for compatibility with programs that don't overwrite
240 # selected text (e.g. Emacs)
241 edit.delete()
242 text = actions.self.formatted_text(unformatted, formatters)
243 actions.insert(text)
244 return text
245
246
247 @ctx.capture(rule="{self.formatters}+")
248 def formatters(m):
249 return ",".join(m.formatters_list)
250
251
252 @ctx.capture(rule="<self.formatters> <user.text>")
253 def format_text(m):
254 return format_phrase(m.text, m.formatters)
255
256
257 ctx.lists["self.formatters"] = formatters_words.keys()
258
259
260 @imgui.open(software=False)
261 def gui(gui: imgui.GUI):
262 gui.text("List formatters")
263 gui.line()
264 for name in sorted(set(formatters_words.keys())):
265 gui.text(f"{name} | {format_phrase_no_history(['one', 'two', 'three'], name)}")
266
267
268 @imgui.open(software=False)
269 def recent_gui(gui: imgui.GUI):
270 gui.text("Recent formatters")
271 gui.line()
272 for index, result in enumerate(formatted_phrase_history, 1):
273 gui.text("{}. {}".format(index, result))
274
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/code/formatters.py b/code/formatters.py
--- a/code/formatters.py
+++ b/code/formatters.py
@@ -193,6 +193,22 @@
"Formats the text and returns a string"
+class ImmuneString(object):
+ """Wrapper that makes a string immune from formatting."""
+
+ def __init__(self, string):
+ self.string = string
+
+
[email protected]
+def formatter_immune(m) -> ImmuneString:
+ """Text that can be interspersed into a formatter, e.g. characters.
+
+ It will be inserted directly, without being formatted.
+
+ """
+
+
@mod.action_class
class Actions:
def formatted_text(phrase: Union[str, Phrase], formatters: str) -> str:
@@ -243,15 +259,45 @@
actions.insert(text)
return text
+ def insert_many(strings: List[str]) -> None:
+ """Insert a list of strings, sequentially."""
+ for string in strings:
+ actions.insert(string)
+
@ctx.capture(rule="{self.formatters}+")
def formatters(m):
return ",".join(m.formatters_list)
[email protected](rule="<self.formatters> <user.text>")
[email protected](
+ # Add anything else into this that you want to be able to speak during a
+ # formatter.
+ rule="(<user.symbol_key> | <user.letter> | numb <number>)"
+)
+def formatter_immune(m) -> ImmuneString:
+ if hasattr(m, "number"):
+ value = m.number
+ else:
+ value = m[0]
+ return ImmuneString(str(value))
+
+
[email protected](
+ # Note that if the user speaks something like "snake dot", it will
+ # insert "dot" - otherwise, they wouldn't be able to insert punctuation
+ # words directly.
+ rule="<self.formatters> <user.text> (<user.text> | <user.formatter_immune>)*"
+)
def format_text(m):
- return format_phrase(m.text, m.formatters)
+ out = ""
+ formatters = m[0]
+ for chunk in m[1:]:
+ if isinstance(chunk, ImmuneString):
+ out += chunk.string
+ else:
+ out += format_phrase(chunk, formatters)
+ return out
ctx.lists["self.formatters"] = formatters_words.keys()
| {"golden_diff": "diff --git a/code/formatters.py b/code/formatters.py\n--- a/code/formatters.py\n+++ b/code/formatters.py\n@@ -193,6 +193,22 @@\n \"Formats the text and returns a string\"\n \n \n+class ImmuneString(object):\n+ \"\"\"Wrapper that makes a string immune from formatting.\"\"\"\n+\n+ def __init__(self, string):\n+ self.string = string\n+\n+\[email protected]\n+def formatter_immune(m) -> ImmuneString:\n+ \"\"\"Text that can be interspersed into a formatter, e.g. characters.\n+\n+ It will be inserted directly, without being formatted.\n+\n+ \"\"\"\n+\n+\n @mod.action_class\n class Actions:\n def formatted_text(phrase: Union[str, Phrase], formatters: str) -> str:\n@@ -243,15 +259,45 @@\n actions.insert(text)\n return text\n \n+ def insert_many(strings: List[str]) -> None:\n+ \"\"\"Insert a list of strings, sequentially.\"\"\"\n+ for string in strings:\n+ actions.insert(string)\n+\n \n @ctx.capture(rule=\"{self.formatters}+\")\n def formatters(m):\n return \",\".join(m.formatters_list)\n \n \[email protected](rule=\"<self.formatters> <user.text>\")\[email protected](\n+ # Add anything else into this that you want to be able to speak during a\n+ # formatter.\n+ rule=\"(<user.symbol_key> | <user.letter> | numb <number>)\"\n+)\n+def formatter_immune(m) -> ImmuneString:\n+ if hasattr(m, \"number\"):\n+ value = m.number\n+ else:\n+ value = m[0]\n+ return ImmuneString(str(value))\n+\n+\[email protected](\n+ # Note that if the user speaks something like \"snake dot\", it will\n+ # insert \"dot\" - otherwise, they wouldn't be able to insert punctuation\n+ # words directly.\n+ rule=\"<self.formatters> <user.text> (<user.text> | <user.formatter_immune>)*\"\n+)\n def format_text(m):\n- return format_phrase(m.text, m.formatters)\n+ out = \"\"\n+ formatters = m[0]\n+ for chunk in m[1:]:\n+ if isinstance(chunk, ImmuneString):\n+ out += chunk.string\n+ else:\n+ out += format_phrase(chunk, formatters)\n+ return out\n \n \n ctx.lists[\"self.formatters\"] = formatters_words.keys()\n", "issue": "feature request: `phrase hello slash world` -> `hello/world`\nI'm not sure what the best way to go about this is, it might be simple.\r\n\r\nsome more examples/unit tests:\r\n\r\n- `phrase hello slash world` -> `hello/world` (currently `hello /world`)\r\n- `phrase hello slash snake name of file slash world` -> `hello/name_of_file/world` (currently `hello/name_of_file_/world`)\r\n\r\nSimilarly: `phrase file dot extension` -> `file.extension`, though this one might be complicated by the fact that I have a formatter called `dot` defined so that `dot hello world` -> `.hello_world` because I use python so much.\n", "before_files": [{"content": "from talon import Module, Context, actions, ui, imgui\nfrom talon.grammar import Phrase\nfrom typing import List, Union\nimport re\n\nctx = Context()\nkey = actions.key\nedit = actions.edit\n\nwords_to_keep_lowercase = \"a,an,the,at,by,for,in,is,of,on,to,up,and,as,but,or,nor\".split(\n \",\"\n)\n\n# last_phrase has the last phrase spoken, WITHOUT formatting.\n# This is needed for reformatting.\nlast_phrase = \"\"\n\n# formatted_phrase_history keeps the most recent formatted phrases, WITH formatting.\nformatted_phrase_history = []\nformatted_phrase_history_length = 20\n\n\ndef surround(by):\n def func(i, word, last):\n if i == 0:\n word = by + word\n if last:\n word += by\n return word\n\n return func\n\n\ndef format_phrase(m: Union[str, Phrase], fmtrs: str):\n global last_phrase\n last_phrase = m\n words = []\n if isinstance(m, str):\n words = m.split(\" \")\n else:\n if 
m.words[-1] == \"over\":\n m.words = m.words[:-1]\n\n words = actions.dictate.parse_words(m)\n words = actions.dictate.replace_words(words)\n\n result = format_phrase_no_history(words, fmtrs)\n\n # Add result to history.\n global formatted_phrase_history\n formatted_phrase_history.insert(0, result)\n formatted_phrase_history = formatted_phrase_history[\n :formatted_phrase_history_length\n ]\n\n return result\n\n\ndef format_phrase_no_history(word_list, fmtrs: str):\n fmtr_list = fmtrs.split(\",\")\n words = []\n spaces = True\n for i, w in enumerate(word_list):\n for name in reversed(fmtr_list):\n smash, func = all_formatters[name]\n w = func(i, w, i == len(word_list) - 1)\n spaces = spaces and not smash\n words.append(w)\n sep = \" \" if spaces else \"\"\n return sep.join(words)\n\n\nNOSEP = True\nSEP = False\n\n\ndef words_with_joiner(joiner):\n \"\"\"Pass through words unchanged, but add a separator between them.\"\"\"\n\n def formatter_function(i, word, _):\n return word if i == 0 else joiner + word\n\n return (NOSEP, formatter_function)\n\n\ndef first_vs_rest(first_func, rest_func=lambda w: w):\n \"\"\"Supply one or two transformer functions for the first and rest of\n words respectively.\n\n Leave second argument out if you want all but the first word to be passed\n through unchanged.\n Set first argument to None if you want the first word to be passed\n through unchanged.\"\"\"\n if first_func is None:\n first_func = lambda w: w\n\n def formatter_function(i, word, _):\n return first_func(word) if i == 0 else rest_func(word)\n\n return formatter_function\n\n\ndef every_word(word_func):\n \"\"\"Apply one function to every word.\"\"\"\n\n def formatter_function(i, word, _):\n return word_func(word)\n\n return formatter_function\n\n\nformatters_dict = {\n \"NOOP\": (SEP, lambda i, word, _: word),\n \"DOUBLE_UNDERSCORE\": (NOSEP, first_vs_rest(lambda w: \"__%s__\" % w)),\n \"PRIVATE_CAMEL_CASE\": (NOSEP, first_vs_rest(lambda w: w, lambda w: w.capitalize())),\n \"PROTECTED_CAMEL_CASE\": (\n NOSEP,\n first_vs_rest(lambda w: w, lambda w: w.capitalize()),\n ),\n \"PUBLIC_CAMEL_CASE\": (NOSEP, every_word(lambda w: w.capitalize())),\n \"SNAKE_CASE\": (\n NOSEP,\n first_vs_rest(lambda w: w.lower(), lambda w: \"_\" + w.lower()),\n ),\n \"NO_SPACES\": (NOSEP, every_word(lambda w: w)),\n \"DASH_SEPARATED\": words_with_joiner(\"-\"),\n \"TERMINAL_DASH_SEPARATED\": (\n NOSEP,\n first_vs_rest(lambda w: \" --\" + w.lower(), lambda w: \"-\" + w.lower()),\n ),\n \"DOUBLE_COLON_SEPARATED\": words_with_joiner(\"::\"),\n \"ALL_CAPS\": (SEP, every_word(lambda w: w.upper())),\n \"ALL_LOWERCASE\": (SEP, every_word(lambda w: w.lower())),\n \"DOUBLE_QUOTED_STRING\": (SEP, surround('\"')),\n \"SINGLE_QUOTED_STRING\": (SEP, surround(\"'\")),\n \"SPACE_SURROUNDED_STRING\": (SEP, surround(\" \")),\n \"DOT_SEPARATED\": words_with_joiner(\".\"),\n \"DOT_SNAKE\": (NOSEP, lambda i, word, _: \".\" + word if i == 0 else \"_\" + word),\n \"SLASH_SEPARATED\": (NOSEP, every_word(lambda w: \"/\" + w)),\n \"CAPITALIZE_FIRST_WORD\": (SEP, first_vs_rest(lambda w: w.capitalize())),\n \"CAPITALIZE_ALL_WORDS\": (\n SEP,\n lambda i, word, _: word.capitalize()\n if i == 0 or word not in words_to_keep_lowercase\n else word,\n ),\n \"FIRST_THREE\": (NOSEP, lambda i, word, _: word[0:3]),\n \"FIRST_FOUR\": (NOSEP, lambda i, word, _: word[0:4]),\n \"FIRST_FIVE\": (NOSEP, lambda i, word, _: word[0:5]),\n}\n\n# This is the mapping from spoken phrases to formatters\nformatters_words = {\n \"allcaps\": formatters_dict[\"ALL_CAPS\"],\n 
\"alldown\": formatters_dict[\"ALL_LOWERCASE\"],\n \"camel\": formatters_dict[\"PRIVATE_CAMEL_CASE\"],\n \"dotted\": formatters_dict[\"DOT_SEPARATED\"],\n \"dubstring\": formatters_dict[\"DOUBLE_QUOTED_STRING\"],\n \"dunder\": formatters_dict[\"DOUBLE_UNDERSCORE\"],\n \"hammer\": formatters_dict[\"PUBLIC_CAMEL_CASE\"],\n \"kebab\": formatters_dict[\"DASH_SEPARATED\"],\n \"packed\": formatters_dict[\"DOUBLE_COLON_SEPARATED\"],\n \"padded\": formatters_dict[\"SPACE_SURROUNDED_STRING\"],\n # \"say\": formatters_dict[\"NOOP\"],\n \"sentence\": formatters_dict[\"CAPITALIZE_FIRST_WORD\"],\n \"slasher\": formatters_dict[\"SLASH_SEPARATED\"],\n \"smash\": formatters_dict[\"NO_SPACES\"],\n \"snake\": formatters_dict[\"SNAKE_CASE\"],\n # \"speak\": formatters_dict[\"NOOP\"],\n \"string\": formatters_dict[\"SINGLE_QUOTED_STRING\"],\n \"title\": formatters_dict[\"CAPITALIZE_ALL_WORDS\"],\n # disable a few formatters for now\n # \"tree\": formatters_dict[\"FIRST_THREE\"],\n # \"quad\": formatters_dict[\"FIRST_FOUR\"],\n # \"fiver\": formatters_dict[\"FIRST_FIVE\"],\n}\n\nall_formatters = {}\nall_formatters.update(formatters_dict)\nall_formatters.update(formatters_words)\n\nmod = Module()\nmod.list(\"formatters\", desc=\"list of formatters\")\n\n\[email protected]\ndef formatters(m) -> str:\n \"Returns a comma-separated string of formatters e.g. 'SNAKE,DUBSTRING'\"\n\n\[email protected]\ndef format_text(m) -> str:\n \"Formats the text and returns a string\"\n\n\[email protected]_class\nclass Actions:\n def formatted_text(phrase: Union[str, Phrase], formatters: str) -> str:\n \"\"\"Formats a phrase according to formatters. formatters is a comma-separated string of formatters (e.g. 'CAPITALIZE_ALL_WORDS,DOUBLE_QUOTED_STRING')\"\"\"\n return format_phrase(phrase, formatters)\n\n def formatters_help_toggle():\n \"\"\"Lists all formatters\"\"\"\n if gui.showing:\n gui.hide()\n else:\n gui.show()\n\n def formatters_recent_toggle():\n \"\"\"Toggles list of recent formatters\"\"\"\n if recent_gui.showing:\n recent_gui.hide()\n else:\n recent_gui.show()\n\n def formatters_recent_select(number: int):\n \"\"\"Inserts a recent formatter\"\"\"\n if len(formatted_phrase_history) >= number:\n return formatted_phrase_history[number - 1]\n return \"\"\n\n def formatters_clear_last():\n \"\"\"Clears the last formatted phrase\"\"\"\n if len(formatted_phrase_history) > 0:\n for character in formatted_phrase_history[0]:\n actions.edit.delete()\n\n def formatters_reformat_last(formatters: str) -> str:\n \"\"\"Reformats last formatted phrase\"\"\"\n global last_phrase\n return format_phrase(last_phrase, formatters)\n\n def formatters_reformat_selection(formatters: str) -> str:\n \"\"\"Reformats the current selection.\"\"\"\n selected = edit.selected_text()\n unformatted = re.sub(r\"[^a-zA-Z0-9]+\", \" \", selected).lower()\n # TODO: Separate out camelcase & studleycase vars\n\n # Delete separately for compatibility with programs that don't overwrite\n # selected text (e.g. 
Emacs)\n edit.delete()\n text = actions.self.formatted_text(unformatted, formatters)\n actions.insert(text)\n return text\n\n\[email protected](rule=\"{self.formatters}+\")\ndef formatters(m):\n return \",\".join(m.formatters_list)\n\n\[email protected](rule=\"<self.formatters> <user.text>\")\ndef format_text(m):\n return format_phrase(m.text, m.formatters)\n\n\nctx.lists[\"self.formatters\"] = formatters_words.keys()\n\n\[email protected](software=False)\ndef gui(gui: imgui.GUI):\n gui.text(\"List formatters\")\n gui.line()\n for name in sorted(set(formatters_words.keys())):\n gui.text(f\"{name} | {format_phrase_no_history(['one', 'two', 'three'], name)}\")\n\n\[email protected](software=False)\ndef recent_gui(gui: imgui.GUI):\n gui.text(\"Recent formatters\")\n gui.line()\n for index, result in enumerate(formatted_phrase_history, 1):\n gui.text(\"{}. {}\".format(index, result))\n", "path": "code/formatters.py"}], "after_files": [{"content": "from talon import Module, Context, actions, ui, imgui\nfrom talon.grammar import Phrase\nfrom typing import List, Union\nimport re\n\nctx = Context()\nkey = actions.key\nedit = actions.edit\n\nwords_to_keep_lowercase = \"a,an,the,at,by,for,in,is,of,on,to,up,and,as,but,or,nor\".split(\n \",\"\n)\n\n# last_phrase has the last phrase spoken, WITHOUT formatting.\n# This is needed for reformatting.\nlast_phrase = \"\"\n\n# formatted_phrase_history keeps the most recent formatted phrases, WITH formatting.\nformatted_phrase_history = []\nformatted_phrase_history_length = 20\n\n\ndef surround(by):\n def func(i, word, last):\n if i == 0:\n word = by + word\n if last:\n word += by\n return word\n\n return func\n\n\ndef format_phrase(m: Union[str, Phrase], fmtrs: str):\n global last_phrase\n last_phrase = m\n words = []\n if isinstance(m, str):\n words = m.split(\" \")\n else:\n if m.words[-1] == \"over\":\n m.words = m.words[:-1]\n\n words = actions.dictate.parse_words(m)\n words = actions.dictate.replace_words(words)\n\n result = format_phrase_no_history(words, fmtrs)\n\n # Add result to history.\n global formatted_phrase_history\n formatted_phrase_history.insert(0, result)\n formatted_phrase_history = formatted_phrase_history[\n :formatted_phrase_history_length\n ]\n\n return result\n\n\ndef format_phrase_no_history(word_list, fmtrs: str):\n fmtr_list = fmtrs.split(\",\")\n words = []\n spaces = True\n for i, w in enumerate(word_list):\n for name in reversed(fmtr_list):\n smash, func = all_formatters[name]\n w = func(i, w, i == len(word_list) - 1)\n spaces = spaces and not smash\n words.append(w)\n sep = \" \" if spaces else \"\"\n return sep.join(words)\n\n\nNOSEP = True\nSEP = False\n\n\ndef words_with_joiner(joiner):\n \"\"\"Pass through words unchanged, but add a separator between them.\"\"\"\n\n def formatter_function(i, word, _):\n return word if i == 0 else joiner + word\n\n return (NOSEP, formatter_function)\n\n\ndef first_vs_rest(first_func, rest_func=lambda w: w):\n \"\"\"Supply one or two transformer functions for the first and rest of\n words respectively.\n\n Leave second argument out if you want all but the first word to be passed\n through unchanged.\n Set first argument to None if you want the first word to be passed\n through unchanged.\"\"\"\n if first_func is None:\n first_func = lambda w: w\n\n def formatter_function(i, word, _):\n return first_func(word) if i == 0 else rest_func(word)\n\n return formatter_function\n\n\ndef every_word(word_func):\n \"\"\"Apply one function to every word.\"\"\"\n\n def formatter_function(i, word, _):\n 
return word_func(word)\n\n return formatter_function\n\n\nformatters_dict = {\n \"NOOP\": (SEP, lambda i, word, _: word),\n \"DOUBLE_UNDERSCORE\": (NOSEP, first_vs_rest(lambda w: \"__%s__\" % w)),\n \"PRIVATE_CAMEL_CASE\": (NOSEP, first_vs_rest(lambda w: w, lambda w: w.capitalize())),\n \"PROTECTED_CAMEL_CASE\": (\n NOSEP,\n first_vs_rest(lambda w: w, lambda w: w.capitalize()),\n ),\n \"PUBLIC_CAMEL_CASE\": (NOSEP, every_word(lambda w: w.capitalize())),\n \"SNAKE_CASE\": (\n NOSEP,\n first_vs_rest(lambda w: w.lower(), lambda w: \"_\" + w.lower()),\n ),\n \"NO_SPACES\": (NOSEP, every_word(lambda w: w)),\n \"DASH_SEPARATED\": words_with_joiner(\"-\"),\n \"TERMINAL_DASH_SEPARATED\": (\n NOSEP,\n first_vs_rest(lambda w: \" --\" + w.lower(), lambda w: \"-\" + w.lower()),\n ),\n \"DOUBLE_COLON_SEPARATED\": words_with_joiner(\"::\"),\n \"ALL_CAPS\": (SEP, every_word(lambda w: w.upper())),\n \"ALL_LOWERCASE\": (SEP, every_word(lambda w: w.lower())),\n \"DOUBLE_QUOTED_STRING\": (SEP, surround('\"')),\n \"SINGLE_QUOTED_STRING\": (SEP, surround(\"'\")),\n \"SPACE_SURROUNDED_STRING\": (SEP, surround(\" \")),\n \"DOT_SEPARATED\": words_with_joiner(\".\"),\n \"DOT_SNAKE\": (NOSEP, lambda i, word, _: \".\" + word if i == 0 else \"_\" + word),\n \"SLASH_SEPARATED\": (NOSEP, every_word(lambda w: \"/\" + w)),\n \"CAPITALIZE_FIRST_WORD\": (SEP, first_vs_rest(lambda w: w.capitalize())),\n \"CAPITALIZE_ALL_WORDS\": (\n SEP,\n lambda i, word, _: word.capitalize()\n if i == 0 or word not in words_to_keep_lowercase\n else word,\n ),\n \"FIRST_THREE\": (NOSEP, lambda i, word, _: word[0:3]),\n \"FIRST_FOUR\": (NOSEP, lambda i, word, _: word[0:4]),\n \"FIRST_FIVE\": (NOSEP, lambda i, word, _: word[0:5]),\n}\n\n# This is the mapping from spoken phrases to formatters\nformatters_words = {\n \"allcaps\": formatters_dict[\"ALL_CAPS\"],\n \"alldown\": formatters_dict[\"ALL_LOWERCASE\"],\n \"camel\": formatters_dict[\"PRIVATE_CAMEL_CASE\"],\n \"dotted\": formatters_dict[\"DOT_SEPARATED\"],\n \"dubstring\": formatters_dict[\"DOUBLE_QUOTED_STRING\"],\n \"dunder\": formatters_dict[\"DOUBLE_UNDERSCORE\"],\n \"hammer\": formatters_dict[\"PUBLIC_CAMEL_CASE\"],\n \"kebab\": formatters_dict[\"DASH_SEPARATED\"],\n \"packed\": formatters_dict[\"DOUBLE_COLON_SEPARATED\"],\n \"padded\": formatters_dict[\"SPACE_SURROUNDED_STRING\"],\n # \"say\": formatters_dict[\"NOOP\"],\n \"sentence\": formatters_dict[\"CAPITALIZE_FIRST_WORD\"],\n \"slasher\": formatters_dict[\"SLASH_SEPARATED\"],\n \"smash\": formatters_dict[\"NO_SPACES\"],\n \"snake\": formatters_dict[\"SNAKE_CASE\"],\n # \"speak\": formatters_dict[\"NOOP\"],\n \"string\": formatters_dict[\"SINGLE_QUOTED_STRING\"],\n \"title\": formatters_dict[\"CAPITALIZE_ALL_WORDS\"],\n # disable a few formatters for now\n # \"tree\": formatters_dict[\"FIRST_THREE\"],\n # \"quad\": formatters_dict[\"FIRST_FOUR\"],\n # \"fiver\": formatters_dict[\"FIRST_FIVE\"],\n}\n\nall_formatters = {}\nall_formatters.update(formatters_dict)\nall_formatters.update(formatters_words)\n\nmod = Module()\nmod.list(\"formatters\", desc=\"list of formatters\")\n\n\[email protected]\ndef formatters(m) -> str:\n \"Returns a comma-separated string of formatters e.g. 
'SNAKE,DUBSTRING'\"\n\n\[email protected]\ndef format_text(m) -> str:\n \"Formats the text and returns a string\"\n\n\nclass ImmuneString(object):\n \"\"\"Wrapper that makes a string immune from formatting.\"\"\"\n\n def __init__(self, string):\n self.string = string\n\n\[email protected]\ndef formatter_immune(m) -> ImmuneString:\n \"\"\"Text that can be interspersed into a formatter, e.g. characters.\n\n It will be inserted directly, without being formatted.\n\n \"\"\"\n\n\[email protected]_class\nclass Actions:\n def formatted_text(phrase: Union[str, Phrase], formatters: str) -> str:\n \"\"\"Formats a phrase according to formatters. formatters is a comma-separated string of formatters (e.g. 'CAPITALIZE_ALL_WORDS,DOUBLE_QUOTED_STRING')\"\"\"\n return format_phrase(phrase, formatters)\n\n def formatters_help_toggle():\n \"\"\"Lists all formatters\"\"\"\n if gui.showing:\n gui.hide()\n else:\n gui.show()\n\n def formatters_recent_toggle():\n \"\"\"Toggles list of recent formatters\"\"\"\n if recent_gui.showing:\n recent_gui.hide()\n else:\n recent_gui.show()\n\n def formatters_recent_select(number: int):\n \"\"\"Inserts a recent formatter\"\"\"\n if len(formatted_phrase_history) >= number:\n return formatted_phrase_history[number - 1]\n return \"\"\n\n def formatters_clear_last():\n \"\"\"Clears the last formatted phrase\"\"\"\n if len(formatted_phrase_history) > 0:\n for character in formatted_phrase_history[0]:\n actions.edit.delete()\n\n def formatters_reformat_last(formatters: str) -> str:\n \"\"\"Reformats last formatted phrase\"\"\"\n global last_phrase\n return format_phrase(last_phrase, formatters)\n\n def formatters_reformat_selection(formatters: str) -> str:\n \"\"\"Reformats the current selection.\"\"\"\n selected = edit.selected_text()\n unformatted = re.sub(r\"[^a-zA-Z0-9]+\", \" \", selected).lower()\n # TODO: Separate out camelcase & studleycase vars\n\n # Delete separately for compatibility with programs that don't overwrite\n # selected text (e.g. 
Emacs)\n edit.delete()\n text = actions.self.formatted_text(unformatted, formatters)\n actions.insert(text)\n return text\n\n def insert_many(strings: List[str]) -> None:\n \"\"\"Insert a list of strings, sequentially.\"\"\"\n for string in strings:\n actions.insert(string)\n\n\[email protected](rule=\"{self.formatters}+\")\ndef formatters(m):\n return \",\".join(m.formatters_list)\n\n\[email protected](\n # Add anything else into this that you want to be able to speak during a\n # formatter.\n rule=\"(<user.symbol_key> | <user.letter> | numb <number>)\"\n)\ndef formatter_immune(m) -> ImmuneString:\n if hasattr(m, \"number\"):\n value = m.number\n else:\n value = m[0]\n return ImmuneString(str(value))\n\n\[email protected](\n # Note that if the user speaks something like \"snake dot\", it will\n # insert \"dot\" - otherwise, they wouldn't be able to insert punctuation\n # words directly.\n rule=\"<self.formatters> <user.text> (<user.text> | <user.formatter_immune>)*\"\n)\ndef format_text(m):\n out = \"\"\n formatters = m[0]\n for chunk in m[1:]:\n if isinstance(chunk, ImmuneString):\n out += chunk.string\n else:\n out += format_phrase(chunk, formatters)\n return out\n\n\nctx.lists[\"self.formatters\"] = formatters_words.keys()\n\n\[email protected](software=False)\ndef gui(gui: imgui.GUI):\n gui.text(\"List formatters\")\n gui.line()\n for name in sorted(set(formatters_words.keys())):\n gui.text(f\"{name} | {format_phrase_no_history(['one', 'two', 'three'], name)}\")\n\n\[email protected](software=False)\ndef recent_gui(gui: imgui.GUI):\n gui.text(\"Recent formatters\")\n gui.line()\n for index, result in enumerate(formatted_phrase_history, 1):\n gui.text(\"{}. {}\".format(index, result))\n", "path": "code/formatters.py"}]} | 3,326 | 551 |
gh_patches_debug_22889 | rasdani/github-patches | git_diff | ARM-DOE__ACT-500 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BUG: Fix QC bug in tests
Now CI is failing due to some bug on ubuntu systems
```bash
def test_qc_flag_description():
[32](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:33)
"""
[33](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:34)
This will check if the cleanup() method will correctly convert convert
[34](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:35)
flag_#_description to CF flag_masks and flag_meanings.
[35](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:36)
[36](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:37)
"""
[37](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:38)
[38](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:39)
ds = read_netcdf(EXAMPLE_CO2FLX4M)
[39](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:40)
ds.clean.cleanup()
[40](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:41)
qc_var_name = ds.qcfilter.check_for_ancillary_qc(
[41](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:42)
'momentum_flux', add_if_missing=False, cleanup=False
[42](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:43)
)
[43](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:44)
[44](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:45)
assert isinstance(ds[qc_var_name].attrs['flag_masks'], list)
[45](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:46)
assert isinstance(ds[qc_var_name].attrs['flag_meanings'], list)
[46](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:47)
assert isinstance(ds[qc_var_name].attrs['flag_assessments'], list)
[47](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:48)
assert ds[qc_var_name].attrs['standard_name'] == 'quality_flag'
[48](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:49)
[49](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:50)
assert len(ds[qc_var_name].attrs['flag_masks']) == 9
[50](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:51)
unique_flag_assessments = list({'Acceptable', 'Indeterminate', 'Bad'})
[51](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:52)
> assert list(set(ds[qc_var_name].attrs['flag_assessments'])) == unique_flag_assessments
[52](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:53)
E AssertionError: assert ['Indetermina...table', 'Bad'] == ['Indetermina... 'Acceptable']
[53](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:54)
E At index 1 diff: 'Acceptable' != 'Bad'
[54](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:55)
E Full diff:
[55](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:56)
E - ['Indeterminate', 'Bad', 'Acceptable']
[56](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:57)
E ? -------
[57](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:58)
E + ['Indeterminate', 'Acceptable', 'Bad']
[58](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:59)
E ? +++++++
[59](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:60)
[60](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:61)
act/tests/test_qc.py:814: AssertionError
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `act/discovery/get_armfiles.py`
Content:
```
1 """
2 Script for downloading data from ARM's Live Data Webservice
3
4 """
5
6 import argparse
7 import json
8 import os
9 import sys
10
11 try:
12 from urllib.request import urlopen
13 except ImportError:
14 from urllib import urlopen
15
16 from act.utils import date_parser
17
18
19 def download_data(username, token, datastream, startdate, enddate, time=None, output=None):
20 """
21 This tool will help users utilize the ARM Live Data Webservice to download
22 ARM data.
23
24 Parameters
25 ----------
26 username : str
27 The username to use for logging into the ADC archive.
28 token : str
29 The access token for accessing the ADC archive.
30 datastream : str
31 The name of the datastream to acquire.
32 startdate : str
33 The start date of the data to acquire. Formats accepted are
34 YYYY-MM-DD, DD.MM.YYYY, DD/MM/YYYY, YYYYMMDD, YYYY/MM/DD or
35 any of the previous formats with THH:MM:SS added onto the end
36 (ex. 2020-09-15T12:00:00).
37 enddate : str
38 The end date of the data to acquire. Formats accepted are
39 YYYY-MM-DD, DD.MM.YYYY, DD/MM/YYYY, YYYYMMDD or YYYY/MM/DD, or
40 any of the previous formats with THH:MM:SS added onto the end
41 (ex. 2020-09-15T13:00:00).
42 time: str or None
43 The specific time. Format is HHMMSS. Set to None to download all files
44 in the given date interval.
45 output : str
46 The output directory for the data. Set to None to make a folder in the
47 current working directory with the same name as *datastream* to place
48 the files in.
49
50 Returns
51 -------
52 files : list
53 Returns list of files retrieved
54
55 Notes
56 -----
57 This programmatic interface allows users to query and automate
58 machine-to-machine downloads of ARM data. This tool uses a REST URL and
59 specific parameters (saveData, query), user ID and access token, a
60 datastream name, a start date, and an end date, and data files matching
61 the criteria will be returned to the user and downloaded.
62
63 By using this web service, users can setup cron jobs and automatically
64 download data from /data/archive into their workspace. This will also
65 eliminate the manual step of following a link in an email to download data.
66 All other data files, which are not on the spinning
67 disk (on HPSS), will have to go through the regular ordering process.
68 More information about this REST API and tools can be found on `ARM Live
69 <https://adc.arm.gov/armlive/#scripts>`_.
70
71 To login/register for an access token click `here
72 <https://adc.arm.gov/armlive/livedata/home>`_.
73
74 Author: Michael Giansiracusa
75 Email: [email protected]
76
77 Web Tools Contact: Ranjeet Devarakonda [email protected]
78
79 Examples
80 --------
81 This code will download the netCDF files from the sgpmetE13.b1 datastream
82 and place them in a directory named sgpmetE13.b1. The data from 14 Jan to
83 20 Jan 2017 will be downloaded. Replace *userName* and *XXXXXXXXXXXXXXXX*
84 with your username and token for ARM Data Discovery. See the Notes for
85 information on how to obtain a username and token.
86
87 .. code-block:: python
88
89 act.discovery.download_data(
90 "userName", "XXXXXXXXXXXXXXXX", "sgpmetE13.b1", "2017-01-14", "2017-01-20"
91 )
92
93 """
94 # default start and end are empty
95 start, end = '', ''
96 # start and end strings for query_url are constructed
97 # if the arguments were provided
98 if startdate:
99 start = date_parser(startdate, output_format='%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
100 start = f'&start={start}'
101 if enddate:
102 end = date_parser(enddate, output_format='%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
103 end = f'&end={end}'
104 # build the url to query the web service using the arguments provided
105 query_url = (
106 'https://adc.arm.gov/armlive/livedata/query?' + 'user={0}&ds={1}{2}{3}&wt=json'
107 ).format(':'.join([username, token]), datastream, start, end)
108
109 # get url response, read the body of the message,
110 # and decode from bytes type to utf-8 string
111 response_body = urlopen(query_url).read().decode('utf-8')
112 # if the response is an html doc, then there was an error with the user
113 if response_body[1:14] == '!DOCTYPE html':
114 raise ConnectionRefusedError('Error with user. Check username or token.')
115
116 # parse into json object
117 response_body_json = json.loads(response_body)
118
119 # construct output directory
120 if output:
121 # output files to directory specified
122 output_dir = os.path.join(output)
123 else:
124 # if no folder given, add datastream folder
125 # to current working dir to prevent file mix-up
126 output_dir = os.path.join(os.getcwd(), datastream)
127
128 # not testing, response is successful and files were returned
129 if response_body_json is None:
130 print('ARM Data Live Webservice does not appear to be functioning')
131 return []
132
133 num_files = len(response_body_json['files'])
134 file_names = []
135 if response_body_json['status'] == 'success' and num_files > 0:
136 for fname in response_body_json['files']:
137 if time is not None:
138 if time not in fname:
139 continue
140 print(f'[DOWNLOADING] {fname}')
141 # construct link to web service saveData function
142 save_data_url = (
143 'https://adc.arm.gov/armlive/livedata/' + 'saveData?user={0}&file={1}'
144 ).format(':'.join([username, token]), fname)
145 output_file = os.path.join(output_dir, fname)
146 # make directory if it doesn't exist
147 if not os.path.isdir(output_dir):
148 os.makedirs(output_dir)
149 # create file and write bytes to file
150 with open(output_file, 'wb') as open_bytes_file:
151 open_bytes_file.write(urlopen(save_data_url).read())
152 file_names.append(output_file)
153 else:
154 print(
155 'No files returned or url status error.\n' 'Check datastream name, start, and end date.'
156 )
157
158 return file_names
159
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/act/discovery/get_armfiles.py b/act/discovery/get_armfiles.py
--- a/act/discovery/get_armfiles.py
+++ b/act/discovery/get_armfiles.py
@@ -7,6 +7,7 @@
import json
import os
import sys
+from datetime import timedelta
try:
from urllib.request import urlopen
@@ -96,10 +97,15 @@
# start and end strings for query_url are constructed
# if the arguments were provided
if startdate:
- start = date_parser(startdate, output_format='%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
+ start_datetime = date_parser(startdate, return_datetime=True)
+ start = start_datetime.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
start = f'&start={start}'
if enddate:
- end = date_parser(enddate, output_format='%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
+ end_datetime = date_parser(enddate, return_datetime=True)
+ # If the start and end date are the same, and a day to the end date
+ if start_datetime == end_datetime:
+ end_datetime += timedelta(days=1)
+ end = end_datetime.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
end = f'&end={end}'
# build the url to query the web service using the arguments provided
query_url = (
| {"golden_diff": "diff --git a/act/discovery/get_armfiles.py b/act/discovery/get_armfiles.py\n--- a/act/discovery/get_armfiles.py\n+++ b/act/discovery/get_armfiles.py\n@@ -7,6 +7,7 @@\n import json\n import os\n import sys\n+from datetime import timedelta\n \n try:\n from urllib.request import urlopen\n@@ -96,10 +97,15 @@\n # start and end strings for query_url are constructed\n # if the arguments were provided\n if startdate:\n- start = date_parser(startdate, output_format='%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'\n+ start_datetime = date_parser(startdate, return_datetime=True)\n+ start = start_datetime.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'\n start = f'&start={start}'\n if enddate:\n- end = date_parser(enddate, output_format='%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'\n+ end_datetime = date_parser(enddate, return_datetime=True)\n+ # If the start and end date are the same, and a day to the end date\n+ if start_datetime == end_datetime:\n+ end_datetime += timedelta(days=1)\n+ end = end_datetime.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'\n end = f'&end={end}'\n # build the url to query the web service using the arguments provided\n query_url = (\n", "issue": "BUG: Fix QC bug in tests\nNow CI is failing due to some bug on ubuntu systems\r\n\r\n```bash\r\ndef test_qc_flag_description():\r\n[32](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:33)\r\n \"\"\"\r\n[33](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:34)\r\n This will check if the cleanup() method will correctly convert convert\r\n[34](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:35)\r\n flag_#_description to CF flag_masks and flag_meanings.\r\n[35](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:36)\r\n \r\n[36](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:37)\r\n \"\"\"\r\n[37](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:38)\r\n \r\n[38](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:39)\r\n ds = read_netcdf(EXAMPLE_CO2FLX4M)\r\n[39](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:40)\r\n ds.clean.cleanup()\r\n[40](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:41)\r\n qc_var_name = ds.qcfilter.check_for_ancillary_qc(\r\n[41](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:42)\r\n 'momentum_flux', add_if_missing=False, cleanup=False\r\n[42](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:43)\r\n )\r\n[43](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:44)\r\n \r\n[44](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:45)\r\n assert isinstance(ds[qc_var_name].attrs['flag_masks'], list)\r\n[45](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:46)\r\n assert isinstance(ds[qc_var_name].attrs['flag_meanings'], list)\r\n[46](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:47)\r\n assert isinstance(ds[qc_var_name].attrs['flag_assessments'], list)\r\n[47](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:48)\r\n assert ds[qc_var_name].attrs['standard_name'] == 'quality_flag'\r\n[48](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:49)\r\n \r\n[49](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:50)\r\n assert 
len(ds[qc_var_name].attrs['flag_masks']) == 9\r\n[50](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:51)\r\n unique_flag_assessments = list({'Acceptable', 'Indeterminate', 'Bad'})\r\n[51](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:52)\r\n> assert list(set(ds[qc_var_name].attrs['flag_assessments'])) == unique_flag_assessments\r\n[52](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:53)\r\nE AssertionError: assert ['Indetermina...table', 'Bad'] == ['Indetermina... 'Acceptable']\r\n[53](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:54)\r\nE At index 1 diff: 'Acceptable' != 'Bad'\r\n[54](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:55)\r\nE Full diff:\r\n[55](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:56)\r\nE - ['Indeterminate', 'Bad', 'Acceptable']\r\n[56](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:57)\r\nE ? -------\r\n[57](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:58)\r\nE + ['Indeterminate', 'Acceptable', 'Bad']\r\n[58](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:59)\r\nE ? +++++++\r\n[59](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:60)\r\n\r\n[60](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:61)\r\nact/tests/test_qc.py:814: AssertionError\r\n```\n", "before_files": [{"content": "\"\"\"\nScript for downloading data from ARM's Live Data Webservice\n\n\"\"\"\n\nimport argparse\nimport json\nimport os\nimport sys\n\ntry:\n from urllib.request import urlopen\nexcept ImportError:\n from urllib import urlopen\n\nfrom act.utils import date_parser\n\n\ndef download_data(username, token, datastream, startdate, enddate, time=None, output=None):\n \"\"\"\n This tool will help users utilize the ARM Live Data Webservice to download\n ARM data.\n\n Parameters\n ----------\n username : str\n The username to use for logging into the ADC archive.\n token : str\n The access token for accessing the ADC archive.\n datastream : str\n The name of the datastream to acquire.\n startdate : str\n The start date of the data to acquire. Formats accepted are\n YYYY-MM-DD, DD.MM.YYYY, DD/MM/YYYY, YYYYMMDD, YYYY/MM/DD or\n any of the previous formats with THH:MM:SS added onto the end\n (ex. 2020-09-15T12:00:00).\n enddate : str\n The end date of the data to acquire. Formats accepted are\n YYYY-MM-DD, DD.MM.YYYY, DD/MM/YYYY, YYYYMMDD or YYYY/MM/DD, or\n any of the previous formats with THH:MM:SS added onto the end\n (ex. 2020-09-15T13:00:00).\n time: str or None\n The specific time. Format is HHMMSS. Set to None to download all files\n in the given date interval.\n output : str\n The output directory for the data. Set to None to make a folder in the\n current working directory with the same name as *datastream* to place\n the files in.\n\n Returns\n -------\n files : list\n Returns list of files retrieved\n\n Notes\n -----\n This programmatic interface allows users to query and automate\n machine-to-machine downloads of ARM data. 
This tool uses a REST URL and\n specific parameters (saveData, query), user ID and access token, a\n datastream name, a start date, and an end date, and data files matching\n the criteria will be returned to the user and downloaded.\n\n By using this web service, users can setup cron jobs and automatically\n download data from /data/archive into their workspace. This will also\n eliminate the manual step of following a link in an email to download data.\n All other data files, which are not on the spinning\n disk (on HPSS), will have to go through the regular ordering process.\n More information about this REST API and tools can be found on `ARM Live\n <https://adc.arm.gov/armlive/#scripts>`_.\n\n To login/register for an access token click `here\n <https://adc.arm.gov/armlive/livedata/home>`_.\n\n Author: Michael Giansiracusa\n Email: [email protected]\n\n Web Tools Contact: Ranjeet Devarakonda [email protected]\n\n Examples\n --------\n This code will download the netCDF files from the sgpmetE13.b1 datastream\n and place them in a directory named sgpmetE13.b1. The data from 14 Jan to\n 20 Jan 2017 will be downloaded. Replace *userName* and *XXXXXXXXXXXXXXXX*\n with your username and token for ARM Data Discovery. See the Notes for\n information on how to obtain a username and token.\n\n .. code-block:: python\n\n act.discovery.download_data(\n \"userName\", \"XXXXXXXXXXXXXXXX\", \"sgpmetE13.b1\", \"2017-01-14\", \"2017-01-20\"\n )\n\n \"\"\"\n # default start and end are empty\n start, end = '', ''\n # start and end strings for query_url are constructed\n # if the arguments were provided\n if startdate:\n start = date_parser(startdate, output_format='%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'\n start = f'&start={start}'\n if enddate:\n end = date_parser(enddate, output_format='%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'\n end = f'&end={end}'\n # build the url to query the web service using the arguments provided\n query_url = (\n 'https://adc.arm.gov/armlive/livedata/query?' + 'user={0}&ds={1}{2}{3}&wt=json'\n ).format(':'.join([username, token]), datastream, start, end)\n\n # get url response, read the body of the message,\n # and decode from bytes type to utf-8 string\n response_body = urlopen(query_url).read().decode('utf-8')\n # if the response is an html doc, then there was an error with the user\n if response_body[1:14] == '!DOCTYPE html':\n raise ConnectionRefusedError('Error with user. 
Check username or token.')\n\n # parse into json object\n response_body_json = json.loads(response_body)\n\n # construct output directory\n if output:\n # output files to directory specified\n output_dir = os.path.join(output)\n else:\n # if no folder given, add datastream folder\n # to current working dir to prevent file mix-up\n output_dir = os.path.join(os.getcwd(), datastream)\n\n # not testing, response is successful and files were returned\n if response_body_json is None:\n print('ARM Data Live Webservice does not appear to be functioning')\n return []\n\n num_files = len(response_body_json['files'])\n file_names = []\n if response_body_json['status'] == 'success' and num_files > 0:\n for fname in response_body_json['files']:\n if time is not None:\n if time not in fname:\n continue\n print(f'[DOWNLOADING] {fname}')\n # construct link to web service saveData function\n save_data_url = (\n 'https://adc.arm.gov/armlive/livedata/' + 'saveData?user={0}&file={1}'\n ).format(':'.join([username, token]), fname)\n output_file = os.path.join(output_dir, fname)\n # make directory if it doesn't exist\n if not os.path.isdir(output_dir):\n os.makedirs(output_dir)\n # create file and write bytes to file\n with open(output_file, 'wb') as open_bytes_file:\n open_bytes_file.write(urlopen(save_data_url).read())\n file_names.append(output_file)\n else:\n print(\n 'No files returned or url status error.\\n' 'Check datastream name, start, and end date.'\n )\n\n return file_names\n", "path": "act/discovery/get_armfiles.py"}], "after_files": [{"content": "\"\"\"\nScript for downloading data from ARM's Live Data Webservice\n\n\"\"\"\n\nimport argparse\nimport json\nimport os\nimport sys\nfrom datetime import timedelta\n\ntry:\n from urllib.request import urlopen\nexcept ImportError:\n from urllib import urlopen\n\nfrom act.utils import date_parser\n\n\ndef download_data(username, token, datastream, startdate, enddate, time=None, output=None):\n \"\"\"\n This tool will help users utilize the ARM Live Data Webservice to download\n ARM data.\n\n Parameters\n ----------\n username : str\n The username to use for logging into the ADC archive.\n token : str\n The access token for accessing the ADC archive.\n datastream : str\n The name of the datastream to acquire.\n startdate : str\n The start date of the data to acquire. Formats accepted are\n YYYY-MM-DD, DD.MM.YYYY, DD/MM/YYYY, YYYYMMDD, YYYY/MM/DD or\n any of the previous formats with THH:MM:SS added onto the end\n (ex. 2020-09-15T12:00:00).\n enddate : str\n The end date of the data to acquire. Formats accepted are\n YYYY-MM-DD, DD.MM.YYYY, DD/MM/YYYY, YYYYMMDD or YYYY/MM/DD, or\n any of the previous formats with THH:MM:SS added onto the end\n (ex. 2020-09-15T13:00:00).\n time: str or None\n The specific time. Format is HHMMSS. Set to None to download all files\n in the given date interval.\n output : str\n The output directory for the data. Set to None to make a folder in the\n current working directory with the same name as *datastream* to place\n the files in.\n\n Returns\n -------\n files : list\n Returns list of files retrieved\n\n Notes\n -----\n This programmatic interface allows users to query and automate\n machine-to-machine downloads of ARM data. 
This tool uses a REST URL and\n specific parameters (saveData, query), user ID and access token, a\n datastream name, a start date, and an end date, and data files matching\n the criteria will be returned to the user and downloaded.\n\n By using this web service, users can setup cron jobs and automatically\n download data from /data/archive into their workspace. This will also\n eliminate the manual step of following a link in an email to download data.\n All other data files, which are not on the spinning\n disk (on HPSS), will have to go through the regular ordering process.\n More information about this REST API and tools can be found on `ARM Live\n <https://adc.arm.gov/armlive/#scripts>`_.\n\n To login/register for an access token click `here\n <https://adc.arm.gov/armlive/livedata/home>`_.\n\n Author: Michael Giansiracusa\n Email: [email protected]\n\n Web Tools Contact: Ranjeet Devarakonda [email protected]\n\n Examples\n --------\n This code will download the netCDF files from the sgpmetE13.b1 datastream\n and place them in a directory named sgpmetE13.b1. The data from 14 Jan to\n 20 Jan 2017 will be downloaded. Replace *userName* and *XXXXXXXXXXXXXXXX*\n with your username and token for ARM Data Discovery. See the Notes for\n information on how to obtain a username and token.\n\n .. code-block:: python\n\n act.discovery.download_data(\n \"userName\", \"XXXXXXXXXXXXXXXX\", \"sgpmetE13.b1\", \"2017-01-14\", \"2017-01-20\"\n )\n\n \"\"\"\n # default start and end are empty\n start, end = '', ''\n # start and end strings for query_url are constructed\n # if the arguments were provided\n if startdate:\n start_datetime = date_parser(startdate, return_datetime=True)\n start = start_datetime.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'\n start = f'&start={start}'\n if enddate:\n end_datetime = date_parser(enddate, return_datetime=True)\n # If the start and end date are the same, and a day to the end date\n if start_datetime == end_datetime:\n end_datetime += timedelta(days=1)\n end = end_datetime.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'\n end = f'&end={end}'\n # build the url to query the web service using the arguments provided\n query_url = (\n 'https://adc.arm.gov/armlive/livedata/query?' + 'user={0}&ds={1}{2}{3}&wt=json'\n ).format(':'.join([username, token]), datastream, start, end)\n\n # get url response, read the body of the message,\n # and decode from bytes type to utf-8 string\n response_body = urlopen(query_url).read().decode('utf-8')\n # if the response is an html doc, then there was an error with the user\n if response_body[1:14] == '!DOCTYPE html':\n raise ConnectionRefusedError('Error with user. 
Check username or token.')\n\n # parse into json object\n response_body_json = json.loads(response_body)\n\n # construct output directory\n if output:\n # output files to directory specified\n output_dir = os.path.join(output)\n else:\n # if no folder given, add datastream folder\n # to current working dir to prevent file mix-up\n output_dir = os.path.join(os.getcwd(), datastream)\n\n # not testing, response is successful and files were returned\n if response_body_json is None:\n print('ARM Data Live Webservice does not appear to be functioning')\n return []\n\n num_files = len(response_body_json['files'])\n file_names = []\n if response_body_json['status'] == 'success' and num_files > 0:\n for fname in response_body_json['files']:\n if time is not None:\n if time not in fname:\n continue\n print(f'[DOWNLOADING] {fname}')\n # construct link to web service saveData function\n save_data_url = (\n 'https://adc.arm.gov/armlive/livedata/' + 'saveData?user={0}&file={1}'\n ).format(':'.join([username, token]), fname)\n output_file = os.path.join(output_dir, fname)\n # make directory if it doesn't exist\n if not os.path.isdir(output_dir):\n os.makedirs(output_dir)\n # create file and write bytes to file\n with open(output_file, 'wb') as open_bytes_file:\n open_bytes_file.write(urlopen(save_data_url).read())\n file_names.append(output_file)\n else:\n print(\n 'No files returned or url status error.\\n' 'Check datastream name, start, and end date.'\n )\n\n return file_names\n", "path": "act/discovery/get_armfiles.py"}]} | 3,701 | 351 |
gh_patches_debug_30993 | rasdani/github-patches | git_diff | PaddlePaddle__PaddleSpeech-1609 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[vec][search] update to paddlespeech model
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `demos/audio_searching/src/config.py`
Content:
```
1 # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import os
15
16 ############### Milvus Configuration ###############
17 MILVUS_HOST = os.getenv("MILVUS_HOST", "127.0.0.1")
18 MILVUS_PORT = int(os.getenv("MILVUS_PORT", "19530"))
19 VECTOR_DIMENSION = int(os.getenv("VECTOR_DIMENSION", "2048"))
20 INDEX_FILE_SIZE = int(os.getenv("INDEX_FILE_SIZE", "1024"))
21 METRIC_TYPE = os.getenv("METRIC_TYPE", "L2")
22 DEFAULT_TABLE = os.getenv("DEFAULT_TABLE", "audio_table")
23 TOP_K = int(os.getenv("TOP_K", "10"))
24
25 ############### MySQL Configuration ###############
26 MYSQL_HOST = os.getenv("MYSQL_HOST", "127.0.0.1")
27 MYSQL_PORT = int(os.getenv("MYSQL_PORT", "3306"))
28 MYSQL_USER = os.getenv("MYSQL_USER", "root")
29 MYSQL_PWD = os.getenv("MYSQL_PWD", "123456")
30 MYSQL_DB = os.getenv("MYSQL_DB", "mysql")
31
32 ############### Data Path ###############
33 UPLOAD_PATH = os.getenv("UPLOAD_PATH", "tmp/audio-data")
34
35 ############### Number of Log Files ###############
36 LOGS_NUM = int(os.getenv("logs_num", "0"))
37
```
Path: `demos/audio_searching/src/encode.py`
Content:
```
1 # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import os
15
16 import librosa
17 import numpy as np
18 from logs import LOGGER
19
20
21 def get_audio_embedding(path):
22 """
23 Use vpr_inference to generate embedding of audio
24 """
25 try:
26 RESAMPLE_RATE = 16000
27 audio, _ = librosa.load(path, sr=RESAMPLE_RATE, mono=True)
28
29 # TODO add infer/python interface to get embedding, now fake it by rand
30 # vpr = ECAPATDNN(checkpoint_path=None, device='cuda')
31 # embedding = vpr.inference(audio)
32 np.random.seed(hash(os.path.basename(path)) % 1000000)
33 embedding = np.random.rand(1, 2048)
34 embedding = embedding / np.linalg.norm(embedding)
35 embedding = embedding.tolist()[0]
36 return embedding
37 except Exception as e:
38 LOGGER.error(f"Error with embedding:{e}")
39 return None
40
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/demos/audio_searching/src/config.py b/demos/audio_searching/src/config.py
--- a/demos/audio_searching/src/config.py
+++ b/demos/audio_searching/src/config.py
@@ -16,7 +16,7 @@
############### Milvus Configuration ###############
MILVUS_HOST = os.getenv("MILVUS_HOST", "127.0.0.1")
MILVUS_PORT = int(os.getenv("MILVUS_PORT", "19530"))
-VECTOR_DIMENSION = int(os.getenv("VECTOR_DIMENSION", "2048"))
+VECTOR_DIMENSION = int(os.getenv("VECTOR_DIMENSION", "192"))
INDEX_FILE_SIZE = int(os.getenv("INDEX_FILE_SIZE", "1024"))
METRIC_TYPE = os.getenv("METRIC_TYPE", "L2")
DEFAULT_TABLE = os.getenv("DEFAULT_TABLE", "audio_table")
diff --git a/demos/audio_searching/src/encode.py b/demos/audio_searching/src/encode.py
--- a/demos/audio_searching/src/encode.py
+++ b/demos/audio_searching/src/encode.py
@@ -15,7 +15,12 @@
import librosa
import numpy as np
+from config import DEFAULT_TABLE
+
from logs import LOGGER
+from paddlespeech.cli import VectorExecutor
+
+vector_executor = VectorExecutor()
def get_audio_embedding(path):
@@ -23,16 +28,9 @@
Use vpr_inference to generate embedding of audio
"""
try:
- RESAMPLE_RATE = 16000
- audio, _ = librosa.load(path, sr=RESAMPLE_RATE, mono=True)
-
- # TODO add infer/python interface to get embedding, now fake it by rand
- # vpr = ECAPATDNN(checkpoint_path=None, device='cuda')
- # embedding = vpr.inference(audio)
- np.random.seed(hash(os.path.basename(path)) % 1000000)
- embedding = np.random.rand(1, 2048)
+ embedding = vector_executor(audio_file=path)
embedding = embedding / np.linalg.norm(embedding)
- embedding = embedding.tolist()[0]
+ embedding = embedding.tolist()
return embedding
except Exception as e:
LOGGER.error(f"Error with embedding:{e}")
| {"golden_diff": "diff --git a/demos/audio_searching/src/config.py b/demos/audio_searching/src/config.py\n--- a/demos/audio_searching/src/config.py\n+++ b/demos/audio_searching/src/config.py\n@@ -16,7 +16,7 @@\n ############### Milvus Configuration ###############\n MILVUS_HOST = os.getenv(\"MILVUS_HOST\", \"127.0.0.1\")\n MILVUS_PORT = int(os.getenv(\"MILVUS_PORT\", \"19530\"))\n-VECTOR_DIMENSION = int(os.getenv(\"VECTOR_DIMENSION\", \"2048\"))\n+VECTOR_DIMENSION = int(os.getenv(\"VECTOR_DIMENSION\", \"192\"))\n INDEX_FILE_SIZE = int(os.getenv(\"INDEX_FILE_SIZE\", \"1024\"))\n METRIC_TYPE = os.getenv(\"METRIC_TYPE\", \"L2\")\n DEFAULT_TABLE = os.getenv(\"DEFAULT_TABLE\", \"audio_table\")\ndiff --git a/demos/audio_searching/src/encode.py b/demos/audio_searching/src/encode.py\n--- a/demos/audio_searching/src/encode.py\n+++ b/demos/audio_searching/src/encode.py\n@@ -15,7 +15,12 @@\n \n import librosa\n import numpy as np\n+from config import DEFAULT_TABLE\n+\n from logs import LOGGER\n+from paddlespeech.cli import VectorExecutor\n+\n+vector_executor = VectorExecutor()\n \n \n def get_audio_embedding(path):\n@@ -23,16 +28,9 @@\n Use vpr_inference to generate embedding of audio\n \"\"\"\n try:\n- RESAMPLE_RATE = 16000\n- audio, _ = librosa.load(path, sr=RESAMPLE_RATE, mono=True)\n-\n- # TODO add infer/python interface to get embedding, now fake it by rand\n- # vpr = ECAPATDNN(checkpoint_path=None, device='cuda')\n- # embedding = vpr.inference(audio)\n- np.random.seed(hash(os.path.basename(path)) % 1000000)\n- embedding = np.random.rand(1, 2048)\n+ embedding = vector_executor(audio_file=path)\n embedding = embedding / np.linalg.norm(embedding)\n- embedding = embedding.tolist()[0]\n+ embedding = embedding.tolist()\n return embedding\n except Exception as e:\n LOGGER.error(f\"Error with embedding:{e}\")\n", "issue": "[vec][search] update to paddlespeech model\n\n", "before_files": [{"content": "# Copyright (c) 2022 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\n\n############### Milvus Configuration ###############\nMILVUS_HOST = os.getenv(\"MILVUS_HOST\", \"127.0.0.1\")\nMILVUS_PORT = int(os.getenv(\"MILVUS_PORT\", \"19530\"))\nVECTOR_DIMENSION = int(os.getenv(\"VECTOR_DIMENSION\", \"2048\"))\nINDEX_FILE_SIZE = int(os.getenv(\"INDEX_FILE_SIZE\", \"1024\"))\nMETRIC_TYPE = os.getenv(\"METRIC_TYPE\", \"L2\")\nDEFAULT_TABLE = os.getenv(\"DEFAULT_TABLE\", \"audio_table\")\nTOP_K = int(os.getenv(\"TOP_K\", \"10\"))\n\n############### MySQL Configuration ###############\nMYSQL_HOST = os.getenv(\"MYSQL_HOST\", \"127.0.0.1\")\nMYSQL_PORT = int(os.getenv(\"MYSQL_PORT\", \"3306\"))\nMYSQL_USER = os.getenv(\"MYSQL_USER\", \"root\")\nMYSQL_PWD = os.getenv(\"MYSQL_PWD\", \"123456\")\nMYSQL_DB = os.getenv(\"MYSQL_DB\", \"mysql\")\n\n############### Data Path ###############\nUPLOAD_PATH = os.getenv(\"UPLOAD_PATH\", \"tmp/audio-data\")\n\n############### Number of Log Files ###############\nLOGS_NUM = int(os.getenv(\"logs_num\", \"0\"))\n", "path": "demos/audio_searching/src/config.py"}, {"content": "# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\n\nimport librosa\nimport numpy as np\nfrom logs import LOGGER\n\n\ndef get_audio_embedding(path):\n \"\"\"\n Use vpr_inference to generate embedding of audio\n \"\"\"\n try:\n RESAMPLE_RATE = 16000\n audio, _ = librosa.load(path, sr=RESAMPLE_RATE, mono=True)\n\n # TODO add infer/python interface to get embedding, now fake it by rand\n # vpr = ECAPATDNN(checkpoint_path=None, device='cuda')\n # embedding = vpr.inference(audio)\n np.random.seed(hash(os.path.basename(path)) % 1000000)\n embedding = np.random.rand(1, 2048)\n embedding = embedding / np.linalg.norm(embedding)\n embedding = embedding.tolist()[0]\n return embedding\n except Exception as e:\n LOGGER.error(f\"Error with embedding:{e}\")\n return None\n", "path": "demos/audio_searching/src/encode.py"}], "after_files": [{"content": "# Copyright (c) 2022 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\n\n############### Milvus Configuration ###############\nMILVUS_HOST = os.getenv(\"MILVUS_HOST\", \"127.0.0.1\")\nMILVUS_PORT = int(os.getenv(\"MILVUS_PORT\", \"19530\"))\nVECTOR_DIMENSION = int(os.getenv(\"VECTOR_DIMENSION\", \"192\"))\nINDEX_FILE_SIZE = int(os.getenv(\"INDEX_FILE_SIZE\", \"1024\"))\nMETRIC_TYPE = os.getenv(\"METRIC_TYPE\", \"L2\")\nDEFAULT_TABLE = os.getenv(\"DEFAULT_TABLE\", \"audio_table\")\nTOP_K = int(os.getenv(\"TOP_K\", \"10\"))\n\n############### MySQL Configuration ###############\nMYSQL_HOST = os.getenv(\"MYSQL_HOST\", \"127.0.0.1\")\nMYSQL_PORT = int(os.getenv(\"MYSQL_PORT\", \"3306\"))\nMYSQL_USER = os.getenv(\"MYSQL_USER\", \"root\")\nMYSQL_PWD = os.getenv(\"MYSQL_PWD\", \"123456\")\nMYSQL_DB = os.getenv(\"MYSQL_DB\", \"mysql\")\n\n############### Data Path ###############\nUPLOAD_PATH = os.getenv(\"UPLOAD_PATH\", \"tmp/audio-data\")\n\n############### Number of Log Files ###############\nLOGS_NUM = int(os.getenv(\"logs_num\", \"0\"))\n", "path": "demos/audio_searching/src/config.py"}, {"content": "# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\n\nimport librosa\nimport numpy as np\nfrom config import DEFAULT_TABLE\n\nfrom logs import LOGGER\nfrom paddlespeech.cli import VectorExecutor\n\nvector_executor = VectorExecutor()\n\n\ndef get_audio_embedding(path):\n \"\"\"\n Use vpr_inference to generate embedding of audio\n \"\"\"\n try:\n embedding = vector_executor(audio_file=path)\n embedding = embedding / np.linalg.norm(embedding)\n embedding = embedding.tolist()\n return embedding\n except Exception as e:\n LOGGER.error(f\"Error with embedding:{e}\")\n return None\n", "path": "demos/audio_searching/src/encode.py"}]} | 1,179 | 511 |