| problem_id<br>stringlengths 18-22 | source<br>stringclasses 1 value | task_type<br>stringclasses 1 value | in_source_id<br>stringlengths 13-58 | prompt<br>stringlengths 1.1k-10.2k | golden_diff<br>stringlengths 151-4.94k | verification_info<br>stringlengths 582-21k | num_tokens<br>int64 271-2.05k | num_tokens_diff<br>int64 47-1.02k |
---|---|---|---|---|---|---|---|---|
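Each row below pairs a SWE-style prompt (the GitHub issue plus the relevant source files) with the `golden_diff` that resolved it; `verification_info` is a JSON string carrying the same diff together with `issue`, `before_files` and `after_files` snapshots, and the two token columns measure the prompt and the diff. A minimal sketch of how a row could be consumed programmatically; the Hub dataset id and split name are placeholders inferred from the `source` column, not something this page states:

```python
import json
from datasets import load_dataset

# "rasdani/github-patches" is taken from the source column and used here only as a
# placeholder dataset id; the split name "train" is likewise an assumption.
ds = load_dataset("rasdani/github-patches", split="train")
row = ds[0]

print(row["problem_id"], row["num_tokens"], row["num_tokens_diff"])

# verification_info is stored as a JSON string, not a nested feature.
info = json.loads(row["verification_info"])
print(sorted(info.keys()))   # ['after_files', 'before_files', 'golden_diff', 'issue']
```
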
gh_patches_debug_28889 | rasdani/github-patches | git_diff | piskvorky__gensim-968 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Lsi distributed fail
Hi,
I've got a problem with the lsi distributed. When i executed the example:
https://radimrehurek.com/gensim/dist_lsi.html
First configure the server (enviroment variables), then i run the server, worker and dispatcher.
And all without errros. But when i executed the code. I have this fail:

Why does this happens? How can i solve?
Thank you in advance.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gensim/models/lsi_worker.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) 2010 Radim Rehurek <[email protected]>
5 # Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
6
7 """
8 USAGE: %(program)s
9
10 Worker ("slave") process used in computing distributed LSI. Run this script \
11 on every node in your cluster. If you wish, you may even run it multiple times \
12 on a single machine, to make better use of multiple cores (just beware that \
13 memory footprint increases accordingly).
14
15 Example: python -m gensim.models.lsi_worker
16 """
17
18
19 from __future__ import with_statement
20 import os, sys, logging
21 import threading
22 import tempfile
23 try:
24 import Queue
25 except ImportError:
26 import queue as Queue
27 import Pyro4
28 from gensim.models import lsimodel
29 from gensim import utils
30
31 logger = logging.getLogger('gensim.models.lsi_worker')
32
33
34 SAVE_DEBUG = 0 # save intermediate models after every SAVE_DEBUG updates (0 for never)
35
36
37
38 class Worker(object):
39 def __init__(self):
40 self.model = None
41
42
43 def initialize(self, myid, dispatcher, **model_params):
44 self.lock_update = threading.Lock()
45 self.jobsdone = 0 # how many jobs has this worker completed?
46 self.myid = myid # id of this worker in the dispatcher; just a convenience var for easy access/logging TODO remove?
47 self.dispatcher = dispatcher
48 self.finished = False
49 logger.info("initializing worker #%s" % myid)
50 self.model = lsimodel.LsiModel(**model_params)
51
52
53 @Pyro4.oneway
54 def requestjob(self):
55 """
56 Request jobs from the dispatcher, in a perpetual loop until `getstate()` is called.
57 """
58 if self.model is None:
59 raise RuntimeError("worker must be initialized before receiving jobs")
60
61 job = None
62 while job is None and not self.finished:
63 try:
64 job = self.dispatcher.getjob(self.myid)
65 except Queue.Empty:
66 # no new job: try again, unless we're finished with all work
67 continue
68 if job is not None:
69 logger.info("worker #%s received job #%i" % (self.myid, self.jobsdone))
70 self.processjob(job)
71 self.dispatcher.jobdone(self.myid)
72 else:
73 logger.info("worker #%i stopping asking for jobs" % self.myid)
74
75
76 @utils.synchronous('lock_update')
77 def processjob(self, job):
78 self.model.add_documents(job)
79 self.jobsdone += 1
80 if SAVE_DEBUG and self.jobsdone % SAVE_DEBUG == 0:
81 fname = os.path.join(tempfile.gettempdir(), 'lsi_worker.pkl')
82 self.model.save(fname)
83
84
85 @utils.synchronous('lock_update')
86 def getstate(self):
87 logger.info("worker #%i returning its state after %s jobs" %
88 (self.myid, self.jobsdone))
89 assert isinstance(self.model.projection, lsimodel.Projection)
90 self.finished = True
91 return self.model.projection
92
93
94 @utils.synchronous('lock_update')
95 def reset(self):
96 logger.info("resetting worker #%i" % self.myid)
97 self.model.projection = self.model.projection.empty_like()
98 self.finished = False
99
100
101 @Pyro4.oneway
102 def exit(self):
103 logger.info("terminating worker #%i" % self.myid)
104 os._exit(0)
105 #endclass Worker
106
107
108
109 def main():
110 logging.basicConfig(format = '%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
111 logger.info("running %s" % " ".join(sys.argv))
112
113 program = os.path.basename(sys.argv[0])
114 # make sure we have enough cmd line parameters
115 if len(sys.argv) < 1:
116 print(globals()["__doc__"] % locals())
117 sys.exit(1)
118
119 utils.pyro_daemon('gensim.lsi_worker', Worker(), random_suffix=True)
120
121 logger.info("finished running %s" % program)
122
123
124
125 if __name__ == '__main__':
126 main()
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gensim/models/lsi_worker.py b/gensim/models/lsi_worker.py
--- a/gensim/models/lsi_worker.py
+++ b/gensim/models/lsi_worker.py
@@ -39,7 +39,7 @@
def __init__(self):
self.model = None
-
+ @Pyro4.expose
def initialize(self, myid, dispatcher, **model_params):
self.lock_update = threading.Lock()
self.jobsdone = 0 # how many jobs has this worker completed?
@@ -49,7 +49,7 @@
logger.info("initializing worker #%s" % myid)
self.model = lsimodel.LsiModel(**model_params)
-
+ @Pyro4.expose
@Pyro4.oneway
def requestjob(self):
"""
@@ -81,7 +81,7 @@
fname = os.path.join(tempfile.gettempdir(), 'lsi_worker.pkl')
self.model.save(fname)
-
+ @Pyro4.expose
@utils.synchronous('lock_update')
def getstate(self):
logger.info("worker #%i returning its state after %s jobs" %
@@ -90,7 +90,7 @@
self.finished = True
return self.model.projection
-
+ @Pyro4.expose
@utils.synchronous('lock_update')
def reset(self):
logger.info("resetting worker #%i" % self.myid)
| {"golden_diff": "diff --git a/gensim/models/lsi_worker.py b/gensim/models/lsi_worker.py\n--- a/gensim/models/lsi_worker.py\n+++ b/gensim/models/lsi_worker.py\n@@ -39,7 +39,7 @@\n def __init__(self):\n self.model = None\n \n-\n+ @Pyro4.expose\n def initialize(self, myid, dispatcher, **model_params):\n self.lock_update = threading.Lock()\n self.jobsdone = 0 # how many jobs has this worker completed?\n@@ -49,7 +49,7 @@\n logger.info(\"initializing worker #%s\" % myid)\n self.model = lsimodel.LsiModel(**model_params)\n \n-\n+ @Pyro4.expose\n @Pyro4.oneway\n def requestjob(self):\n \"\"\"\n@@ -81,7 +81,7 @@\n fname = os.path.join(tempfile.gettempdir(), 'lsi_worker.pkl')\n self.model.save(fname)\n \n-\n+ @Pyro4.expose\n @utils.synchronous('lock_update')\n def getstate(self):\n logger.info(\"worker #%i returning its state after %s jobs\" %\n@@ -90,7 +90,7 @@\n self.finished = True\n return self.model.projection\n \n-\n+ @Pyro4.expose\n @utils.synchronous('lock_update')\n def reset(self):\n logger.info(\"resetting worker #%i\" % self.myid)\n", "issue": "Lsi distributed fail\nHi, \nI've got a problem with the lsi distributed. When i executed the example:\n\nhttps://radimrehurek.com/gensim/dist_lsi.html\n\nFirst configure the server (enviroment variables), then i run the server, worker and dispatcher.\n\nAnd all without errros. But when i executed the code. I have this fail:\n\n\nWhy does this happens? How can i solve?\n\nThank you in advance.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2010 Radim Rehurek <[email protected]>\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\n\"\"\"\nUSAGE: %(program)s\n\n Worker (\"slave\") process used in computing distributed LSI. Run this script \\\non every node in your cluster. 
If you wish, you may even run it multiple times \\\non a single machine, to make better use of multiple cores (just beware that \\\nmemory footprint increases accordingly).\n\nExample: python -m gensim.models.lsi_worker\n\"\"\"\n\n\nfrom __future__ import with_statement\nimport os, sys, logging\nimport threading\nimport tempfile\ntry:\n import Queue\nexcept ImportError:\n import queue as Queue\nimport Pyro4\nfrom gensim.models import lsimodel\nfrom gensim import utils\n\nlogger = logging.getLogger('gensim.models.lsi_worker')\n\n\nSAVE_DEBUG = 0 # save intermediate models after every SAVE_DEBUG updates (0 for never)\n\n\n\nclass Worker(object):\n def __init__(self):\n self.model = None\n\n\n def initialize(self, myid, dispatcher, **model_params):\n self.lock_update = threading.Lock()\n self.jobsdone = 0 # how many jobs has this worker completed?\n self.myid = myid # id of this worker in the dispatcher; just a convenience var for easy access/logging TODO remove?\n self.dispatcher = dispatcher\n self.finished = False\n logger.info(\"initializing worker #%s\" % myid)\n self.model = lsimodel.LsiModel(**model_params)\n\n\n @Pyro4.oneway\n def requestjob(self):\n \"\"\"\n Request jobs from the dispatcher, in a perpetual loop until `getstate()` is called.\n \"\"\"\n if self.model is None:\n raise RuntimeError(\"worker must be initialized before receiving jobs\")\n\n job = None\n while job is None and not self.finished:\n try:\n job = self.dispatcher.getjob(self.myid)\n except Queue.Empty:\n # no new job: try again, unless we're finished with all work\n continue\n if job is not None:\n logger.info(\"worker #%s received job #%i\" % (self.myid, self.jobsdone))\n self.processjob(job)\n self.dispatcher.jobdone(self.myid)\n else:\n logger.info(\"worker #%i stopping asking for jobs\" % self.myid)\n\n\n @utils.synchronous('lock_update')\n def processjob(self, job):\n self.model.add_documents(job)\n self.jobsdone += 1\n if SAVE_DEBUG and self.jobsdone % SAVE_DEBUG == 0:\n fname = os.path.join(tempfile.gettempdir(), 'lsi_worker.pkl')\n self.model.save(fname)\n\n\n @utils.synchronous('lock_update')\n def getstate(self):\n logger.info(\"worker #%i returning its state after %s jobs\" %\n (self.myid, self.jobsdone))\n assert isinstance(self.model.projection, lsimodel.Projection)\n self.finished = True\n return self.model.projection\n\n\n @utils.synchronous('lock_update')\n def reset(self):\n logger.info(\"resetting worker #%i\" % self.myid)\n self.model.projection = self.model.projection.empty_like()\n self.finished = False\n\n\n @Pyro4.oneway\n def exit(self):\n logger.info(\"terminating worker #%i\" % self.myid)\n os._exit(0)\n#endclass Worker\n\n\n\ndef main():\n logging.basicConfig(format = '%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n logger.info(\"running %s\" % \" \".join(sys.argv))\n\n program = os.path.basename(sys.argv[0])\n # make sure we have enough cmd line parameters\n if len(sys.argv) < 1:\n print(globals()[\"__doc__\"] % locals())\n sys.exit(1)\n\n utils.pyro_daemon('gensim.lsi_worker', Worker(), random_suffix=True)\n\n logger.info(\"finished running %s\" % program)\n\n\n\nif __name__ == '__main__':\n main()\n", "path": "gensim/models/lsi_worker.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2010 Radim Rehurek <[email protected]>\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\n\"\"\"\nUSAGE: %(program)s\n\n Worker (\"slave\") process used in computing distributed LSI. 
Run this script \\\non every node in your cluster. If you wish, you may even run it multiple times \\\non a single machine, to make better use of multiple cores (just beware that \\\nmemory footprint increases accordingly).\n\nExample: python -m gensim.models.lsi_worker\n\"\"\"\n\n\nfrom __future__ import with_statement\nimport os, sys, logging\nimport threading\nimport tempfile\ntry:\n import Queue\nexcept ImportError:\n import queue as Queue\nimport Pyro4\nfrom gensim.models import lsimodel\nfrom gensim import utils\n\nlogger = logging.getLogger('gensim.models.lsi_worker')\n\n\nSAVE_DEBUG = 0 # save intermediate models after every SAVE_DEBUG updates (0 for never)\n\n\n\nclass Worker(object):\n def __init__(self):\n self.model = None\n\n @Pyro4.expose\n def initialize(self, myid, dispatcher, **model_params):\n self.lock_update = threading.Lock()\n self.jobsdone = 0 # how many jobs has this worker completed?\n self.myid = myid # id of this worker in the dispatcher; just a convenience var for easy access/logging TODO remove?\n self.dispatcher = dispatcher\n self.finished = False\n logger.info(\"initializing worker #%s\" % myid)\n self.model = lsimodel.LsiModel(**model_params)\n\n @Pyro4.expose\n @Pyro4.oneway\n def requestjob(self):\n \"\"\"\n Request jobs from the dispatcher, in a perpetual loop until `getstate()` is called.\n \"\"\"\n if self.model is None:\n raise RuntimeError(\"worker must be initialized before receiving jobs\")\n\n job = None\n while job is None and not self.finished:\n try:\n job = self.dispatcher.getjob(self.myid)\n except Queue.Empty:\n # no new job: try again, unless we're finished with all work\n continue\n if job is not None:\n logger.info(\"worker #%s received job #%i\" % (self.myid, self.jobsdone))\n self.processjob(job)\n self.dispatcher.jobdone(self.myid)\n else:\n logger.info(\"worker #%i stopping asking for jobs\" % self.myid)\n\n\n @utils.synchronous('lock_update')\n def processjob(self, job):\n self.model.add_documents(job)\n self.jobsdone += 1\n if SAVE_DEBUG and self.jobsdone % SAVE_DEBUG == 0:\n fname = os.path.join(tempfile.gettempdir(), 'lsi_worker.pkl')\n self.model.save(fname)\n\n @Pyro4.expose\n @utils.synchronous('lock_update')\n def getstate(self):\n logger.info(\"worker #%i returning its state after %s jobs\" %\n (self.myid, self.jobsdone))\n assert isinstance(self.model.projection, lsimodel.Projection)\n self.finished = True\n return self.model.projection\n\n @Pyro4.expose\n @utils.synchronous('lock_update')\n def reset(self):\n logger.info(\"resetting worker #%i\" % self.myid)\n self.model.projection = self.model.projection.empty_like()\n self.finished = False\n\n\n @Pyro4.oneway\n def exit(self):\n logger.info(\"terminating worker #%i\" % self.myid)\n os._exit(0)\n#endclass Worker\n\n\n\ndef main():\n logging.basicConfig(format = '%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n logger.info(\"running %s\" % \" \".join(sys.argv))\n\n program = os.path.basename(sys.argv[0])\n # make sure we have enough cmd line parameters\n if len(sys.argv) < 1:\n print(globals()[\"__doc__\"] % locals())\n sys.exit(1)\n\n utils.pyro_daemon('gensim.lsi_worker', Worker(), random_suffix=True)\n\n logger.info(\"finished running %s\" % program)\n\n\n\nif __name__ == '__main__':\n main()\n", "path": "gensim/models/lsi_worker.py"}]} | 1,607 | 325 |
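
Why the patch for this row works: Pyro4 changed its default behaviour some releases back so that only methods explicitly marked with `@Pyro4.expose` can be invoked remotely, which is exactly what the dispatcher does to the worker during distributed LSI; without the decorator the remote calls are rejected and the run fails as in the screenshot. A minimal sketch of the exposure pattern the golden diff applies (the exact Pyro4 version cutoff is an assumption, it is not stated in the row):

```python
import Pyro4

class Worker(object):
    # Without @Pyro4.expose, a remote dispatcher calling initialize()/requestjob()
    # gets an "attribute is not exposed" style error instead of reaching the method.
    @Pyro4.expose
    def initialize(self, myid, dispatcher, **model_params):
        self.myid = myid
        self.dispatcher = dispatcher

    @Pyro4.expose
    @Pyro4.oneway          # expose has to be stacked on one-way methods as well
    def requestjob(self):
        ...
```
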
gh_patches_debug_20388 | rasdani/github-patches | git_diff | vnpy__vnpy-1500 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ubuntu ctp导入问题
## 环境
* 操作系统: Ubuntu 18.04
* Anaconda版本: Python 3.7 64位
* vn.py版本: DEV-2.0.1 branch 20190313(下载日期)
## Issue类型
三选一:Bug
## 预期程序行为
```
from vnpy.gateway.ctp import ctp_gateway导入成功
## 实际程序行为
'''from vnpy.gateway.ctp.ctp_gateway import CtpGateWay
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/vnpy/vnpy/vnpy/gateway/ctp/__init__.py", line 1, in <module>
from .ctp_gateway import CtpGateway
File "/home/vnpy/vnpy/vnpy/gateway/ctp/ctp_gateway.py", line 6, in <module>
from vnpy.api.ctp import (
File "/home/vnpy/vnpy/vnpy/api/ctp/__init__.py", line 1, in <module>
from .vnctpmd import MdApi
ModuleNotFoundError: No module named 'vnpy.api.ctp.vnctpmd'
```
## 重现步骤
```
删除setup下面的oes安装模块
git clone -b v2.0.1-DEV https://github.com/vnpy/vnpy
cd vnpy
vim setup.py #具体删除删除相关代码即可
chmod +x install.sh && ./install.sh
# 安装会正常进行
```
针对Bug类型Issue,请提供具体重现步骤以及报错截图
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import ast
2 import platform
3 import re
4
5 from setuptools import Extension, find_packages, setup
6
7 with open("vnpy/__init__.py", "rb") as f:
8 version_line = re.search(
9 r"__version__\s+=\s+(.*)", f.read().decode("utf-8")
10 ).group(1)
11 version = str(ast.literal_eval(version_line))
12
13 if platform.uname().system == "Windows":
14 compiler_flags = ["/MP", "/std:c++17", # standard
15 "/O2", "/Ob2", "/Oi", "/Ot", "/Oy", "/GL", # Optimization
16 "/wd4819" # 936 code page
17 ]
18 extra_link_args = []
19 else:
20 compiler_flags = ["-std=c++17",
21 "-Wno-delete-incomplete", "-Wno-sign-compare",
22 ]
23 extra_link_args = ["-lstdc++"]
24
25 vnctpmd = Extension("vnpy.api.ctp.vnctpmd",
26 [
27 "vnpy/api/ctp/vnctp/vnctpmd/vnctpmd.cpp",
28 ],
29 include_dirs=["vnpy/api/ctp/include", "vnpy/api/ctp/vnctp", ],
30 define_macros=[],
31 undef_macros=[],
32 library_dirs=["vnpy/api/ctp/libs", "vnpy/api/ctp"],
33 libraries=["thostmduserapi", "thosttraderapi", ],
34 extra_compile_args=compiler_flags,
35 extra_link_args=extra_link_args,
36 depends=[],
37 runtime_library_dirs=["vnpy/api/ctp"],
38 language="cpp",
39 )
40 vnctptd = Extension("vnpy.api.ctp.vnctptd",
41 [
42 "vnpy/api/ctp/vnctp/vnctptd/vnctptd.cpp",
43 ],
44 include_dirs=["vnpy/api/ctp/include", "vnpy/api/ctp/vnctp", ],
45 define_macros=[],
46 undef_macros=[],
47 library_dirs=["vnpy/api/ctp/libs", "vnpy/api/ctp"],
48 libraries=["thostmduserapi", "thosttraderapi", ],
49 extra_compile_args=compiler_flags,
50 extra_link_args=extra_link_args,
51 runtime_library_dirs=["vnpy/api/ctp"],
52 depends=[],
53 language="cpp",
54 )
55 vnoes = Extension("vnpy.api.oes.vnoes",
56 [
57 "vnpy/api/oes/vnoes/generated_files/classes_1.cpp",
58 "vnpy/api/oes/vnoes/generated_files/classes_2.cpp",
59 "vnpy/api/oes/vnoes/generated_files/module.cpp",
60 ],
61 include_dirs=["vnpy/api/oes/include", "vnpy/api/oes/vnoes", ],
62 define_macros=[("BRIGAND_NO_BOOST_SUPPORT", "1")],
63 undef_macros=[],
64 library_dirs=["vnpy/api/oes/libs"],
65 libraries=["oes_api"],
66 extra_compile_args=compiler_flags,
67 extra_link_args=extra_link_args,
68 depends=[],
69 language="cpp",
70 )
71
72 if platform.uname().system == "Windows":
73 # use pre-built pyd for windows ( support python 3.7 only )
74 ext_modules = []
75 else:
76 ext_modules = [vnctptd, vnctpmd, vnoes]
77
78 pkgs = find_packages()
79
80 setup(
81 name="vnpy",
82 version=version,
83 include_package_data=True,
84 packages=pkgs,
85 package_data={"": [
86 "*.json", "*.md", "*.ico", "*.ini",
87 "*.dll", "*.so", "*.pyd"
88 ]},
89 install_requires=[],
90 ext_modules=ext_modules
91 )
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -34,7 +34,7 @@
extra_compile_args=compiler_flags,
extra_link_args=extra_link_args,
depends=[],
- runtime_library_dirs=["vnpy/api/ctp"],
+ runtime_library_dirs=["$ORIGIN"],
language="cpp",
)
vnctptd = Extension("vnpy.api.ctp.vnctptd",
@@ -48,7 +48,7 @@
libraries=["thostmduserapi", "thosttraderapi", ],
extra_compile_args=compiler_flags,
extra_link_args=extra_link_args,
- runtime_library_dirs=["vnpy/api/ctp"],
+ runtime_library_dirs=["$ORIGIN"],
depends=[],
language="cpp",
)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -34,7 +34,7 @@\n extra_compile_args=compiler_flags,\n extra_link_args=extra_link_args,\n depends=[],\n- runtime_library_dirs=[\"vnpy/api/ctp\"],\n+ runtime_library_dirs=[\"$ORIGIN\"],\n language=\"cpp\",\n )\n vnctptd = Extension(\"vnpy.api.ctp.vnctptd\",\n@@ -48,7 +48,7 @@\n libraries=[\"thostmduserapi\", \"thosttraderapi\", ],\n extra_compile_args=compiler_flags,\n extra_link_args=extra_link_args,\n- runtime_library_dirs=[\"vnpy/api/ctp\"],\n+ runtime_library_dirs=[\"$ORIGIN\"],\n depends=[],\n language=\"cpp\",\n )\n", "issue": "ubuntu\u3000 ctp\u5bfc\u5165\u95ee\u9898\n## \u73af\u5883\r\n\r\n* \u64cd\u4f5c\u7cfb\u7edf: Ubuntu 18.04\r\n* Anaconda\u7248\u672c: Python 3.7 64\u4f4d\r\n* vn.py\u7248\u672c: DEV-2.0.1 branch 20190313\uff08\u4e0b\u8f7d\u65e5\u671f\uff09\r\n\r\n## Issue\u7c7b\u578b\r\n\u4e09\u9009\u4e00\uff1aBu\uff47\r\n\r\n## \u9884\u671f\u7a0b\u5e8f\u884c\u4e3a\r\n```\r\nfrom vnpy.gateway.ctp import ctp_gateway\u5bfc\u5165\u6210\u529f\r\n## \u5b9e\u9645\u7a0b\u5e8f\u884c\u4e3a\r\n'''from vnpy.gateway.ctp.ctp_gateway import CtpGateWay\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/vnpy/vnpy/vnpy/gateway/ctp/__init__.py\", line 1, in <module>\r\n from .ctp_gateway import CtpGateway\r\n File \"/home/vnpy/vnpy/vnpy/gateway/ctp/ctp_gateway.py\", line 6, in <module>\r\n from vnpy.api.ctp import (\r\n File \"/home/vnpy/vnpy/vnpy/api/ctp/__init__.py\", line 1, in <module>\r\n from .vnctpmd import MdApi\r\nModuleNotFoundError: No module named 'vnpy.api.ctp.vnctpmd'\r\n```\r\n\r\n## \u91cd\u73b0\u6b65\u9aa4\r\n\r\n```\r\n\u5220\u9664setup\u4e0b\u9762\u7684oes\u5b89\u88c5\u6a21\u5757 \r\ngit clone -b v2.0.1-DEV https://github.com/vnpy/vnpy\r\ncd vnpy\r\nvim setup.py #\u5177\u4f53\u5220\u9664\u5220\u9664\u76f8\u5173\u4ee3\u7801\u5373\u53ef \r\nchmod +x install.sh && ./install.sh \r\n# \u5b89\u88c5\u4f1a\u6b63\u5e38\u8fdb\u884c \r\n```\r\n\r\n\u9488\u5bf9Bug\u7c7b\u578bIssue\uff0c\u8bf7\u63d0\u4f9b\u5177\u4f53\u91cd\u73b0\u6b65\u9aa4\u4ee5\u53ca\u62a5\u9519\u622a\u56fe\r\n\r\n\n", "before_files": [{"content": "import ast\nimport platform\nimport re\n\nfrom setuptools import Extension, find_packages, setup\n\nwith open(\"vnpy/__init__.py\", \"rb\") as f:\n version_line = re.search(\n r\"__version__\\s+=\\s+(.*)\", f.read().decode(\"utf-8\")\n ).group(1)\n version = str(ast.literal_eval(version_line))\n\nif platform.uname().system == \"Windows\":\n compiler_flags = [\"/MP\", \"/std:c++17\", # standard\n \"/O2\", \"/Ob2\", \"/Oi\", \"/Ot\", \"/Oy\", \"/GL\", # Optimization\n \"/wd4819\" # 936 code page\n ]\n extra_link_args = []\nelse:\n compiler_flags = [\"-std=c++17\",\n \"-Wno-delete-incomplete\", \"-Wno-sign-compare\",\n ]\n extra_link_args = [\"-lstdc++\"]\n\nvnctpmd = Extension(\"vnpy.api.ctp.vnctpmd\",\n [\n \"vnpy/api/ctp/vnctp/vnctpmd/vnctpmd.cpp\",\n ],\n include_dirs=[\"vnpy/api/ctp/include\", \"vnpy/api/ctp/vnctp\", ],\n define_macros=[],\n undef_macros=[],\n library_dirs=[\"vnpy/api/ctp/libs\", \"vnpy/api/ctp\"],\n libraries=[\"thostmduserapi\", \"thosttraderapi\", ],\n extra_compile_args=compiler_flags,\n extra_link_args=extra_link_args,\n depends=[],\n runtime_library_dirs=[\"vnpy/api/ctp\"],\n language=\"cpp\",\n )\nvnctptd = Extension(\"vnpy.api.ctp.vnctptd\",\n [\n \"vnpy/api/ctp/vnctp/vnctptd/vnctptd.cpp\",\n ],\n include_dirs=[\"vnpy/api/ctp/include\", \"vnpy/api/ctp/vnctp\", ],\n define_macros=[],\n undef_macros=[],\n 
library_dirs=[\"vnpy/api/ctp/libs\", \"vnpy/api/ctp\"],\n libraries=[\"thostmduserapi\", \"thosttraderapi\", ],\n extra_compile_args=compiler_flags,\n extra_link_args=extra_link_args,\n runtime_library_dirs=[\"vnpy/api/ctp\"],\n depends=[],\n language=\"cpp\",\n )\nvnoes = Extension(\"vnpy.api.oes.vnoes\",\n [\n \"vnpy/api/oes/vnoes/generated_files/classes_1.cpp\",\n \"vnpy/api/oes/vnoes/generated_files/classes_2.cpp\",\n \"vnpy/api/oes/vnoes/generated_files/module.cpp\",\n ],\n include_dirs=[\"vnpy/api/oes/include\", \"vnpy/api/oes/vnoes\", ],\n define_macros=[(\"BRIGAND_NO_BOOST_SUPPORT\", \"1\")],\n undef_macros=[],\n library_dirs=[\"vnpy/api/oes/libs\"],\n libraries=[\"oes_api\"],\n extra_compile_args=compiler_flags,\n extra_link_args=extra_link_args,\n depends=[],\n language=\"cpp\",\n )\n\nif platform.uname().system == \"Windows\":\n # use pre-built pyd for windows ( support python 3.7 only )\n ext_modules = []\nelse:\n ext_modules = [vnctptd, vnctpmd, vnoes]\n\npkgs = find_packages()\n\nsetup(\n name=\"vnpy\",\n version=version,\n include_package_data=True,\n packages=pkgs,\n package_data={\"\": [\n \"*.json\", \"*.md\", \"*.ico\", \"*.ini\",\n \"*.dll\", \"*.so\", \"*.pyd\"\n ]},\n install_requires=[],\n ext_modules=ext_modules\n)\n", "path": "setup.py"}], "after_files": [{"content": "import ast\nimport platform\nimport re\n\nfrom setuptools import Extension, find_packages, setup\n\nwith open(\"vnpy/__init__.py\", \"rb\") as f:\n version_line = re.search(\n r\"__version__\\s+=\\s+(.*)\", f.read().decode(\"utf-8\")\n ).group(1)\n version = str(ast.literal_eval(version_line))\n\nif platform.uname().system == \"Windows\":\n compiler_flags = [\"/MP\", \"/std:c++17\", # standard\n \"/O2\", \"/Ob2\", \"/Oi\", \"/Ot\", \"/Oy\", \"/GL\", # Optimization\n \"/wd4819\" # 936 code page\n ]\n extra_link_args = []\nelse:\n compiler_flags = [\"-std=c++17\",\n \"-Wno-delete-incomplete\", \"-Wno-sign-compare\",\n ]\n extra_link_args = [\"-lstdc++\"]\n\nvnctpmd = Extension(\"vnpy.api.ctp.vnctpmd\",\n [\n \"vnpy/api/ctp/vnctp/vnctpmd/vnctpmd.cpp\",\n ],\n include_dirs=[\"vnpy/api/ctp/include\", \"vnpy/api/ctp/vnctp\", ],\n define_macros=[],\n undef_macros=[],\n library_dirs=[\"vnpy/api/ctp/libs\", \"vnpy/api/ctp\"],\n libraries=[\"thostmduserapi\", \"thosttraderapi\", ],\n extra_compile_args=compiler_flags,\n extra_link_args=extra_link_args,\n depends=[],\n runtime_library_dirs=[\"$ORIGIN\"],\n language=\"cpp\",\n )\nvnctptd = Extension(\"vnpy.api.ctp.vnctptd\",\n [\n \"vnpy/api/ctp/vnctp/vnctptd/vnctptd.cpp\",\n ],\n include_dirs=[\"vnpy/api/ctp/include\", \"vnpy/api/ctp/vnctp\", ],\n define_macros=[],\n undef_macros=[],\n library_dirs=[\"vnpy/api/ctp/libs\", \"vnpy/api/ctp\"],\n libraries=[\"thostmduserapi\", \"thosttraderapi\", ],\n extra_compile_args=compiler_flags,\n extra_link_args=extra_link_args,\n runtime_library_dirs=[\"$ORIGIN\"],\n depends=[],\n language=\"cpp\",\n )\nvnoes = Extension(\"vnpy.api.oes.vnoes\",\n [\n \"vnpy/api/oes/vnoes/generated_files/classes_1.cpp\",\n \"vnpy/api/oes/vnoes/generated_files/classes_2.cpp\",\n \"vnpy/api/oes/vnoes/generated_files/module.cpp\",\n ],\n include_dirs=[\"vnpy/api/oes/include\", \"vnpy/api/oes/vnoes\", ],\n define_macros=[(\"BRIGAND_NO_BOOST_SUPPORT\", \"1\")],\n undef_macros=[],\n library_dirs=[\"vnpy/api/oes/libs\"],\n libraries=[\"oes_api\"],\n extra_compile_args=compiler_flags,\n extra_link_args=extra_link_args,\n depends=[],\n language=\"cpp\",\n )\n\nif platform.uname().system == \"Windows\":\n # use pre-built pyd for windows ( support 
python 3.7 only )\n ext_modules = []\nelse:\n ext_modules = [vnctptd, vnctpmd, vnoes]\n\npkgs = find_packages()\n\nsetup(\n name=\"vnpy\",\n version=version,\n include_package_data=True,\n packages=pkgs,\n package_data={\"\": [\n \"*.json\", \"*.md\", \"*.ico\", \"*.ini\",\n \"*.dll\", \"*.so\", \"*.pyd\"\n ]},\n install_requires=[],\n ext_modules=ext_modules\n)\n", "path": "setup.py"}]} | 1,594 | 178 |
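
The decisive change in this row's patch is the `runtime_library_dirs` value. On Linux that argument becomes the RPATH embedded in the compiled extension; the original `vnpy/api/ctp` only exists relative to the source checkout, so once the package is installed the loader cannot resolve the bundled `thostmduserapi`/`thosttraderapi` shared libraries and the `vnpy.api.ctp.vnctpmd` extension never becomes importable. `$ORIGIN` tells the loader to look in the directory containing the extension itself. A trimmed sketch of the corrected extension definition (GNU/Linux loader semantics assumed):

```python
from setuptools import Extension

vnctpmd = Extension(
    "vnpy.api.ctp.vnctpmd",
    sources=["vnpy/api/ctp/vnctp/vnctpmd/vnctpmd.cpp"],
    include_dirs=["vnpy/api/ctp/include", "vnpy/api/ctp/vnctp"],
    library_dirs=["vnpy/api/ctp/libs", "vnpy/api/ctp"],   # link-time search path only
    libraries=["thostmduserapi", "thosttraderapi"],
    # "$ORIGIN" is expanded at load time to the directory holding the built .so,
    # so the CTP libraries shipped next to it are found after installation.
    runtime_library_dirs=["$ORIGIN"],
    language="cpp",
)
```
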
gh_patches_debug_35150 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-2973 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider ljsilvers is broken
During the global build at 2021-06-02-14-42-40, spider **ljsilvers** failed with **0 features** and **0 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/logs/ljsilvers.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/ljsilvers.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/ljsilvers.geojson))
Long John Silver's
http://www.ljsilvers.com/
(location search box top right)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/ljsilvers.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import scrapy
3 import json
4 import re
5
6 from locations.items import GeojsonPointItem
7
8
9 class LjsilversSpider(scrapy.Spider):
10 name = "ljsilvers"
11 item_attributes = { 'brand': "Long John Silver's", 'brand_wikidata': "Q1535221" }
12 allowed_domains = ["ljsilvers.com"]
13 start_urls = (
14 'http://www.ljsilvers.com/locator?postalcode=76010',
15 )
16
17 def parse(self, response):
18 data = response.body_as_unicode()
19 base_data = re.search(r'dataout\s--Array\s\((.*)\)\s\s--><style type="text/css">', data, re.DOTALL).group(1)
20 detail_matches = re.findall(r'\((.*?)\)', base_data, re.DOTALL)
21
22 for detail_match in detail_matches:
23 key_values = re.findall(r'(.*?)\s=>\s(.*)', detail_match)
24 props = {}
25
26 for key_value in key_values:
27 key = key_value[0].strip()
28 value = key_value[1].strip()
29
30 if key == '[storeID]':
31 props['ref'] = value
32 if key == '[address]':
33 props['addr_full'] = value
34 if key == '[city]':
35 props['city'] = value
36 if key == '[state]':
37 props['state'] = value
38 if key == '[zip]':
39 props['postcode'] = value
40 if key == '[phone_number]':
41 props['phone'] = value
42 if key == '[latitude]':
43 props['lat'] = value
44 if key == '[longitude]':
45 props['lon'] = value
46
47 yield GeojsonPointItem(**props)
48
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/ljsilvers.py b/locations/spiders/ljsilvers.py
--- a/locations/spiders/ljsilvers.py
+++ b/locations/spiders/ljsilvers.py
@@ -1,47 +1,32 @@
# -*- coding: utf-8 -*-
import scrapy
-import json
-import re
from locations.items import GeojsonPointItem
class LjsilversSpider(scrapy.Spider):
name = "ljsilvers"
- item_attributes = { 'brand': "Long John Silver's", 'brand_wikidata': "Q1535221" }
+ item_attributes = {"brand": "Long John Silver's", "brand_wikidata": "Q1535221"}
allowed_domains = ["ljsilvers.com"]
start_urls = (
- 'http://www.ljsilvers.com/locator?postalcode=76010',
+ "https://viewer.blipstar.com/searchdbnew?uid=2483677&lat=45&lng=-103&value=10000",
)
def parse(self, response):
- data = response.body_as_unicode()
- base_data = re.search(r'dataout\s--Array\s\((.*)\)\s\s--><style type="text/css">', data, re.DOTALL).group(1)
- detail_matches = re.findall(r'\((.*?)\)', base_data, re.DOTALL)
-
- for detail_match in detail_matches:
- key_values = re.findall(r'(.*?)\s=>\s(.*)', detail_match)
- props = {}
-
- for key_value in key_values:
- key = key_value[0].strip()
- value = key_value[1].strip()
-
- if key == '[storeID]':
- props['ref'] = value
- if key == '[address]':
- props['addr_full'] = value
- if key == '[city]':
- props['city'] = value
- if key == '[state]':
- props['state'] = value
- if key == '[zip]':
- props['postcode'] = value
- if key == '[phone_number]':
- props['phone'] = value
- if key == '[latitude]':
- props['lat'] = value
- if key == '[longitude]':
- props['lon'] = value
-
- yield GeojsonPointItem(**props)
+ for row in response.json():
+ if row.keys() == {"fulltotal", "total", "units"}:
+ continue
+ addr = scrapy.Selector(text=row["a"])
+ properties = {
+ "name": row["n"],
+ "ref": row["bpid"],
+ "lat": row["lat"],
+ "lon": row["lng"],
+ "addr_full": addr.xpath("//p/text()").extract_first(),
+ "city": addr.css(".storecity ::text").extract_first(),
+ "state": addr.css(".storestate ::text").extract_first(),
+ "postcode": addr.css(".storepostalcode ::text").extract_first(),
+ "country": row["c"],
+ "phone": row.get("p"),
+ }
+ yield GeojsonPointItem(**properties)
| {"golden_diff": "diff --git a/locations/spiders/ljsilvers.py b/locations/spiders/ljsilvers.py\n--- a/locations/spiders/ljsilvers.py\n+++ b/locations/spiders/ljsilvers.py\n@@ -1,47 +1,32 @@\n # -*- coding: utf-8 -*-\n import scrapy\n-import json\n-import re\n \n from locations.items import GeojsonPointItem\n \n \n class LjsilversSpider(scrapy.Spider):\n name = \"ljsilvers\"\n- item_attributes = { 'brand': \"Long John Silver's\", 'brand_wikidata': \"Q1535221\" }\n+ item_attributes = {\"brand\": \"Long John Silver's\", \"brand_wikidata\": \"Q1535221\"}\n allowed_domains = [\"ljsilvers.com\"]\n start_urls = (\n- 'http://www.ljsilvers.com/locator?postalcode=76010',\n+ \"https://viewer.blipstar.com/searchdbnew?uid=2483677&lat=45&lng=-103&value=10000\",\n )\n \n def parse(self, response):\n- data = response.body_as_unicode()\n- base_data = re.search(r'dataout\\s--Array\\s\\((.*)\\)\\s\\s--><style type=\"text/css\">', data, re.DOTALL).group(1)\n- detail_matches = re.findall(r'\\((.*?)\\)', base_data, re.DOTALL)\n-\n- for detail_match in detail_matches:\n- key_values = re.findall(r'(.*?)\\s=>\\s(.*)', detail_match)\n- props = {}\n-\n- for key_value in key_values:\n- key = key_value[0].strip()\n- value = key_value[1].strip()\n-\n- if key == '[storeID]':\n- props['ref'] = value\n- if key == '[address]':\n- props['addr_full'] = value\n- if key == '[city]':\n- props['city'] = value\n- if key == '[state]':\n- props['state'] = value\n- if key == '[zip]':\n- props['postcode'] = value\n- if key == '[phone_number]':\n- props['phone'] = value\n- if key == '[latitude]':\n- props['lat'] = value\n- if key == '[longitude]':\n- props['lon'] = value\n-\n- yield GeojsonPointItem(**props)\n+ for row in response.json():\n+ if row.keys() == {\"fulltotal\", \"total\", \"units\"}:\n+ continue\n+ addr = scrapy.Selector(text=row[\"a\"])\n+ properties = {\n+ \"name\": row[\"n\"],\n+ \"ref\": row[\"bpid\"],\n+ \"lat\": row[\"lat\"],\n+ \"lon\": row[\"lng\"],\n+ \"addr_full\": addr.xpath(\"//p/text()\").extract_first(),\n+ \"city\": addr.css(\".storecity ::text\").extract_first(),\n+ \"state\": addr.css(\".storestate ::text\").extract_first(),\n+ \"postcode\": addr.css(\".storepostalcode ::text\").extract_first(),\n+ \"country\": row[\"c\"],\n+ \"phone\": row.get(\"p\"),\n+ }\n+ yield GeojsonPointItem(**properties)\n", "issue": "Spider ljsilvers is broken\nDuring the global build at 2021-06-02-14-42-40, spider **ljsilvers** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/logs/ljsilvers.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/ljsilvers.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/ljsilvers.geojson))\nLong John Silver's\nhttp://www.ljsilvers.com/\r\n\r\n(location search box top right)\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nimport re\n\nfrom locations.items import GeojsonPointItem\n\n\nclass LjsilversSpider(scrapy.Spider):\n name = \"ljsilvers\"\n item_attributes = { 'brand': \"Long John Silver's\", 'brand_wikidata': \"Q1535221\" }\n allowed_domains = [\"ljsilvers.com\"]\n start_urls = (\n 'http://www.ljsilvers.com/locator?postalcode=76010',\n )\n\n def parse(self, response):\n data = response.body_as_unicode()\n base_data = re.search(r'dataout\\s--Array\\s\\((.*)\\)\\s\\s--><style type=\"text/css\">', data, re.DOTALL).group(1)\n detail_matches = re.findall(r'\\((.*?)\\)', base_data, 
re.DOTALL)\n\n for detail_match in detail_matches:\n key_values = re.findall(r'(.*?)\\s=>\\s(.*)', detail_match)\n props = {}\n\n for key_value in key_values:\n key = key_value[0].strip()\n value = key_value[1].strip()\n\n if key == '[storeID]':\n props['ref'] = value\n if key == '[address]':\n props['addr_full'] = value\n if key == '[city]':\n props['city'] = value\n if key == '[state]':\n props['state'] = value\n if key == '[zip]':\n props['postcode'] = value\n if key == '[phone_number]':\n props['phone'] = value\n if key == '[latitude]':\n props['lat'] = value\n if key == '[longitude]':\n props['lon'] = value\n\n yield GeojsonPointItem(**props)\n", "path": "locations/spiders/ljsilvers.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\n\n\nclass LjsilversSpider(scrapy.Spider):\n name = \"ljsilvers\"\n item_attributes = {\"brand\": \"Long John Silver's\", \"brand_wikidata\": \"Q1535221\"}\n allowed_domains = [\"ljsilvers.com\"]\n start_urls = (\n \"https://viewer.blipstar.com/searchdbnew?uid=2483677&lat=45&lng=-103&value=10000\",\n )\n\n def parse(self, response):\n for row in response.json():\n if row.keys() == {\"fulltotal\", \"total\", \"units\"}:\n continue\n addr = scrapy.Selector(text=row[\"a\"])\n properties = {\n \"name\": row[\"n\"],\n \"ref\": row[\"bpid\"],\n \"lat\": row[\"lat\"],\n \"lon\": row[\"lng\"],\n \"addr_full\": addr.xpath(\"//p/text()\").extract_first(),\n \"city\": addr.css(\".storecity ::text\").extract_first(),\n \"state\": addr.css(\".storestate ::text\").extract_first(),\n \"postcode\": addr.css(\".storepostalcode ::text\").extract_first(),\n \"country\": row[\"c\"],\n \"phone\": row.get(\"p\"),\n }\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/ljsilvers.py"}]} | 958 | 738 |
gh_patches_debug_1832 | rasdani/github-patches | git_diff | conan-io__conan-center-index-18494 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[package] clickhouse-cpp/*: fPIC option is not respected
In the recipe file fPIC option is always removed during configure stage, which can lead to not working static library.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `recipes/clickhouse-cpp/all/conanfile.py`
Content:
```
1 from conan import ConanFile
2 from conan.tools.cmake import CMake, CMakeToolchain,CMakeDeps, cmake_layout
3 from conan.tools.files import copy, get
4 from conan.tools.build import check_min_cppstd
5 from conan.errors import ConanInvalidConfiguration
6 from conan.tools.scm import Version
7 import os
8
9 required_conan_version = ">=1.53.0"
10
11 class ClickHouseCppConan(ConanFile):
12 name = "clickhouse-cpp"
13 homepage = "https://github.com/ClickHouse/clickhouse-cpp"
14 url = "https://github.com/conan-io/conan-center-index"
15 description = "ClickHouse C++ API"
16 license = "Apache-2.0"
17 topics = ("database", "db", "clickhouse")
18 settings = "os", "arch", "compiler", "build_type"
19 options = {
20 "shared": [True, False],
21 "fPIC": [True, False],
22 "enable_benchmark": [True, False],
23 "with_openssl": [True, False]
24 }
25 default_options = {
26 "shared": False,
27 "fPIC": True,
28 "enable_benchmark": False,
29 "with_openssl": False
30 }
31
32 def requirements(self):
33
34 self.requires("lz4/1.9.4")
35
36 self.requires("abseil/20230125.3", transitive_headers=True)
37
38 self.requires("cityhash/cci.20130801")
39 if self.options.with_openssl:
40 self.requires("openssl/[>=1.1 <4]")
41
42 def build_requirements(self):
43 if self.options.enable_benchmark:
44 self.requires("benchmark/1.8.0")
45
46 @property
47 def _min_cppstd(self):
48 return "17"
49
50 @property
51 def _compilers_minimum_version(self):
52 return {
53 "Visual Studio": "15",
54 "msvc": "191",
55 "gcc": "7",
56 "clang": "6",
57 }
58
59 @property
60 def _requires_compiler_rt(self):
61 return self.settings.compiler == "clang" and (( self.settings.compiler.libcxx in ["libstdc++", "libstdc++11"] and not self.options.shared) or self.settings.compiler.libcxx == "libc++" )
62
63 def validate(self):
64 if self.settings.compiler.get_safe("cppstd"):
65 check_min_cppstd(self, self._min_cppstd)
66 minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)
67 if minimum_version and Version(self.settings.compiler.version) < minimum_version:
68 raise ConanInvalidConfiguration(f"{self.ref} requires C++17, which your compiler does not support.")
69 if self.settings.os == "Windows" and self.options.shared:
70 raise ConanInvalidConfiguration("f{self.ref} does not support shared library on Windows.")
71 # look at https://github.com/ClickHouse/clickhouse-cpp/pull/226
72
73 def config_options(self):
74 if self.settings.os == "Windows":
75 del self.options.fPIC
76
77 def configure(self):
78 self.options.rm_safe("fPIC")
79
80 def layout(self):
81 cmake_layout(self, src_folder="src")
82
83 def source(self):
84 get(self, **self.conan_data["sources"][self.version],
85 destination=self.source_folder, strip_root=True)
86
87 def generate(self):
88 tc = CMakeToolchain(self)
89 tc.variables["BUILD_BENCHMARK"] = self.options.enable_benchmark
90 tc.cache_variables["BUILD_SHARED_LIBS"] = self.options.shared
91 tc.variables["WITH_OPENSSL"] = self.options.with_openssl
92 tc.cache_variables["WITH_SYSTEM_ABSEIL"] = True
93 tc.cache_variables["WITH_SYSTEM_LZ4"] = True
94 tc.cache_variables["WITH_SYSTEM_CITYHASH"] = True
95 tc.generate()
96
97 cd = CMakeDeps(self)
98 cd.generate()
99
100 def build(self):
101 cmake = CMake(self)
102 cmake.configure()
103 cmake.build()
104
105 def package(self):
106 copy(self, "LICENSE", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
107 cmake = CMake(self)
108 cmake.install()
109
110 def package_info(self):
111 self.cpp_info.libs.append("clickhouse-cpp-lib")
112 self.cpp_info.set_property("cmake_target_name", "clickhouse-cpp-lib::clickhouse-cpp-lib")
113
114 if self._requires_compiler_rt:
115 ldflags = ["--rtlib=compiler-rt"]
116 self.cpp_info.exelinkflags = ldflags
117 self.cpp_info.sharedlinkflags = ldflags
118 self.cpp_info.system_libs.append("gcc_s")
119
120 self.cpp_info.filenames["cmake_find_package"] = "clickhouse-cpp"
121 self.cpp_info.filenames["cmake_find_package_multi"] = "clickhouse-cpp"
122 self.cpp_info.names["cmake_find_package"] = "clickhouse-cpp-lib"
123 self.cpp_info.names["cmake_find_package_multi"] = "clickhouse-cpp-lib"
124
125 if self.settings.os == 'Windows':
126 self.cpp_info.system_libs = ['ws2_32', 'wsock32']
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/recipes/clickhouse-cpp/all/conanfile.py b/recipes/clickhouse-cpp/all/conanfile.py
--- a/recipes/clickhouse-cpp/all/conanfile.py
+++ b/recipes/clickhouse-cpp/all/conanfile.py
@@ -75,7 +75,8 @@
del self.options.fPIC
def configure(self):
- self.options.rm_safe("fPIC")
+ if self.options.shared:
+ self.options.rm_safe("fPIC")
def layout(self):
cmake_layout(self, src_folder="src")
| {"golden_diff": "diff --git a/recipes/clickhouse-cpp/all/conanfile.py b/recipes/clickhouse-cpp/all/conanfile.py\n--- a/recipes/clickhouse-cpp/all/conanfile.py\n+++ b/recipes/clickhouse-cpp/all/conanfile.py\n@@ -75,7 +75,8 @@\n del self.options.fPIC\n \n def configure(self):\n- self.options.rm_safe(\"fPIC\")\n+ if self.options.shared:\n+ self.options.rm_safe(\"fPIC\")\n \n def layout(self):\n cmake_layout(self, src_folder=\"src\")\n", "issue": "[package] clickhouse-cpp/*: fPIC option is not respected\nIn the recipe file fPIC option is always removed during configure stage, which can lead to not working static library.\n", "before_files": [{"content": "from conan import ConanFile\nfrom conan.tools.cmake import CMake, CMakeToolchain,CMakeDeps, cmake_layout\nfrom conan.tools.files import copy, get\nfrom conan.tools.build import check_min_cppstd\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.scm import Version\nimport os\n\nrequired_conan_version = \">=1.53.0\"\n\nclass ClickHouseCppConan(ConanFile):\n name = \"clickhouse-cpp\"\n homepage = \"https://github.com/ClickHouse/clickhouse-cpp\"\n url = \"https://github.com/conan-io/conan-center-index\"\n description = \"ClickHouse C++ API\"\n license = \"Apache-2.0\"\n topics = (\"database\", \"db\", \"clickhouse\")\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"enable_benchmark\": [True, False],\n \"with_openssl\": [True, False]\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"enable_benchmark\": False,\n \"with_openssl\": False\n }\n\n def requirements(self):\n\n self.requires(\"lz4/1.9.4\")\n\n self.requires(\"abseil/20230125.3\", transitive_headers=True)\n\n self.requires(\"cityhash/cci.20130801\")\n if self.options.with_openssl:\n self.requires(\"openssl/[>=1.1 <4]\")\n\n def build_requirements(self):\n if self.options.enable_benchmark:\n self.requires(\"benchmark/1.8.0\")\n\n @property\n def _min_cppstd(self):\n return \"17\"\n\n @property\n def _compilers_minimum_version(self):\n return {\n \"Visual Studio\": \"15\",\n \"msvc\": \"191\",\n \"gcc\": \"7\",\n \"clang\": \"6\",\n }\n\n @property\n def _requires_compiler_rt(self):\n return self.settings.compiler == \"clang\" and (( self.settings.compiler.libcxx in [\"libstdc++\", \"libstdc++11\"] and not self.options.shared) or self.settings.compiler.libcxx == \"libc++\" )\n\n def validate(self):\n if self.settings.compiler.get_safe(\"cppstd\"):\n check_min_cppstd(self, self._min_cppstd)\n minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)\n if minimum_version and Version(self.settings.compiler.version) < minimum_version:\n raise ConanInvalidConfiguration(f\"{self.ref} requires C++17, which your compiler does not support.\")\n if self.settings.os == \"Windows\" and self.options.shared:\n raise ConanInvalidConfiguration(\"f{self.ref} does not support shared library on Windows.\")\n # look at https://github.com/ClickHouse/clickhouse-cpp/pull/226\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n self.options.rm_safe(\"fPIC\")\n\n def layout(self):\n cmake_layout(self, src_folder=\"src\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version],\n destination=self.source_folder, strip_root=True)\n\n def generate(self):\n tc = CMakeToolchain(self)\n tc.variables[\"BUILD_BENCHMARK\"] = self.options.enable_benchmark\n 
tc.cache_variables[\"BUILD_SHARED_LIBS\"] = self.options.shared\n tc.variables[\"WITH_OPENSSL\"] = self.options.with_openssl\n tc.cache_variables[\"WITH_SYSTEM_ABSEIL\"] = True\n tc.cache_variables[\"WITH_SYSTEM_LZ4\"] = True\n tc.cache_variables[\"WITH_SYSTEM_CITYHASH\"] = True\n tc.generate()\n\n cd = CMakeDeps(self)\n cd.generate()\n\n def build(self):\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def package(self):\n copy(self, \"LICENSE\", src=self.source_folder, dst=os.path.join(self.package_folder, \"licenses\"))\n cmake = CMake(self)\n cmake.install()\n\n def package_info(self):\n self.cpp_info.libs.append(\"clickhouse-cpp-lib\")\n self.cpp_info.set_property(\"cmake_target_name\", \"clickhouse-cpp-lib::clickhouse-cpp-lib\")\n\n if self._requires_compiler_rt:\n ldflags = [\"--rtlib=compiler-rt\"]\n self.cpp_info.exelinkflags = ldflags\n self.cpp_info.sharedlinkflags = ldflags\n self.cpp_info.system_libs.append(\"gcc_s\")\n\n self.cpp_info.filenames[\"cmake_find_package\"] = \"clickhouse-cpp\"\n self.cpp_info.filenames[\"cmake_find_package_multi\"] = \"clickhouse-cpp\"\n self.cpp_info.names[\"cmake_find_package\"] = \"clickhouse-cpp-lib\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"clickhouse-cpp-lib\"\n\n if self.settings.os == 'Windows':\n self.cpp_info.system_libs = ['ws2_32', 'wsock32']\n", "path": "recipes/clickhouse-cpp/all/conanfile.py"}], "after_files": [{"content": "from conan import ConanFile\nfrom conan.tools.cmake import CMake, CMakeToolchain,CMakeDeps, cmake_layout\nfrom conan.tools.files import copy, get\nfrom conan.tools.build import check_min_cppstd\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.scm import Version\nimport os\n\nrequired_conan_version = \">=1.53.0\"\n\nclass ClickHouseCppConan(ConanFile):\n name = \"clickhouse-cpp\"\n homepage = \"https://github.com/ClickHouse/clickhouse-cpp\"\n url = \"https://github.com/conan-io/conan-center-index\"\n description = \"ClickHouse C++ API\"\n license = \"Apache-2.0\"\n topics = (\"database\", \"db\", \"clickhouse\")\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"enable_benchmark\": [True, False],\n \"with_openssl\": [True, False]\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"enable_benchmark\": False,\n \"with_openssl\": False\n }\n\n def requirements(self):\n\n self.requires(\"lz4/1.9.4\")\n\n self.requires(\"abseil/20230125.3\", transitive_headers=True)\n\n self.requires(\"cityhash/cci.20130801\")\n if self.options.with_openssl:\n self.requires(\"openssl/[>=1.1 <4]\")\n\n def build_requirements(self):\n if self.options.enable_benchmark:\n self.requires(\"benchmark/1.8.0\")\n\n @property\n def _min_cppstd(self):\n return \"17\"\n\n @property\n def _compilers_minimum_version(self):\n return {\n \"Visual Studio\": \"15\",\n \"msvc\": \"191\",\n \"gcc\": \"7\",\n \"clang\": \"6\",\n }\n\n @property\n def _requires_compiler_rt(self):\n return self.settings.compiler == \"clang\" and (( self.settings.compiler.libcxx in [\"libstdc++\", \"libstdc++11\"] and not self.options.shared) or self.settings.compiler.libcxx == \"libc++\" )\n\n def validate(self):\n if self.settings.compiler.get_safe(\"cppstd\"):\n check_min_cppstd(self, self._min_cppstd)\n minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)\n if minimum_version and Version(self.settings.compiler.version) < minimum_version:\n raise ConanInvalidConfiguration(f\"{self.ref} 
requires C++17, which your compiler does not support.\")\n if self.settings.os == \"Windows\" and self.options.shared:\n raise ConanInvalidConfiguration(\"f{self.ref} does not support shared library on Windows.\")\n # look at https://github.com/ClickHouse/clickhouse-cpp/pull/226\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n self.options.rm_safe(\"fPIC\")\n\n def layout(self):\n cmake_layout(self, src_folder=\"src\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version],\n destination=self.source_folder, strip_root=True)\n\n def generate(self):\n tc = CMakeToolchain(self)\n tc.variables[\"BUILD_BENCHMARK\"] = self.options.enable_benchmark\n tc.cache_variables[\"BUILD_SHARED_LIBS\"] = self.options.shared\n tc.variables[\"WITH_OPENSSL\"] = self.options.with_openssl\n tc.cache_variables[\"WITH_SYSTEM_ABSEIL\"] = True\n tc.cache_variables[\"WITH_SYSTEM_LZ4\"] = True\n tc.cache_variables[\"WITH_SYSTEM_CITYHASH\"] = True\n tc.generate()\n\n cd = CMakeDeps(self)\n cd.generate()\n\n def build(self):\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def package(self):\n copy(self, \"LICENSE\", src=self.source_folder, dst=os.path.join(self.package_folder, \"licenses\"))\n cmake = CMake(self)\n cmake.install()\n\n def package_info(self):\n self.cpp_info.libs.append(\"clickhouse-cpp-lib\")\n self.cpp_info.set_property(\"cmake_target_name\", \"clickhouse-cpp-lib::clickhouse-cpp-lib\")\n\n if self._requires_compiler_rt:\n ldflags = [\"--rtlib=compiler-rt\"]\n self.cpp_info.exelinkflags = ldflags\n self.cpp_info.sharedlinkflags = ldflags\n self.cpp_info.system_libs.append(\"gcc_s\")\n\n self.cpp_info.filenames[\"cmake_find_package\"] = \"clickhouse-cpp\"\n self.cpp_info.filenames[\"cmake_find_package_multi\"] = \"clickhouse-cpp\"\n self.cpp_info.names[\"cmake_find_package\"] = \"clickhouse-cpp-lib\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"clickhouse-cpp-lib\"\n\n if self.settings.os == 'Windows':\n self.cpp_info.system_libs = ['ws2_32', 'wsock32']\n", "path": "recipes/clickhouse-cpp/all/conanfile.py"}]} | 1,719 | 127 |
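
The defect in this row is a one-line logic slip: `configure()` called `rm_safe("fPIC")` unconditionally, so even a static (`shared=False`) build lost the option and the recipe no longer requested position-independent code, which is what the issue reports. The convention the golden diff restores is to drop `fPIC` only where it is redundant: on Windows (no such flag) and for shared builds (where it is implied). A sketch of that pattern, trimmed from the recipe:

```python
from conan import ConanFile

class ClickHouseCppConan(ConanFile):
    settings = "os", "arch", "compiler", "build_type"
    options = {"shared": [True, False], "fPIC": [True, False]}
    default_options = {"shared": False, "fPIC": True}

    def config_options(self):
        if self.settings.os == "Windows":
            del self.options.fPIC            # MSVC has no -fPIC equivalent

    def configure(self):
        if self.options.shared:
            self.options.rm_safe("fPIC")     # implied for shared libs; kept for static builds
```
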
gh_patches_debug_30216 | rasdani/github-patches | git_diff | vega__altair-982 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BUG: if selenium is installed but not properly configured, Altair cannot be imported
Fix is to use a more robust lazy import of selenium.
The main issue is that ``import altair`` ends up trying to import selenium. It would be better if selenium weren't imported until it is actually needed. Same for other optional imports.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `altair/utils/headless.py`
Content:
```
1 """
2 Utilities that use selenium + chrome headless to save figures
3 """
4
5 import contextlib
6 import os
7 import tempfile
8
9 try:
10 import selenium.webdriver
11 except ImportError:
12 selenium = None
13
14
15 @contextlib.contextmanager
16 def temporary_filename(**kwargs):
17 """Create and clean-up a temporary file
18
19 Arguments are the same as those passed to tempfile.mkstemp
20
21 We could use tempfile.NamedTemporaryFile here, but that causes issues on
22 windows (see https://bugs.python.org/issue14243).
23 """
24 filedescriptor, filename = tempfile.mkstemp(**kwargs)
25 os.close(filedescriptor)
26
27 try:
28 yield filename
29 finally:
30 if os.path.exists(filename):
31 os.remove(filename)
32
33
34 HTML_TEMPLATE = """
35 <!DOCTYPE html>
36 <html>
37 <head>
38 <title>Embedding Vega-Lite</title>
39 <script src="https://cdn.jsdelivr.net/npm/vega@{vega_version}"></script>
40 <script src="https://cdn.jsdelivr.net/npm/vega-lite@{vegalite_version}"></script>
41 <script src="https://cdn.jsdelivr.net/npm/vega-embed@{vegaembed_version}"></script>
42 </head>
43 <body>
44 <div id="vis"></div>
45 </body>
46 </html>
47 """
48
49 EXTRACT_CODE = {
50 'png': """
51 var spec = arguments[0];
52 var mode = arguments[1];
53 var scaleFactor = arguments[2];
54 var done = arguments[3];
55
56 if(mode === 'vega-lite'){
57 // compile vega-lite to vega
58 const compiled = vl.compile(spec);
59 spec = compiled.spec;
60 }
61
62 new vega.View(vega.parse(spec), {
63 loader: vega.loader(),
64 logLevel: vega.Warn,
65 renderer: 'none',
66 })
67 .initialize()
68 .toCanvas(scaleFactor)
69 .then(function(canvas){return canvas.toDataURL('image/png');})
70 .then(done)
71 .catch(function(err) { console.error(err); });
72 """,
73 'svg': """
74 var spec = arguments[0];
75 var mode = arguments[1];
76 var scaleFactor = arguments[2];
77 var done = arguments[3];
78
79 if(mode === 'vega-lite'){
80 // compile vega-lite to vega
81 const compiled = vl.compile(spec);
82 spec = compiled.spec;
83 }
84
85 new vega.View(vega.parse(spec), {
86 loader: vega.loader(),
87 logLevel: vega.Warn,
88 renderer: 'none',
89 })
90 .initialize()
91 .toSVG(scaleFactor)
92 .then(done)
93 .catch(function(err) { console.error(err); });
94 """,
95 'vega': """
96 var spec = arguments[0];
97 var mode = arguments[1];
98 var done = arguments[3];
99
100 if(mode === 'vega-lite'){
101 // compile vega-lite to vega
102 const compiled = vl.compile(spec);
103 spec = compiled.spec;
104 }
105
106 done(spec);
107 """}
108
109
110 def compile_spec(spec, format, mode,
111 vega_version, vegaembed_version, vegalite_version,
112 scale_factor=1, driver_timeout=20, webdriver='chrome'):
113
114 # TODO: detect & use local Jupyter caches of JS packages?
115
116 if format not in ['png', 'svg', 'vega']:
117 raise NotImplementedError("format must be 'svg', 'png' or 'vega'")
118
119 if mode not in ['vega', 'vega-lite']:
120 raise ValueError("mode must be either 'vega' or 'vega-lite'")
121
122 if vega_version is None:
123 raise ValueError("must specify vega_version")
124
125 if vegaembed_version is None:
126 raise ValueError("must specify vegaembed_version")
127
128 if mode == 'vega-lite' and vegalite_version is None:
129 raise ValueError("must specify vega-lite version")
130
131 if selenium is None:
132 raise ImportError("selenium package is required "
133 "for saving chart as {0}".format(format))
134 if webdriver == 'chrome':
135 webdriver_class = selenium.webdriver.Chrome
136 webdriver_options_class = selenium.webdriver.chrome.options.Options
137 elif webdriver == 'firefox':
138 webdriver_class = selenium.webdriver.Firefox
139 webdriver_options_class = selenium.webdriver.firefox.options.Options
140 else:
141 raise ValueError("webdriver must be 'chrome' or 'firefox'")
142
143 html = HTML_TEMPLATE.format(vega_version=vega_version,
144 vegalite_version=vegalite_version,
145 vegaembed_version=vegaembed_version)
146
147 webdriver_options = webdriver_options_class()
148 webdriver_options.add_argument("--headless")
149
150 if issubclass(webdriver_class, selenium.webdriver.Chrome):
151 # for linux/osx root user, need to add --no-sandbox option.
152 # since geteuid doesn't exist on windows, we don't check it
153 if hasattr(os, 'geteuid') and (os.geteuid() == 0):
154 webdriver_options.add_argument('--no-sandbox')
155
156 driver = webdriver_class(options=webdriver_options)
157
158 try:
159 driver.set_page_load_timeout(driver_timeout)
160
161 with temporary_filename(suffix='.html') as htmlfile:
162 with open(htmlfile, 'w') as f:
163 f.write(html)
164 driver.get("file://" + htmlfile)
165 online = driver.execute_script("return navigator.onLine")
166 if not online:
167 raise ValueError("Internet connection required for saving "
168 "chart as {0}".format(format))
169 return driver.execute_async_script(EXTRACT_CODE[format],
170 spec, mode, scale_factor)
171 finally:
172 driver.close()
173
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/altair/utils/headless.py b/altair/utils/headless.py
--- a/altair/utils/headless.py
+++ b/altair/utils/headless.py
@@ -6,11 +6,6 @@
import os
import tempfile
-try:
- import selenium.webdriver
-except ImportError:
- selenium = None
-
@contextlib.contextmanager
def temporary_filename(**kwargs):
@@ -110,9 +105,15 @@
def compile_spec(spec, format, mode,
vega_version, vegaembed_version, vegalite_version,
scale_factor=1, driver_timeout=20, webdriver='chrome'):
-
# TODO: detect & use local Jupyter caches of JS packages?
+ # selenium is an optional dependency, so import it here
+ try:
+ import selenium.webdriver
+ except ImportError:
+ raise ImportError("selenium package is required "
+ "for saving chart as {0}".format(format))
+
if format not in ['png', 'svg', 'vega']:
raise NotImplementedError("format must be 'svg', 'png' or 'vega'")
@@ -128,9 +129,6 @@
if mode == 'vega-lite' and vegalite_version is None:
raise ValueError("must specify vega-lite version")
- if selenium is None:
- raise ImportError("selenium package is required "
- "for saving chart as {0}".format(format))
if webdriver == 'chrome':
webdriver_class = selenium.webdriver.Chrome
webdriver_options_class = selenium.webdriver.chrome.options.Options
| {"golden_diff": "diff --git a/altair/utils/headless.py b/altair/utils/headless.py\n--- a/altair/utils/headless.py\n+++ b/altair/utils/headless.py\n@@ -6,11 +6,6 @@\n import os\n import tempfile\n \n-try:\n- import selenium.webdriver\n-except ImportError:\n- selenium = None\n-\n \n @contextlib.contextmanager\n def temporary_filename(**kwargs):\n@@ -110,9 +105,15 @@\n def compile_spec(spec, format, mode,\n vega_version, vegaembed_version, vegalite_version,\n scale_factor=1, driver_timeout=20, webdriver='chrome'):\n- \n # TODO: detect & use local Jupyter caches of JS packages?\n \n+ # selenium is an optional dependency, so import it here\n+ try:\n+ import selenium.webdriver\n+ except ImportError:\n+ raise ImportError(\"selenium package is required \"\n+ \"for saving chart as {0}\".format(format))\n+\n if format not in ['png', 'svg', 'vega']:\n raise NotImplementedError(\"format must be 'svg', 'png' or 'vega'\")\n \n@@ -128,9 +129,6 @@\n if mode == 'vega-lite' and vegalite_version is None:\n raise ValueError(\"must specify vega-lite version\")\n \n- if selenium is None:\n- raise ImportError(\"selenium package is required \"\n- \"for saving chart as {0}\".format(format))\n if webdriver == 'chrome':\n webdriver_class = selenium.webdriver.Chrome\n webdriver_options_class = selenium.webdriver.chrome.options.Options\n", "issue": "BUG: if selenium is installed but not properly configured, Altair cannot be imported\nFix is to use a more robust lazy import of selenium.\r\n\r\nThe main issue is that ``import altair`` ends up trying to import selenium. It would be better if selenium weren't imported until it is actually needed. Same for other optional imports.\n", "before_files": [{"content": "\"\"\"\nUtilities that use selenium + chrome headless to save figures\n\"\"\"\n\nimport contextlib\nimport os\nimport tempfile\n\ntry:\n import selenium.webdriver\nexcept ImportError:\n selenium = None\n\n\[email protected]\ndef temporary_filename(**kwargs):\n \"\"\"Create and clean-up a temporary file\n\n Arguments are the same as those passed to tempfile.mkstemp\n\n We could use tempfile.NamedTemporaryFile here, but that causes issues on\n windows (see https://bugs.python.org/issue14243).\n \"\"\"\n filedescriptor, filename = tempfile.mkstemp(**kwargs)\n os.close(filedescriptor)\n\n try:\n yield filename\n finally:\n if os.path.exists(filename):\n os.remove(filename)\n\n\nHTML_TEMPLATE = \"\"\"\n<!DOCTYPE html>\n<html>\n<head>\n <title>Embedding Vega-Lite</title>\n <script src=\"https://cdn.jsdelivr.net/npm/vega@{vega_version}\"></script>\n <script src=\"https://cdn.jsdelivr.net/npm/vega-lite@{vegalite_version}\"></script>\n <script src=\"https://cdn.jsdelivr.net/npm/vega-embed@{vegaembed_version}\"></script>\n</head>\n<body>\n <div id=\"vis\"></div>\n</body>\n</html>\n\"\"\"\n\nEXTRACT_CODE = {\n'png': \"\"\"\n var spec = arguments[0];\n var mode = arguments[1];\n var scaleFactor = arguments[2];\n var done = arguments[3];\n\n if(mode === 'vega-lite'){\n // compile vega-lite to vega\n const compiled = vl.compile(spec);\n spec = compiled.spec;\n }\n\n new vega.View(vega.parse(spec), {\n loader: vega.loader(),\n logLevel: vega.Warn,\n renderer: 'none',\n })\n .initialize()\n .toCanvas(scaleFactor)\n .then(function(canvas){return canvas.toDataURL('image/png');})\n .then(done)\n .catch(function(err) { console.error(err); });\n \"\"\",\n'svg': \"\"\"\n var spec = arguments[0];\n var mode = arguments[1];\n var scaleFactor = arguments[2];\n var done = arguments[3];\n\n if(mode === 'vega-lite'){\n // compile vega-lite 
to vega\n const compiled = vl.compile(spec);\n spec = compiled.spec;\n }\n\n new vega.View(vega.parse(spec), {\n loader: vega.loader(),\n logLevel: vega.Warn,\n renderer: 'none',\n })\n .initialize()\n .toSVG(scaleFactor)\n .then(done)\n .catch(function(err) { console.error(err); });\n \"\"\",\n'vega': \"\"\"\n var spec = arguments[0];\n var mode = arguments[1];\n var done = arguments[3];\n\n if(mode === 'vega-lite'){\n // compile vega-lite to vega\n const compiled = vl.compile(spec);\n spec = compiled.spec;\n }\n\n done(spec);\n \"\"\"}\n\n\ndef compile_spec(spec, format, mode,\n vega_version, vegaembed_version, vegalite_version,\n scale_factor=1, driver_timeout=20, webdriver='chrome'):\n \n # TODO: detect & use local Jupyter caches of JS packages?\n\n if format not in ['png', 'svg', 'vega']:\n raise NotImplementedError(\"format must be 'svg', 'png' or 'vega'\")\n\n if mode not in ['vega', 'vega-lite']:\n raise ValueError(\"mode must be either 'vega' or 'vega-lite'\")\n\n if vega_version is None:\n raise ValueError(\"must specify vega_version\")\n\n if vegaembed_version is None:\n raise ValueError(\"must specify vegaembed_version\")\n\n if mode == 'vega-lite' and vegalite_version is None:\n raise ValueError(\"must specify vega-lite version\")\n\n if selenium is None:\n raise ImportError(\"selenium package is required \"\n \"for saving chart as {0}\".format(format))\n if webdriver == 'chrome':\n webdriver_class = selenium.webdriver.Chrome\n webdriver_options_class = selenium.webdriver.chrome.options.Options\n elif webdriver == 'firefox':\n webdriver_class = selenium.webdriver.Firefox\n webdriver_options_class = selenium.webdriver.firefox.options.Options\n else:\n raise ValueError(\"webdriver must be 'chrome' or 'firefox'\")\n\n html = HTML_TEMPLATE.format(vega_version=vega_version,\n vegalite_version=vegalite_version,\n vegaembed_version=vegaembed_version)\n\n webdriver_options = webdriver_options_class()\n webdriver_options.add_argument(\"--headless\")\n\n if issubclass(webdriver_class, selenium.webdriver.Chrome):\n # for linux/osx root user, need to add --no-sandbox option.\n # since geteuid doesn't exist on windows, we don't check it\n if hasattr(os, 'geteuid') and (os.geteuid() == 0):\n webdriver_options.add_argument('--no-sandbox')\n\n driver = webdriver_class(options=webdriver_options)\n\n try:\n driver.set_page_load_timeout(driver_timeout)\n\n with temporary_filename(suffix='.html') as htmlfile:\n with open(htmlfile, 'w') as f:\n f.write(html)\n driver.get(\"file://\" + htmlfile)\n online = driver.execute_script(\"return navigator.onLine\")\n if not online:\n raise ValueError(\"Internet connection required for saving \"\n \"chart as {0}\".format(format))\n return driver.execute_async_script(EXTRACT_CODE[format],\n spec, mode, scale_factor)\n finally:\n driver.close()\n", "path": "altair/utils/headless.py"}], "after_files": [{"content": "\"\"\"\nUtilities that use selenium + chrome headless to save figures\n\"\"\"\n\nimport contextlib\nimport os\nimport tempfile\n\n\[email protected]\ndef temporary_filename(**kwargs):\n \"\"\"Create and clean-up a temporary file\n\n Arguments are the same as those passed to tempfile.mkstemp\n\n We could use tempfile.NamedTemporaryFile here, but that causes issues on\n windows (see https://bugs.python.org/issue14243).\n \"\"\"\n filedescriptor, filename = tempfile.mkstemp(**kwargs)\n os.close(filedescriptor)\n\n try:\n yield filename\n finally:\n if os.path.exists(filename):\n os.remove(filename)\n\n\nHTML_TEMPLATE = \"\"\"\n<!DOCTYPE 
html>\n<html>\n<head>\n <title>Embedding Vega-Lite</title>\n <script src=\"https://cdn.jsdelivr.net/npm/vega@{vega_version}\"></script>\n <script src=\"https://cdn.jsdelivr.net/npm/vega-lite@{vegalite_version}\"></script>\n <script src=\"https://cdn.jsdelivr.net/npm/vega-embed@{vegaembed_version}\"></script>\n</head>\n<body>\n <div id=\"vis\"></div>\n</body>\n</html>\n\"\"\"\n\nEXTRACT_CODE = {\n'png': \"\"\"\n var spec = arguments[0];\n var mode = arguments[1];\n var scaleFactor = arguments[2];\n var done = arguments[3];\n\n if(mode === 'vega-lite'){\n // compile vega-lite to vega\n const compiled = vl.compile(spec);\n spec = compiled.spec;\n }\n\n new vega.View(vega.parse(spec), {\n loader: vega.loader(),\n logLevel: vega.Warn,\n renderer: 'none',\n })\n .initialize()\n .toCanvas(scaleFactor)\n .then(function(canvas){return canvas.toDataURL('image/png');})\n .then(done)\n .catch(function(err) { console.error(err); });\n \"\"\",\n'svg': \"\"\"\n var spec = arguments[0];\n var mode = arguments[1];\n var scaleFactor = arguments[2];\n var done = arguments[3];\n\n if(mode === 'vega-lite'){\n // compile vega-lite to vega\n const compiled = vl.compile(spec);\n spec = compiled.spec;\n }\n\n new vega.View(vega.parse(spec), {\n loader: vega.loader(),\n logLevel: vega.Warn,\n renderer: 'none',\n })\n .initialize()\n .toSVG(scaleFactor)\n .then(done)\n .catch(function(err) { console.error(err); });\n \"\"\",\n'vega': \"\"\"\n var spec = arguments[0];\n var mode = arguments[1];\n var done = arguments[3];\n\n if(mode === 'vega-lite'){\n // compile vega-lite to vega\n const compiled = vl.compile(spec);\n spec = compiled.spec;\n }\n\n done(spec);\n \"\"\"}\n\n\ndef compile_spec(spec, format, mode,\n vega_version, vegaembed_version, vegalite_version,\n scale_factor=1, driver_timeout=20, webdriver='chrome'):\n # TODO: detect & use local Jupyter caches of JS packages?\n\n # selenium is an optional dependency, so import it here\n try:\n import selenium.webdriver\n except ImportError:\n raise ImportError(\"selenium package is required \"\n \"for saving chart as {0}\".format(format))\n\n if format not in ['png', 'svg', 'vega']:\n raise NotImplementedError(\"format must be 'svg', 'png' or 'vega'\")\n\n if mode not in ['vega', 'vega-lite']:\n raise ValueError(\"mode must be either 'vega' or 'vega-lite'\")\n\n if vega_version is None:\n raise ValueError(\"must specify vega_version\")\n\n if vegaembed_version is None:\n raise ValueError(\"must specify vegaembed_version\")\n\n if mode == 'vega-lite' and vegalite_version is None:\n raise ValueError(\"must specify vega-lite version\")\n\n if webdriver == 'chrome':\n webdriver_class = selenium.webdriver.Chrome\n webdriver_options_class = selenium.webdriver.chrome.options.Options\n elif webdriver == 'firefox':\n webdriver_class = selenium.webdriver.Firefox\n webdriver_options_class = selenium.webdriver.firefox.options.Options\n else:\n raise ValueError(\"webdriver must be 'chrome' or 'firefox'\")\n\n html = HTML_TEMPLATE.format(vega_version=vega_version,\n vegalite_version=vegalite_version,\n vegaembed_version=vegaembed_version)\n\n webdriver_options = webdriver_options_class()\n webdriver_options.add_argument(\"--headless\")\n\n if issubclass(webdriver_class, selenium.webdriver.Chrome):\n # for linux/osx root user, need to add --no-sandbox option.\n # since geteuid doesn't exist on windows, we don't check it\n if hasattr(os, 'geteuid') and (os.geteuid() == 0):\n webdriver_options.add_argument('--no-sandbox')\n\n driver = webdriver_class(options=webdriver_options)\n\n 
try:\n driver.set_page_load_timeout(driver_timeout)\n\n with temporary_filename(suffix='.html') as htmlfile:\n with open(htmlfile, 'w') as f:\n f.write(html)\n driver.get(\"file://\" + htmlfile)\n online = driver.execute_script(\"return navigator.onLine\")\n if not online:\n raise ValueError(\"Internet connection required for saving \"\n \"chart as {0}\".format(format))\n return driver.execute_async_script(EXTRACT_CODE[format],\n spec, mode, scale_factor)\n finally:\n driver.close()\n", "path": "altair/utils/headless.py"}]} | 1,952 | 351 |
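The pattern behind the Altair patch above generalizes to any optional dependency: keep the import out of module scope and attempt it only inside the function that actually needs it, so that `import altair` can never fail because of a broken selenium install. A minimal sketch of that pattern — the helper name and the exact error wording are illustrative, not taken from Altair:

```python
def _load_selenium():
    """Import selenium only when a chart actually needs to be rendered.

    Importing at module scope would make `import altair` fail whenever
    selenium is installed but misconfigured; deferring the import keeps
    the failure local to the save/render call.
    """
    try:
        import selenium.webdriver  # binds the local name `selenium`
    except ImportError as err:
        raise ImportError(
            "selenium package is required for saving charts as png/svg/vega"
        ) from err
    return selenium


def compile_chart(spec, fmt="png"):
    selenium = _load_selenium()  # failure happens here, not at import time
    options = selenium.webdriver.chrome.options.Options()
    options.add_argument("--headless")
    driver = selenium.webdriver.Chrome(options=options)
    try:
        ...  # render `spec` the same way compile_spec() does above
    finally:
        driver.close()
```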
gh_patches_debug_61783 | rasdani/github-patches | git_diff | electricitymaps__electricitymaps-contrib-1155 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Taiwan TW is offline
Currently, Taiwan is grey and 24-hours-history is empty as well.
- [The link ](http://data.taipower.com.tw/opendata01/apply/file/d006001/001.txt) in the [TW.py parser](https://github.com/tmrowco/electricitymap/blob/master/parsers/TW.py) seems to show data, though.
Maybe there have been some crucial changes?
Some other TW related things that should be fixed:
- The source link on the electricitymap website for Taiwan is not shown / shown as "?".

- In general, the link in README.md will show 404 error and is leading nowhere. Seems like they updated/revised their website a bit?
Here is the website with the 10-min-generation mix that should be linked in README.md:
http://www.taipower.com.tw/tc/page.aspx?mid=206&cid=404&cchk=8ccc1918-8cae-4f40-a2d0-b43454f4f218

Taiwan TW is offline
Currently, Taiwan is grey and 24-hours-history is empty as well.
- [The link ](http://data.taipower.com.tw/opendata01/apply/file/d006001/001.txt) in the [TW.py parser](https://github.com/tmrowco/electricitymap/blob/master/parsers/TW.py) seems to show data, though.
Maybe there have been some crucial changes?
Some other TW related things that should be fixed:
- The source link on the electricitymap website for Taiwan is not shown / shown as "?".

- In general, the link in README.md will show 404 error and is leading nowhere. Seems like they updated/revised their website a bit?
Here is the website with the 10-min-generation mix that should be linked in README.md:
http://www.taipower.com.tw/tc/page.aspx?mid=206&cid=404&cchk=8ccc1918-8cae-4f40-a2d0-b43454f4f218

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsers/TW.py`
Content:
```
1 #!/usr/bin/env python3
2 import arrow
3 import requests
4 import pandas
5 import dateutil
6
7
8 def fetch_production(country_code='TW'):
9 url = 'http://data.taipower.com.tw/opendata01/apply/file/d006001/001.txt'
10 response = requests.get(url)
11 data = response.json()
12
13 dumpDate = data['']
14 prodData = data['aaData']
15
16 tz = 'Asia/Taipei'
17 dumpDate = arrow.get(dumpDate, 'YYYY-MM-DD HH:mm').replace(tzinfo=dateutil.tz.gettz(tz))
18
19 objData = pandas.DataFrame(prodData)
20
21 objData.columns = ['fueltype', 'name', 'capacity', 'output', 'percentage',
22 'additional']
23
24 objData['fueltype'] = objData.fueltype.str.split('(').str[1]
25 objData['fueltype'] = objData.fueltype.str.split(')').str[0]
26 objData.drop('additional', axis=1, inplace=True)
27 objData.drop('percentage', axis=1, inplace=True)
28
29 objData = objData.convert_objects(convert_numeric=True)
30 production = pandas.DataFrame(objData.groupby('fueltype').sum())
31 production.columns = ['capacity', 'output']
32
33 coal_capacity = production.ix['Coal'].capacity + production.ix['IPP-Coal'].capacity
34 gas_capacity = production.ix['LNG'].capacity + production.ix['IPP-LNG'].capacity
35 oil_capacity = production.ix['Oil'].capacity + production.ix['Diesel'].capacity
36
37 coal_production = production.ix['Coal'].output + production.ix['IPP-Coal'].output
38 gas_production = production.ix['LNG'].output + production.ix['IPP-LNG'].output
39 oil_production = production.ix['Oil'].output + production.ix['Diesel'].output
40
41 # For storage, note that load will be negative, and generation positive.
42 # We require the opposite
43
44 returndata = {
45 'countryCode': country_code,
46 'datetime': dumpDate.datetime,
47 'production': {
48 'coal': coal_production,
49 'gas': gas_production,
50 'oil': oil_production,
51 'hydro': production.ix['Hydro'].output,
52 'nuclear': production.ix['Nuclear'].output,
53 'solar': production.ix['Solar'].output,
54 'wind': production.ix['Wind'].output,
55 'unknown': production.ix['Co-Gen'].output
56 },
57 'capacity': {
58 'coal': coal_capacity,
59 'gas': gas_capacity,
60 'oil': oil_capacity,
61 'hydro': production.ix['Hydro'].capacity,
62 'nuclear': production.ix['Nuclear'].capacity,
63 'solar': production.ix['Solar'].capacity,
64 'wind': production.ix['Wind'].capacity,
65 'unknown': production.ix['Co-Gen'].capacity
66 },
67 'storage': {
68 'hydro': -1 * production.ix['Pumping Load'].output - production.ix['Pumping Gen'].output
69 },
70 'source': 'taipower.com.tw'
71 }
72
73 return returndata
74
75
76 if __name__ == '__main__':
77 print(fetch_production())
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parsers/TW.py b/parsers/TW.py
--- a/parsers/TW.py
+++ b/parsers/TW.py
@@ -5,7 +5,7 @@
import dateutil
-def fetch_production(country_code='TW'):
+def fetch_production(country_code='TW', session=None):
url = 'http://data.taipower.com.tw/opendata01/apply/file/d006001/001.txt'
response = requests.get(url)
data = response.json()
| {"golden_diff": "diff --git a/parsers/TW.py b/parsers/TW.py\n--- a/parsers/TW.py\n+++ b/parsers/TW.py\n@@ -5,7 +5,7 @@\n import dateutil\n \n \n-def fetch_production(country_code='TW'):\n+def fetch_production(country_code='TW', session=None):\n url = 'http://data.taipower.com.tw/opendata01/apply/file/d006001/001.txt'\n response = requests.get(url)\n data = response.json()\n", "issue": "Taiwan TW is offline\nCurrently, Taiwan is grey and 24-hours-history is empty as well.\r\n\r\n- [The link ](http://data.taipower.com.tw/opendata01/apply/file/d006001/001.txt) in the [TW.py parser](https://github.com/tmrowco/electricitymap/blob/master/parsers/TW.py) seems to show data, though. \r\nMaybe there have been some crucial changes?\r\n\r\nSome other TW related things that should be fixed:\r\n- The source link on the electricitymap website for Taiwan is not shown / shown as \"?\".\r\n\r\n\r\n- In general, the link in README.md will show 404 error and is leading nowhere. Seems like they updated/revised their website a bit?\r\nHere is the website with the 10-min-generation mix that should be linked in README.md:\r\nhttp://www.taipower.com.tw/tc/page.aspx?mid=206&cid=404&cchk=8ccc1918-8cae-4f40-a2d0-b43454f4f218\r\n\r\n\r\n\nTaiwan TW is offline\nCurrently, Taiwan is grey and 24-hours-history is empty as well.\r\n\r\n- [The link ](http://data.taipower.com.tw/opendata01/apply/file/d006001/001.txt) in the [TW.py parser](https://github.com/tmrowco/electricitymap/blob/master/parsers/TW.py) seems to show data, though. \r\nMaybe there have been some crucial changes?\r\n\r\nSome other TW related things that should be fixed:\r\n- The source link on the electricitymap website for Taiwan is not shown / shown as \"?\".\r\n\r\n\r\n- In general, the link in README.md will show 404 error and is leading nowhere. 
Seems like they updated/revised their website a bit?\r\nHere is the website with the 10-min-generation mix that should be linked in README.md:\r\nhttp://www.taipower.com.tw/tc/page.aspx?mid=206&cid=404&cchk=8ccc1918-8cae-4f40-a2d0-b43454f4f218\r\n\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\nimport arrow\nimport requests\nimport pandas\nimport dateutil\n\n\ndef fetch_production(country_code='TW'):\n url = 'http://data.taipower.com.tw/opendata01/apply/file/d006001/001.txt'\n response = requests.get(url)\n data = response.json()\n\n dumpDate = data['']\n prodData = data['aaData']\n\n tz = 'Asia/Taipei'\n dumpDate = arrow.get(dumpDate, 'YYYY-MM-DD HH:mm').replace(tzinfo=dateutil.tz.gettz(tz))\n\n objData = pandas.DataFrame(prodData)\n\n objData.columns = ['fueltype', 'name', 'capacity', 'output', 'percentage',\n 'additional']\n\n objData['fueltype'] = objData.fueltype.str.split('(').str[1]\n objData['fueltype'] = objData.fueltype.str.split(')').str[0]\n objData.drop('additional', axis=1, inplace=True)\n objData.drop('percentage', axis=1, inplace=True)\n\n objData = objData.convert_objects(convert_numeric=True)\n production = pandas.DataFrame(objData.groupby('fueltype').sum())\n production.columns = ['capacity', 'output']\n\n coal_capacity = production.ix['Coal'].capacity + production.ix['IPP-Coal'].capacity\n gas_capacity = production.ix['LNG'].capacity + production.ix['IPP-LNG'].capacity\n oil_capacity = production.ix['Oil'].capacity + production.ix['Diesel'].capacity\n\n coal_production = production.ix['Coal'].output + production.ix['IPP-Coal'].output\n gas_production = production.ix['LNG'].output + production.ix['IPP-LNG'].output\n oil_production = production.ix['Oil'].output + production.ix['Diesel'].output\n\n # For storage, note that load will be negative, and generation positive.\n # We require the opposite\n\n returndata = {\n 'countryCode': country_code,\n 'datetime': dumpDate.datetime,\n 'production': {\n 'coal': coal_production,\n 'gas': gas_production,\n 'oil': oil_production,\n 'hydro': production.ix['Hydro'].output,\n 'nuclear': production.ix['Nuclear'].output,\n 'solar': production.ix['Solar'].output,\n 'wind': production.ix['Wind'].output,\n 'unknown': production.ix['Co-Gen'].output\n },\n 'capacity': {\n 'coal': coal_capacity,\n 'gas': gas_capacity,\n 'oil': oil_capacity,\n 'hydro': production.ix['Hydro'].capacity,\n 'nuclear': production.ix['Nuclear'].capacity,\n 'solar': production.ix['Solar'].capacity,\n 'wind': production.ix['Wind'].capacity,\n 'unknown': production.ix['Co-Gen'].capacity\n },\n 'storage': {\n 'hydro': -1 * production.ix['Pumping Load'].output - production.ix['Pumping Gen'].output\n },\n 'source': 'taipower.com.tw'\n }\n\n return returndata\n\n\nif __name__ == '__main__':\n print(fetch_production())\n", "path": "parsers/TW.py"}], "after_files": [{"content": "#!/usr/bin/env python3\nimport arrow\nimport requests\nimport pandas\nimport dateutil\n\n\ndef fetch_production(country_code='TW', session=None):\n url = 'http://data.taipower.com.tw/opendata01/apply/file/d006001/001.txt'\n response = requests.get(url)\n data = response.json()\n\n dumpDate = data['']\n prodData = data['aaData']\n\n tz = 'Asia/Taipei'\n dumpDate = arrow.get(dumpDate, 'YYYY-MM-DD HH:mm').replace(tzinfo=dateutil.tz.gettz(tz))\n\n objData = pandas.DataFrame(prodData)\n\n objData.columns = ['fueltype', 'name', 'capacity', 'output', 'percentage',\n 'additional']\n\n objData['fueltype'] = objData.fueltype.str.split('(').str[1]\n objData['fueltype'] = 
objData.fueltype.str.split(')').str[0]\n objData.drop('additional', axis=1, inplace=True)\n objData.drop('percentage', axis=1, inplace=True)\n\n objData = objData.convert_objects(convert_numeric=True)\n production = pandas.DataFrame(objData.groupby('fueltype').sum())\n production.columns = ['capacity', 'output']\n\n coal_capacity = production.ix['Coal'].capacity + production.ix['IPP-Coal'].capacity\n gas_capacity = production.ix['LNG'].capacity + production.ix['IPP-LNG'].capacity\n oil_capacity = production.ix['Oil'].capacity + production.ix['Diesel'].capacity\n\n coal_production = production.ix['Coal'].output + production.ix['IPP-Coal'].output\n gas_production = production.ix['LNG'].output + production.ix['IPP-LNG'].output\n oil_production = production.ix['Oil'].output + production.ix['Diesel'].output\n\n # For storage, note that load will be negative, and generation positive.\n # We require the opposite\n\n returndata = {\n 'countryCode': country_code,\n 'datetime': dumpDate.datetime,\n 'production': {\n 'coal': coal_production,\n 'gas': gas_production,\n 'oil': oil_production,\n 'hydro': production.ix['Hydro'].output,\n 'nuclear': production.ix['Nuclear'].output,\n 'solar': production.ix['Solar'].output,\n 'wind': production.ix['Wind'].output,\n 'unknown': production.ix['Co-Gen'].output\n },\n 'capacity': {\n 'coal': coal_capacity,\n 'gas': gas_capacity,\n 'oil': oil_capacity,\n 'hydro': production.ix['Hydro'].capacity,\n 'nuclear': production.ix['Nuclear'].capacity,\n 'solar': production.ix['Solar'].capacity,\n 'wind': production.ix['Wind'].capacity,\n 'unknown': production.ix['Co-Gen'].capacity\n },\n 'storage': {\n 'hydro': -1 * production.ix['Pumping Load'].output - production.ix['Pumping Gen'].output\n },\n 'source': 'taipower.com.tw'\n }\n\n return returndata\n\n\nif __name__ == '__main__':\n print(fetch_production())\n", "path": "parsers/TW.py"}]} | 1,822 | 113 |
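The golden diff above only changes the parser's signature. A short sketch of how the new optional `session` argument would typically be consumed — the `session or requests.session()` fallback is an assumption about the intended usage, not part of the applied patch:

```python
import arrow
import dateutil.tz
import requests


def fetch_production(country_code='TW', session=None):
    """Fetch the latest Taiwanese generation mix, reusing a session if given."""
    url = 'http://data.taipower.com.tw/opendata01/apply/file/d006001/001.txt'
    s = session or requests.session()  # reuse the caller's session when provided
    data = s.get(url).json()

    tz = 'Asia/Taipei'
    dump_date = arrow.get(data[''], 'YYYY-MM-DD HH:mm').replace(
        tzinfo=dateutil.tz.gettz(tz))
    # downstream aggregation of data['aaData'] continues as in TW.py above
    return dump_date, data['aaData']
```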
gh_patches_debug_14691 | rasdani/github-patches | git_diff | google__timesketch-406 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pylint not present in requirements.txt
Not pinning version of Pylint makes our build a bit non-deterministic. Pylint's behavior can change between versions and break our build.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # Copyright 2015 Google Inc. All rights reserved.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """This is the setup file for the project. The standard setup rules apply:
16
17 python setup.py build
18 sudo python setup.py install
19 """
20
21 import os.path
22 import sys
23 import time
24
25 from setuptools import find_packages
26 from setuptools import setup
27
28 timesketch_version = u'20170721'
29
30 timesketch_description = (
31 u'Timesketch is a web based tool for collaborative forensic timeline '
32 u'analysis. Using sketches you and your collaborators can easily organize '
33 u'timelines and analyze them all at the same time. Add meaning to '
34 u'your raw data with rich annotations, comments, tags and stars.')
35
36 def check_before_upload():
37 """Warn user if frontend build is not present or is not recent.
38
39 Make sure that .js and .css bundles included in the PyPI package are up to
40 date.
41
42 Raises:
43 UserWarning
44 """
45 this_dir = os.path.dirname(__file__)
46 frontend_dist_dir = os.path.join(
47 this_dir, 'timesketch', 'ui', 'static', 'dist',
48 )
49 js = os.path.join(frontend_dist_dir, 'bundle.js')
50 css = os.path.join(frontend_dist_dir, 'bundle.css')
51 if not (os.path.isfile(js) and os.path.isfile(css)):
52 raise UserWarning(
53 "Build the frontend before uploading to PyPI!"
54 + " (see docs/Developers-Guide.md)"
55 )
56 mtime = min(os.path.getmtime(js), os.path.getmtime(css))
57 if time.time() - mtime > 180:
58 raise UserWarning(
59 "Frontend build is older than 3 minutes, please rebuild!"
60 + " (see docs/Developers-Guide.md)"
61 )
62
63 if 'upload' in sys.argv:
64 check_before_upload()
65
66 setup(
67 name=u'timesketch',
68 version=timesketch_version,
69 description=u'Digital forensic timeline analysis',
70 long_description=timesketch_description,
71 license=u'Apache License, Version 2.0',
72 url=u'http://www.timesketch.org/',
73 maintainer=u'Timesketch development team',
74 maintainer_email=u'[email protected]',
75 classifiers=[
76 u'Development Status :: 4 - Beta',
77 u'Environment :: Web Environment',
78 u'Operating System :: OS Independent',
79 u'Programming Language :: Python',
80 ],
81 data_files=[(u'share/timesketch', [u'timesketch.conf'])],
82 packages=find_packages(),
83 include_package_data=True,
84 zip_safe=False,
85 scripts=[u'tsctl'],
86 install_requires=frozenset([
87 u'Flask', u'Flask-Login', u'Flask-script', u'Flask-SQLAlchemy',
88 u'Flask-Bcrypt', u'Flask-RESTful', u'Flask-WTF', u'Flask-Migrate',
89 u'SQLAlchemy', u'celery', u'redis', u'blinker', u'elasticsearch',
90 u'neo4jrestclient', u'python-dateutil'
91 ]))
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -24,6 +24,8 @@
from setuptools import find_packages
from setuptools import setup
+from pip.req import parse_requirements
+from pip.download import PipSession
timesketch_version = u'20170721'
@@ -83,9 +85,7 @@
include_package_data=True,
zip_safe=False,
scripts=[u'tsctl'],
- install_requires=frozenset([
- u'Flask', u'Flask-Login', u'Flask-script', u'Flask-SQLAlchemy',
- u'Flask-Bcrypt', u'Flask-RESTful', u'Flask-WTF', u'Flask-Migrate',
- u'SQLAlchemy', u'celery', u'redis', u'blinker', u'elasticsearch',
- u'neo4jrestclient', u'python-dateutil'
- ]))
+ install_requires=[str(req.req) for req in parse_requirements(
+ "requirements.txt", session=PipSession(),
+ )],
+)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -24,6 +24,8 @@\n \n from setuptools import find_packages\n from setuptools import setup\n+from pip.req import parse_requirements\n+from pip.download import PipSession\n \n timesketch_version = u'20170721'\n \n@@ -83,9 +85,7 @@\n include_package_data=True,\n zip_safe=False,\n scripts=[u'tsctl'],\n- install_requires=frozenset([\n- u'Flask', u'Flask-Login', u'Flask-script', u'Flask-SQLAlchemy',\n- u'Flask-Bcrypt', u'Flask-RESTful', u'Flask-WTF', u'Flask-Migrate',\n- u'SQLAlchemy', u'celery', u'redis', u'blinker', u'elasticsearch',\n- u'neo4jrestclient', u'python-dateutil'\n- ]))\n+ install_requires=[str(req.req) for req in parse_requirements(\n+ \"requirements.txt\", session=PipSession(),\n+ )],\n+)\n", "issue": "Pylint not present in requirements.txt\nNot pinning version of Pylint makes our build a bit non-deterministic. Pylint's behavior can change between versions and break our build.\n", "before_files": [{"content": "#!/usr/bin/env python\n# Copyright 2015 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"This is the setup file for the project. The standard setup rules apply:\n\n python setup.py build\n sudo python setup.py install\n\"\"\"\n\nimport os.path\nimport sys\nimport time\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\ntimesketch_version = u'20170721'\n\ntimesketch_description = (\n u'Timesketch is a web based tool for collaborative forensic timeline '\n u'analysis. Using sketches you and your collaborators can easily organize '\n u'timelines and analyze them all at the same time. 
Add meaning to '\n u'your raw data with rich annotations, comments, tags and stars.')\n\ndef check_before_upload():\n \"\"\"Warn user if frontend build is not present or is not recent.\n\n Make sure that .js and .css bundles included in the PyPI package are up to\n date.\n\n Raises:\n UserWarning\n \"\"\"\n this_dir = os.path.dirname(__file__)\n frontend_dist_dir = os.path.join(\n this_dir, 'timesketch', 'ui', 'static', 'dist',\n )\n js = os.path.join(frontend_dist_dir, 'bundle.js')\n css = os.path.join(frontend_dist_dir, 'bundle.css')\n if not (os.path.isfile(js) and os.path.isfile(css)):\n raise UserWarning(\n \"Build the frontend before uploading to PyPI!\"\n + \" (see docs/Developers-Guide.md)\"\n )\n mtime = min(os.path.getmtime(js), os.path.getmtime(css))\n if time.time() - mtime > 180:\n raise UserWarning(\n \"Frontend build is older than 3 minutes, please rebuild!\"\n + \" (see docs/Developers-Guide.md)\"\n )\n\nif 'upload' in sys.argv:\n check_before_upload()\n\nsetup(\n name=u'timesketch',\n version=timesketch_version,\n description=u'Digital forensic timeline analysis',\n long_description=timesketch_description,\n license=u'Apache License, Version 2.0',\n url=u'http://www.timesketch.org/',\n maintainer=u'Timesketch development team',\n maintainer_email=u'[email protected]',\n classifiers=[\n u'Development Status :: 4 - Beta',\n u'Environment :: Web Environment',\n u'Operating System :: OS Independent',\n u'Programming Language :: Python',\n ],\n data_files=[(u'share/timesketch', [u'timesketch.conf'])],\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n scripts=[u'tsctl'],\n install_requires=frozenset([\n u'Flask', u'Flask-Login', u'Flask-script', u'Flask-SQLAlchemy',\n u'Flask-Bcrypt', u'Flask-RESTful', u'Flask-WTF', u'Flask-Migrate',\n u'SQLAlchemy', u'celery', u'redis', u'blinker', u'elasticsearch',\n u'neo4jrestclient', u'python-dateutil'\n ]))\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# Copyright 2015 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"This is the setup file for the project. The standard setup rules apply:\n\n python setup.py build\n sudo python setup.py install\n\"\"\"\n\nimport os.path\nimport sys\nimport time\n\nfrom setuptools import find_packages\nfrom setuptools import setup\nfrom pip.req import parse_requirements\nfrom pip.download import PipSession\n\ntimesketch_version = u'20170721'\n\ntimesketch_description = (\n u'Timesketch is a web based tool for collaborative forensic timeline '\n u'analysis. Using sketches you and your collaborators can easily organize '\n u'timelines and analyze them all at the same time. 
Add meaning to '\n u'your raw data with rich annotations, comments, tags and stars.')\n\ndef check_before_upload():\n \"\"\"Warn user if frontend build is not present or is not recent.\n\n Make sure that .js and .css bundles included in the PyPI package are up to\n date.\n\n Raises:\n UserWarning\n \"\"\"\n this_dir = os.path.dirname(__file__)\n frontend_dist_dir = os.path.join(\n this_dir, 'timesketch', 'ui', 'static', 'dist',\n )\n js = os.path.join(frontend_dist_dir, 'bundle.js')\n css = os.path.join(frontend_dist_dir, 'bundle.css')\n if not (os.path.isfile(js) and os.path.isfile(css)):\n raise UserWarning(\n \"Build the frontend before uploading to PyPI!\"\n + \" (see docs/Developers-Guide.md)\"\n )\n mtime = min(os.path.getmtime(js), os.path.getmtime(css))\n if time.time() - mtime > 180:\n raise UserWarning(\n \"Frontend build is older than 3 minutes, please rebuild!\"\n + \" (see docs/Developers-Guide.md)\"\n )\n\nif 'upload' in sys.argv:\n check_before_upload()\n\nsetup(\n name=u'timesketch',\n version=timesketch_version,\n description=u'Digital forensic timeline analysis',\n long_description=timesketch_description,\n license=u'Apache License, Version 2.0',\n url=u'http://www.timesketch.org/',\n maintainer=u'Timesketch development team',\n maintainer_email=u'[email protected]',\n classifiers=[\n u'Development Status :: 4 - Beta',\n u'Environment :: Web Environment',\n u'Operating System :: OS Independent',\n u'Programming Language :: Python',\n ],\n data_files=[(u'share/timesketch', [u'timesketch.conf'])],\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n scripts=[u'tsctl'],\n install_requires=[str(req.req) for req in parse_requirements(\n \"requirements.txt\", session=PipSession(),\n )],\n)\n", "path": "setup.py"}]} | 1,291 | 254 |
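The golden diff above reads requirements.txt through `pip.req.parse_requirements`, an internal API that only exists in old pip releases (it was removed around pip 10). A dependency-free sketch that achieves the same pinning effect without importing pip internals — illustrative only, not the patch that was applied:

```python
def read_requirements(path="requirements.txt"):
    """Return the pinned requirement strings from a requirements file."""
    requirements = []
    with open(path) as handle:
        for line in handle:
            line = line.strip()
            # skip blanks, comments and pip-specific lines such as "-r other.txt"
            if not line or line.startswith(("#", "-r", "--")):
                continue
            requirements.append(line)
    return requirements


# usage inside setup.py:
# setup(..., install_requires=read_requirements(), ...)
```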
gh_patches_debug_18530 | rasdani/github-patches | git_diff | pulp__pulpcore-5377 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Task cleanup must not delete content nor artifacts
Deleting content or artifacts outside of orphan cleanup is breaking the rules.
And no, we cannot get away with that.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pulpcore/tasking/util.py`
Content:
```
1 import logging
2 from gettext import gettext as _
3
4 from django.db import transaction
5 from django.db import connection
6
7 from pulpcore.app.models import Task
8 from pulpcore.constants import TASK_FINAL_STATES, TASK_INCOMPLETE_STATES, TASK_STATES
9
10 _logger = logging.getLogger(__name__)
11
12
13 def cancel(task_id):
14 """
15 Cancel the task that is represented by the given task_id.
16
17 This method cancels only the task with given task_id, not the spawned tasks. This also updates
18 task's state to either 'canceled' or 'canceling'.
19
20 Args:
21 task_id (str): The ID of the task you wish to cancel
22
23 Raises:
24 rest_framework.exceptions.NotFound: If a task with given task_id does not exist
25 """
26 task_status = Task.objects.get(pk=task_id)
27
28 if task_status.state in TASK_FINAL_STATES:
29 # If the task is already done, just stop
30 _logger.debug(
31 "Task [{task_id}] already in a final state: {state}".format(
32 task_id=task_id, state=task_status.state
33 )
34 )
35 return task_status
36
37 _logger.info(_("Canceling task: {id}").format(id=task_id))
38
39 task = task_status
40 # This is the only valid transition without holding the task lock
41 rows = Task.objects.filter(pk=task.pk, state__in=TASK_INCOMPLETE_STATES).update(
42 state=TASK_STATES.CANCELING
43 )
44 # Notify the worker that might be running that task and other workers to clean up
45 with connection.cursor() as cursor:
46 cursor.execute("SELECT pg_notify('pulp_worker_cancel', %s)", (str(task.pk),))
47 cursor.execute("NOTIFY pulp_worker_wakeup")
48 if rows == 1:
49 task.refresh_from_db()
50 return task
51
52
53 def _delete_incomplete_resources(task):
54 """
55 Delete all incomplete created-resources on a canceled task.
56
57 Args:
58 task (Task): A task.
59 """
60 if task.state not in [TASK_STATES.CANCELED, TASK_STATES.CANCELING]:
61 raise RuntimeError(_("Task must be canceled."))
62 for model in (r.content_object for r in task.created_resources.all()):
63 try:
64 if model.complete:
65 continue
66 except AttributeError:
67 continue
68 try:
69 with transaction.atomic():
70 model.delete()
71 except Exception as error:
72 _logger.error(_("Delete created resource, failed: {}").format(str(error)))
73
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pulpcore/tasking/util.py b/pulpcore/tasking/util.py
--- a/pulpcore/tasking/util.py
+++ b/pulpcore/tasking/util.py
@@ -4,7 +4,7 @@
from django.db import transaction
from django.db import connection
-from pulpcore.app.models import Task
+from pulpcore.app.models import Artifact, Content, Task
from pulpcore.constants import TASK_FINAL_STATES, TASK_INCOMPLETE_STATES, TASK_STATES
_logger = logging.getLogger(__name__)
@@ -60,6 +60,8 @@
if task.state not in [TASK_STATES.CANCELED, TASK_STATES.CANCELING]:
raise RuntimeError(_("Task must be canceled."))
for model in (r.content_object for r in task.created_resources.all()):
+ if isinstance(model, (Artifact, Content)):
+ continue
try:
if model.complete:
continue
| {"golden_diff": "diff --git a/pulpcore/tasking/util.py b/pulpcore/tasking/util.py\n--- a/pulpcore/tasking/util.py\n+++ b/pulpcore/tasking/util.py\n@@ -4,7 +4,7 @@\n from django.db import transaction\n from django.db import connection\n \n-from pulpcore.app.models import Task\n+from pulpcore.app.models import Artifact, Content, Task\n from pulpcore.constants import TASK_FINAL_STATES, TASK_INCOMPLETE_STATES, TASK_STATES\n \n _logger = logging.getLogger(__name__)\n@@ -60,6 +60,8 @@\n if task.state not in [TASK_STATES.CANCELED, TASK_STATES.CANCELING]:\n raise RuntimeError(_(\"Task must be canceled.\"))\n for model in (r.content_object for r in task.created_resources.all()):\n+ if isinstance(model, (Artifact, Content)):\n+ continue\n try:\n if model.complete:\n continue\n", "issue": "Task cleanup must not delete content nor artifacts\nDeleting content or artifacts outside of orphan cleanup is breaking the rules.\r\nAnd no, we cannot get away with that.\r\n\n", "before_files": [{"content": "import logging\nfrom gettext import gettext as _\n\nfrom django.db import transaction\nfrom django.db import connection\n\nfrom pulpcore.app.models import Task\nfrom pulpcore.constants import TASK_FINAL_STATES, TASK_INCOMPLETE_STATES, TASK_STATES\n\n_logger = logging.getLogger(__name__)\n\n\ndef cancel(task_id):\n \"\"\"\n Cancel the task that is represented by the given task_id.\n\n This method cancels only the task with given task_id, not the spawned tasks. This also updates\n task's state to either 'canceled' or 'canceling'.\n\n Args:\n task_id (str): The ID of the task you wish to cancel\n\n Raises:\n rest_framework.exceptions.NotFound: If a task with given task_id does not exist\n \"\"\"\n task_status = Task.objects.get(pk=task_id)\n\n if task_status.state in TASK_FINAL_STATES:\n # If the task is already done, just stop\n _logger.debug(\n \"Task [{task_id}] already in a final state: {state}\".format(\n task_id=task_id, state=task_status.state\n )\n )\n return task_status\n\n _logger.info(_(\"Canceling task: {id}\").format(id=task_id))\n\n task = task_status\n # This is the only valid transition without holding the task lock\n rows = Task.objects.filter(pk=task.pk, state__in=TASK_INCOMPLETE_STATES).update(\n state=TASK_STATES.CANCELING\n )\n # Notify the worker that might be running that task and other workers to clean up\n with connection.cursor() as cursor:\n cursor.execute(\"SELECT pg_notify('pulp_worker_cancel', %s)\", (str(task.pk),))\n cursor.execute(\"NOTIFY pulp_worker_wakeup\")\n if rows == 1:\n task.refresh_from_db()\n return task\n\n\ndef _delete_incomplete_resources(task):\n \"\"\"\n Delete all incomplete created-resources on a canceled task.\n\n Args:\n task (Task): A task.\n \"\"\"\n if task.state not in [TASK_STATES.CANCELED, TASK_STATES.CANCELING]:\n raise RuntimeError(_(\"Task must be canceled.\"))\n for model in (r.content_object for r in task.created_resources.all()):\n try:\n if model.complete:\n continue\n except AttributeError:\n continue\n try:\n with transaction.atomic():\n model.delete()\n except Exception as error:\n _logger.error(_(\"Delete created resource, failed: {}\").format(str(error)))\n", "path": "pulpcore/tasking/util.py"}], "after_files": [{"content": "import logging\nfrom gettext import gettext as _\n\nfrom django.db import transaction\nfrom django.db import connection\n\nfrom pulpcore.app.models import Artifact, Content, Task\nfrom pulpcore.constants import TASK_FINAL_STATES, TASK_INCOMPLETE_STATES, TASK_STATES\n\n_logger = logging.getLogger(__name__)\n\n\ndef 
cancel(task_id):\n \"\"\"\n Cancel the task that is represented by the given task_id.\n\n This method cancels only the task with given task_id, not the spawned tasks. This also updates\n task's state to either 'canceled' or 'canceling'.\n\n Args:\n task_id (str): The ID of the task you wish to cancel\n\n Raises:\n rest_framework.exceptions.NotFound: If a task with given task_id does not exist\n \"\"\"\n task_status = Task.objects.get(pk=task_id)\n\n if task_status.state in TASK_FINAL_STATES:\n # If the task is already done, just stop\n _logger.debug(\n \"Task [{task_id}] already in a final state: {state}\".format(\n task_id=task_id, state=task_status.state\n )\n )\n return task_status\n\n _logger.info(_(\"Canceling task: {id}\").format(id=task_id))\n\n task = task_status\n # This is the only valid transition without holding the task lock\n rows = Task.objects.filter(pk=task.pk, state__in=TASK_INCOMPLETE_STATES).update(\n state=TASK_STATES.CANCELING\n )\n # Notify the worker that might be running that task and other workers to clean up\n with connection.cursor() as cursor:\n cursor.execute(\"SELECT pg_notify('pulp_worker_cancel', %s)\", (str(task.pk),))\n cursor.execute(\"NOTIFY pulp_worker_wakeup\")\n if rows == 1:\n task.refresh_from_db()\n return task\n\n\ndef _delete_incomplete_resources(task):\n \"\"\"\n Delete all incomplete created-resources on a canceled task.\n\n Args:\n task (Task): A task.\n \"\"\"\n if task.state not in [TASK_STATES.CANCELED, TASK_STATES.CANCELING]:\n raise RuntimeError(_(\"Task must be canceled.\"))\n for model in (r.content_object for r in task.created_resources.all()):\n if isinstance(model, (Artifact, Content)):\n continue\n try:\n if model.complete:\n continue\n except AttributeError:\n continue\n try:\n with transaction.atomic():\n model.delete()\n except Exception as error:\n _logger.error(_(\"Delete created resource, failed: {}\").format(str(error)))\n", "path": "pulpcore/tasking/util.py"}]} | 956 | 190 |
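The core of the fix above is a type guard in front of the deletion loop. Pulled out on its own it looks like the sketch below; the helper name is illustrative, while `Artifact`, `Content` and the loop body come straight from the patch:

```python
from pulpcore.app.models import Artifact, Content


def _cleanup_candidates(task):
    """Yield created resources that a canceled task is allowed to delete.

    Content units and artifacts are deliberately excluded: they may only
    ever be removed by orphan cleanup, never by task cancellation.
    """
    for resource in (r.content_object for r in task.created_resources.all()):
        if isinstance(resource, (Artifact, Content)):
            continue
        yield resource
```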
gh_patches_debug_3397 | rasdani/github-patches | git_diff | Netflix__lemur-238 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Creating an authority does not allow others with the role to issue certificates
When creating an authority currently only the creator can see the authority, anyone with the owning role should be able to see and use the certificate.
Currently, even when a valid role is assigned and the user can see the authority, they cannot use it because they cannot access the authority's key.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lemur/authorities/service.py`
Content:
```
1 """
2 .. module: lemur.authorities.service
3 :platform: Unix
4 :synopsis: This module contains all of the services level functions used to
5 administer authorities in Lemur
6 :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more
7 :license: Apache, see LICENSE for more details.
8 .. moduleauthor:: Kevin Glisson <[email protected]>
9
10 """
11 from flask import g
12 from flask import current_app
13
14 from lemur import database
15 from lemur.authorities.models import Authority
16 from lemur.roles import service as role_service
17 from lemur.notifications import service as notification_service
18
19 from lemur.roles.models import Role
20 from lemur.certificates.models import Certificate
21
22 from lemur.plugins.base import plugins
23
24
25 def update(authority_id, description=None, owner=None, active=None, roles=None):
26 """
27 Update a an authority with new values.
28
29 :param authority_id:
30 :param roles: roles that are allowed to use this authority
31 :return:
32 """
33 authority = get(authority_id)
34 if roles:
35 authority = database.update_list(authority, 'roles', Role, roles)
36
37 if active:
38 authority.active = active
39
40 authority.description = description
41 authority.owner = owner
42 return database.update(authority)
43
44
45 def create(kwargs):
46 """
47 Create a new authority.
48
49 :return:
50 """
51
52 issuer = plugins.get(kwargs.get('pluginName'))
53
54 kwargs['creator'] = g.current_user.email
55 cert_body, intermediate, issuer_roles = issuer.create_authority(kwargs)
56
57 cert = Certificate(cert_body, chain=intermediate)
58 cert.owner = kwargs['ownerEmail']
59
60 if kwargs['caType'] == 'subca':
61 cert.description = "This is the ROOT certificate for the {0} sub certificate authority the parent \
62 authority is {1}.".format(kwargs.get('caName'), kwargs.get('caParent'))
63 else:
64 cert.description = "This is the ROOT certificate for the {0} certificate authority.".format(
65 kwargs.get('caName')
66 )
67
68 cert.user = g.current_user
69
70 cert.notifications = notification_service.create_default_expiration_notifications(
71 'DEFAULT_SECURITY',
72 current_app.config.get('LEMUR_SECURITY_TEAM_EMAIL')
73 )
74
75 # we create and attach any roles that the issuer gives us
76 role_objs = []
77 for r in issuer_roles:
78
79 role = role_service.create(
80 r['name'],
81 password=r['password'],
82 description="{0} auto generated role".format(kwargs.get('pluginName')),
83 username=r['username'])
84
85 # the user creating the authority should be able to administer it
86 if role.username == 'admin':
87 g.current_user.roles.append(role)
88
89 role_objs.append(role)
90
91 authority = Authority(
92 kwargs.get('caName'),
93 kwargs['ownerEmail'],
94 kwargs['pluginName'],
95 cert_body,
96 description=kwargs['caDescription'],
97 chain=intermediate,
98 roles=role_objs
99 )
100
101 database.update(cert)
102 authority = database.create(authority)
103
104 g.current_user.authorities.append(authority)
105
106 return authority
107
108
109 def get_all():
110 """
111 Get all authorities that are currently in Lemur.
112
113 :rtype : List
114 :return:
115 """
116 query = database.session_query(Authority)
117 return database.find_all(query, Authority, {}).all()
118
119
120 def get(authority_id):
121 """
122 Retrieves an authority given it's ID
123
124 :param authority_id:
125 :return:
126 """
127 return database.get(Authority, authority_id)
128
129
130 def get_by_name(authority_name):
131 """
132 Retrieves an authority given it's name.
133
134 :param authority_name:
135 :return:
136 """
137 return database.get(Authority, authority_name, field='name')
138
139
140 def get_authority_role(ca_name):
141 """
142 Attempts to get the authority role for a given ca uses current_user
143 as a basis for accomplishing that.
144
145 :param ca_name:
146 """
147 if g.current_user.is_admin:
148 authority = get_by_name(ca_name)
149 # TODO we should pick admin ca roles for admin
150 return authority.roles[0]
151 else:
152 for role in g.current_user.roles:
153 if role.authority:
154 if role.authority.name == ca_name:
155 return role
156
157
158 def render(args):
159 """
160 Helper that helps us render the REST Api responses.
161 :param args:
162 :return:
163 """
164 query = database.session_query(Authority)
165 sort_by = args.pop('sort_by')
166 sort_dir = args.pop('sort_dir')
167 page = args.pop('page')
168 count = args.pop('count')
169 filt = args.pop('filter')
170
171 if filt:
172 terms = filt.split(';')
173 if 'active' in filt: # this is really weird but strcmp seems to not work here??
174 query = query.filter(Authority.active == terms[1])
175 else:
176 query = database.filter(query, Authority, terms)
177
178 # we make sure that a user can only use an authority they either own are are a member of - admins can see all
179 if not g.current_user.is_admin:
180 authority_ids = []
181 for role in g.current_user.roles:
182 if role.authority:
183 authority_ids.append(role.authority.id)
184 query = query.filter(Authority.id.in_(authority_ids))
185
186 query = database.find_all(query, Authority, args)
187
188 if sort_by and sort_dir:
189 query = database.sort(query, Authority, sort_by, sort_dir)
190
191 return database.paginate(query, page, count)
192
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lemur/authorities/service.py b/lemur/authorities/service.py
--- a/lemur/authorities/service.py
+++ b/lemur/authorities/service.py
@@ -101,6 +101,10 @@
database.update(cert)
authority = database.create(authority)
+ # the owning dl or role should have this authority associated with it
+ owner_role = role_service.get_by_name(kwargs['ownerEmail'])
+ owner_role.authority = authority
+
g.current_user.authorities.append(authority)
return authority
| {"golden_diff": "diff --git a/lemur/authorities/service.py b/lemur/authorities/service.py\n--- a/lemur/authorities/service.py\n+++ b/lemur/authorities/service.py\n@@ -101,6 +101,10 @@\n database.update(cert)\n authority = database.create(authority)\n \n+ # the owning dl or role should have this authority associated with it\n+ owner_role = role_service.get_by_name(kwargs['ownerEmail'])\n+ owner_role.authority = authority\n+\n g.current_user.authorities.append(authority)\n \n return authority\n", "issue": "Creating an authority does not allow others with the role to issue certificates\nWhen creating an authority currently only the creator can see the authority, anyone with the owning role should be able to see and use the certificate.\n\nCurrently even when a valid role is assigned and the user can see the authority they cannot use it because the cannot access the authorities key.\n\n", "before_files": [{"content": "\"\"\"\n.. module: lemur.authorities.service\n :platform: Unix\n :synopsis: This module contains all of the services level functions used to\n administer authorities in Lemur\n :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more\n :license: Apache, see LICENSE for more details.\n.. moduleauthor:: Kevin Glisson <[email protected]>\n\n\"\"\"\nfrom flask import g\nfrom flask import current_app\n\nfrom lemur import database\nfrom lemur.authorities.models import Authority\nfrom lemur.roles import service as role_service\nfrom lemur.notifications import service as notification_service\n\nfrom lemur.roles.models import Role\nfrom lemur.certificates.models import Certificate\n\nfrom lemur.plugins.base import plugins\n\n\ndef update(authority_id, description=None, owner=None, active=None, roles=None):\n \"\"\"\n Update a an authority with new values.\n\n :param authority_id:\n :param roles: roles that are allowed to use this authority\n :return:\n \"\"\"\n authority = get(authority_id)\n if roles:\n authority = database.update_list(authority, 'roles', Role, roles)\n\n if active:\n authority.active = active\n\n authority.description = description\n authority.owner = owner\n return database.update(authority)\n\n\ndef create(kwargs):\n \"\"\"\n Create a new authority.\n\n :return:\n \"\"\"\n\n issuer = plugins.get(kwargs.get('pluginName'))\n\n kwargs['creator'] = g.current_user.email\n cert_body, intermediate, issuer_roles = issuer.create_authority(kwargs)\n\n cert = Certificate(cert_body, chain=intermediate)\n cert.owner = kwargs['ownerEmail']\n\n if kwargs['caType'] == 'subca':\n cert.description = \"This is the ROOT certificate for the {0} sub certificate authority the parent \\\n authority is {1}.\".format(kwargs.get('caName'), kwargs.get('caParent'))\n else:\n cert.description = \"This is the ROOT certificate for the {0} certificate authority.\".format(\n kwargs.get('caName')\n )\n\n cert.user = g.current_user\n\n cert.notifications = notification_service.create_default_expiration_notifications(\n 'DEFAULT_SECURITY',\n current_app.config.get('LEMUR_SECURITY_TEAM_EMAIL')\n )\n\n # we create and attach any roles that the issuer gives us\n role_objs = []\n for r in issuer_roles:\n\n role = role_service.create(\n r['name'],\n password=r['password'],\n description=\"{0} auto generated role\".format(kwargs.get('pluginName')),\n username=r['username'])\n\n # the user creating the authority should be able to administer it\n if role.username == 'admin':\n g.current_user.roles.append(role)\n\n role_objs.append(role)\n\n authority = Authority(\n kwargs.get('caName'),\n 
kwargs['ownerEmail'],\n kwargs['pluginName'],\n cert_body,\n description=kwargs['caDescription'],\n chain=intermediate,\n roles=role_objs\n )\n\n database.update(cert)\n authority = database.create(authority)\n\n g.current_user.authorities.append(authority)\n\n return authority\n\n\ndef get_all():\n \"\"\"\n Get all authorities that are currently in Lemur.\n\n :rtype : List\n :return:\n \"\"\"\n query = database.session_query(Authority)\n return database.find_all(query, Authority, {}).all()\n\n\ndef get(authority_id):\n \"\"\"\n Retrieves an authority given it's ID\n\n :param authority_id:\n :return:\n \"\"\"\n return database.get(Authority, authority_id)\n\n\ndef get_by_name(authority_name):\n \"\"\"\n Retrieves an authority given it's name.\n\n :param authority_name:\n :return:\n \"\"\"\n return database.get(Authority, authority_name, field='name')\n\n\ndef get_authority_role(ca_name):\n \"\"\"\n Attempts to get the authority role for a given ca uses current_user\n as a basis for accomplishing that.\n\n :param ca_name:\n \"\"\"\n if g.current_user.is_admin:\n authority = get_by_name(ca_name)\n # TODO we should pick admin ca roles for admin\n return authority.roles[0]\n else:\n for role in g.current_user.roles:\n if role.authority:\n if role.authority.name == ca_name:\n return role\n\n\ndef render(args):\n \"\"\"\n Helper that helps us render the REST Api responses.\n :param args:\n :return:\n \"\"\"\n query = database.session_query(Authority)\n sort_by = args.pop('sort_by')\n sort_dir = args.pop('sort_dir')\n page = args.pop('page')\n count = args.pop('count')\n filt = args.pop('filter')\n\n if filt:\n terms = filt.split(';')\n if 'active' in filt: # this is really weird but strcmp seems to not work here??\n query = query.filter(Authority.active == terms[1])\n else:\n query = database.filter(query, Authority, terms)\n\n # we make sure that a user can only use an authority they either own are are a member of - admins can see all\n if not g.current_user.is_admin:\n authority_ids = []\n for role in g.current_user.roles:\n if role.authority:\n authority_ids.append(role.authority.id)\n query = query.filter(Authority.id.in_(authority_ids))\n\n query = database.find_all(query, Authority, args)\n\n if sort_by and sort_dir:\n query = database.sort(query, Authority, sort_by, sort_dir)\n\n return database.paginate(query, page, count)\n", "path": "lemur/authorities/service.py"}], "after_files": [{"content": "\"\"\"\n.. module: lemur.authorities.service\n :platform: Unix\n :synopsis: This module contains all of the services level functions used to\n administer authorities in Lemur\n :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more\n :license: Apache, see LICENSE for more details.\n.. 
moduleauthor:: Kevin Glisson <[email protected]>\n\n\"\"\"\nfrom flask import g\nfrom flask import current_app\n\nfrom lemur import database\nfrom lemur.authorities.models import Authority\nfrom lemur.roles import service as role_service\nfrom lemur.notifications import service as notification_service\n\nfrom lemur.roles.models import Role\nfrom lemur.certificates.models import Certificate\n\nfrom lemur.plugins.base import plugins\n\n\ndef update(authority_id, description=None, owner=None, active=None, roles=None):\n \"\"\"\n Update a an authority with new values.\n\n :param authority_id:\n :param roles: roles that are allowed to use this authority\n :return:\n \"\"\"\n authority = get(authority_id)\n if roles:\n authority = database.update_list(authority, 'roles', Role, roles)\n\n if active:\n authority.active = active\n\n authority.description = description\n authority.owner = owner\n return database.update(authority)\n\n\ndef create(kwargs):\n \"\"\"\n Create a new authority.\n\n :return:\n \"\"\"\n\n issuer = plugins.get(kwargs.get('pluginName'))\n\n kwargs['creator'] = g.current_user.email\n cert_body, intermediate, issuer_roles = issuer.create_authority(kwargs)\n\n cert = Certificate(cert_body, chain=intermediate)\n cert.owner = kwargs['ownerEmail']\n\n if kwargs['caType'] == 'subca':\n cert.description = \"This is the ROOT certificate for the {0} sub certificate authority the parent \\\n authority is {1}.\".format(kwargs.get('caName'), kwargs.get('caParent'))\n else:\n cert.description = \"This is the ROOT certificate for the {0} certificate authority.\".format(\n kwargs.get('caName')\n )\n\n cert.user = g.current_user\n\n cert.notifications = notification_service.create_default_expiration_notifications(\n 'DEFAULT_SECURITY',\n current_app.config.get('LEMUR_SECURITY_TEAM_EMAIL')\n )\n\n # we create and attach any roles that the issuer gives us\n role_objs = []\n for r in issuer_roles:\n\n role = role_service.create(\n r['name'],\n password=r['password'],\n description=\"{0} auto generated role\".format(kwargs.get('pluginName')),\n username=r['username'])\n\n # the user creating the authority should be able to administer it\n if role.username == 'admin':\n g.current_user.roles.append(role)\n\n role_objs.append(role)\n\n authority = Authority(\n kwargs.get('caName'),\n kwargs['ownerEmail'],\n kwargs['pluginName'],\n cert_body,\n description=kwargs['caDescription'],\n chain=intermediate,\n roles=role_objs\n )\n\n database.update(cert)\n authority = database.create(authority)\n\n # the owning dl or role should have this authority associated with it\n owner_role = role_service.get_by_name(kwargs['ownerEmail'])\n owner_role.authority = authority\n\n g.current_user.authorities.append(authority)\n\n return authority\n\n\ndef get_all():\n \"\"\"\n Get all authorities that are currently in Lemur.\n\n :rtype : List\n :return:\n \"\"\"\n query = database.session_query(Authority)\n return database.find_all(query, Authority, {}).all()\n\n\ndef get(authority_id):\n \"\"\"\n Retrieves an authority given it's ID\n\n :param authority_id:\n :return:\n \"\"\"\n return database.get(Authority, authority_id)\n\n\ndef get_by_name(authority_name):\n \"\"\"\n Retrieves an authority given it's name.\n\n :param authority_name:\n :return:\n \"\"\"\n return database.get(Authority, authority_name, field='name')\n\n\ndef get_authority_role(ca_name):\n \"\"\"\n Attempts to get the authority role for a given ca uses current_user\n as a basis for accomplishing that.\n\n :param ca_name:\n \"\"\"\n if 
g.current_user.is_admin:\n authority = get_by_name(ca_name)\n # TODO we should pick admin ca roles for admin\n return authority.roles[0]\n else:\n for role in g.current_user.roles:\n if role.authority:\n if role.authority.name == ca_name:\n return role\n\n\ndef render(args):\n \"\"\"\n Helper that helps us render the REST Api responses.\n :param args:\n :return:\n \"\"\"\n query = database.session_query(Authority)\n sort_by = args.pop('sort_by')\n sort_dir = args.pop('sort_dir')\n page = args.pop('page')\n count = args.pop('count')\n filt = args.pop('filter')\n\n if filt:\n terms = filt.split(';')\n if 'active' in filt: # this is really weird but strcmp seems to not work here??\n query = query.filter(Authority.active == terms[1])\n else:\n query = database.filter(query, Authority, terms)\n\n # we make sure that a user can only use an authority they either own are are a member of - admins can see all\n if not g.current_user.is_admin:\n authority_ids = []\n for role in g.current_user.roles:\n if role.authority:\n authority_ids.append(role.authority.id)\n query = query.filter(Authority.id.in_(authority_ids))\n\n query = database.find_all(query, Authority, args)\n\n if sort_by and sort_dir:\n query = database.sort(query, Authority, sort_by, sort_dir)\n\n return database.paginate(query, page, count)\n", "path": "lemur/authorities/service.py"}]} | 2,011 | 129 |
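For the Lemur record above, the key change is that the owning role gets linked to the new authority at creation time. Below is a minimal sketch of the behaviour this enables; it is not part of the record, it requires Lemur's app context, a registered issuer plugin, and test fixtures to actually run, and the plugin name and emails are placeholders.

```python
# Sketch only: exercising create() needs Lemur's application context and fixtures.
from lemur.authorities import service as authority_service
from lemur.roles import service as role_service

kwargs = {
    "caName": "example-ca",
    "ownerEmail": "[email protected]",
    "pluginName": "example-issuer-plugin",   # placeholder plugin name
    "caType": "root",
    "caDescription": "example authority",
}
authority = authority_service.create(kwargs)

# After the patch, the owning role points at the authority, so any user who
# holds that role (not just the creator) can use it to issue certificates.
owner_role = role_service.get_by_name("[email protected]")
assert owner_role.authority == authority
```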
gh_patches_debug_11633 | rasdani/github-patches | git_diff | pypi__warehouse-1181 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Errors in celery don't get sent to Sentry
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `warehouse/celery.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import celery.backends
14
15 # We need to trick Celery into supporting rediss:// URLs which is how redis-py
16 # signals that you should use Redis with TLS.
17 celery.backends.BACKEND_ALIASES["rediss"] = "warehouse.celery:TLSRedisBackend" # noqa
18
19 from celery import Celery, Task
20 from celery.backends.redis import RedisBackend as _RedisBackend
21 from celery.signals import celeryd_init
22 from pyramid import scripting
23 from pyramid.threadlocal import get_current_request
24
25 from warehouse.config import Environment, configure
26
27
28 @celeryd_init.connect
29 def _configure_celery(*args, **kwargs):
30 configure()
31
32
33 class TLSRedisBackend(_RedisBackend):
34
35 def _params_from_url(self, url, defaults):
36 params = super()._params_from_url(url, defaults)
37 params.update({"connection_class": self.redis.SSLConnection})
38 return params
39
40
41 class WarehouseTask(Task):
42
43 abstract = True
44
45 def __call__(self, *args, **kwargs):
46 registry = self.app.pyramid_config.registry
47 pyramid_env = scripting.prepare(registry=registry)
48
49 try:
50 return super().__call__(pyramid_env["request"], *args, **kwargs)
51 finally:
52 pyramid_env["closer"]()
53
54 def apply_async(self, *args, **kwargs):
55 # The API design of Celery makes this threadlocal pretty impossible to
56 # avoid :(
57 request = get_current_request()
58
59 # If for whatever reason we were unable to get a request we'll just
60 # skip this and call the original method to send this immediately.
61 if request is None or not hasattr(request, "tm"):
62 return super().apply_async(*args, **kwargs)
63
64 # This will break things that expect to get an AsyncResult because
65 # we're no longer going to be returning an async result from this when
66 # called from within a request, response cycle. Ideally we shouldn't be
67 # waiting for responses in a request/response cycle anyways though.
68 request.tm.get().addAfterCommitHook(
69 self._after_commit_hook,
70 args=args,
71 kws=kwargs,
72 )
73
74 def _after_commit_hook(self, success, *args, **kwargs):
75 if success:
76 super().apply_async(*args, **kwargs)
77
78
79 app = Celery("warehouse")
80 app.Task = WarehouseTask
81
82
83 task = app.task
84
85
86 def includeme(config):
87 s = config.registry.settings
88 app.pyramid_config = config
89 app.conf.update(
90 BROKER_URL=s["celery.broker_url"],
91 BROKER_USE_SSL=s["warehouse.env"] == Environment.production,
92 CELERY_DISABLE_RATE_LIMITS=True,
93 CELERY_RESULT_BACKEND=s["celery.result_url"],
94 CELERY_RESULT_SERIALIZER="json",
95 CELERY_TASK_SERIALIZER="json",
96 CELERY_ACCEPT_CONTENT=["json", "msgpack"],
97 CELERY_MESSAGE_COMPRESSION="gzip",
98 CELERY_QUEUE_HA_POLICY="all",
99 )
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/warehouse/celery.py b/warehouse/celery.py
--- a/warehouse/celery.py
+++ b/warehouse/celery.py
@@ -21,13 +21,16 @@
from celery.signals import celeryd_init
from pyramid import scripting
from pyramid.threadlocal import get_current_request
+from raven.contrib.celery import register_signal, register_logger_signal
from warehouse.config import Environment, configure
@celeryd_init.connect
def _configure_celery(*args, **kwargs):
- configure()
+ config = configure()
+ register_logger_signal(config.registry["raven.client"])
+ register_signal(config.registry["raven.client"])
class TLSRedisBackend(_RedisBackend):
| {"golden_diff": "diff --git a/warehouse/celery.py b/warehouse/celery.py\n--- a/warehouse/celery.py\n+++ b/warehouse/celery.py\n@@ -21,13 +21,16 @@\n from celery.signals import celeryd_init\n from pyramid import scripting\n from pyramid.threadlocal import get_current_request\n+from raven.contrib.celery import register_signal, register_logger_signal\n \n from warehouse.config import Environment, configure\n \n \n @celeryd_init.connect\n def _configure_celery(*args, **kwargs):\n- configure()\n+ config = configure()\n+ register_logger_signal(config.registry[\"raven.client\"])\n+ register_signal(config.registry[\"raven.client\"])\n \n \n class TLSRedisBackend(_RedisBackend):\n", "issue": "Errors in celery don't get sent to Sentry\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport celery.backends\n\n# We need to trick Celery into supporting rediss:// URLs which is how redis-py\n# signals that you should use Redis with TLS.\ncelery.backends.BACKEND_ALIASES[\"rediss\"] = \"warehouse.celery:TLSRedisBackend\" # noqa\n\nfrom celery import Celery, Task\nfrom celery.backends.redis import RedisBackend as _RedisBackend\nfrom celery.signals import celeryd_init\nfrom pyramid import scripting\nfrom pyramid.threadlocal import get_current_request\n\nfrom warehouse.config import Environment, configure\n\n\n@celeryd_init.connect\ndef _configure_celery(*args, **kwargs):\n configure()\n\n\nclass TLSRedisBackend(_RedisBackend):\n\n def _params_from_url(self, url, defaults):\n params = super()._params_from_url(url, defaults)\n params.update({\"connection_class\": self.redis.SSLConnection})\n return params\n\n\nclass WarehouseTask(Task):\n\n abstract = True\n\n def __call__(self, *args, **kwargs):\n registry = self.app.pyramid_config.registry\n pyramid_env = scripting.prepare(registry=registry)\n\n try:\n return super().__call__(pyramid_env[\"request\"], *args, **kwargs)\n finally:\n pyramid_env[\"closer\"]()\n\n def apply_async(self, *args, **kwargs):\n # The API design of Celery makes this threadlocal pretty impossible to\n # avoid :(\n request = get_current_request()\n\n # If for whatever reason we were unable to get a request we'll just\n # skip this and call the original method to send this immediately.\n if request is None or not hasattr(request, \"tm\"):\n return super().apply_async(*args, **kwargs)\n\n # This will break things that expect to get an AsyncResult because\n # we're no longer going to be returning an async result from this when\n # called from within a request, response cycle. 
Ideally we shouldn't be\n # waiting for responses in a request/response cycle anyways though.\n request.tm.get().addAfterCommitHook(\n self._after_commit_hook,\n args=args,\n kws=kwargs,\n )\n\n def _after_commit_hook(self, success, *args, **kwargs):\n if success:\n super().apply_async(*args, **kwargs)\n\n\napp = Celery(\"warehouse\")\napp.Task = WarehouseTask\n\n\ntask = app.task\n\n\ndef includeme(config):\n s = config.registry.settings\n app.pyramid_config = config\n app.conf.update(\n BROKER_URL=s[\"celery.broker_url\"],\n BROKER_USE_SSL=s[\"warehouse.env\"] == Environment.production,\n CELERY_DISABLE_RATE_LIMITS=True,\n CELERY_RESULT_BACKEND=s[\"celery.result_url\"],\n CELERY_RESULT_SERIALIZER=\"json\",\n CELERY_TASK_SERIALIZER=\"json\",\n CELERY_ACCEPT_CONTENT=[\"json\", \"msgpack\"],\n CELERY_MESSAGE_COMPRESSION=\"gzip\",\n CELERY_QUEUE_HA_POLICY=\"all\",\n )\n", "path": "warehouse/celery.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport celery.backends\n\n# We need to trick Celery into supporting rediss:// URLs which is how redis-py\n# signals that you should use Redis with TLS.\ncelery.backends.BACKEND_ALIASES[\"rediss\"] = \"warehouse.celery:TLSRedisBackend\" # noqa\n\nfrom celery import Celery, Task\nfrom celery.backends.redis import RedisBackend as _RedisBackend\nfrom celery.signals import celeryd_init\nfrom pyramid import scripting\nfrom pyramid.threadlocal import get_current_request\nfrom raven.contrib.celery import register_signal, register_logger_signal\n\nfrom warehouse.config import Environment, configure\n\n\n@celeryd_init.connect\ndef _configure_celery(*args, **kwargs):\n config = configure()\n register_logger_signal(config.registry[\"raven.client\"])\n register_signal(config.registry[\"raven.client\"])\n\n\nclass TLSRedisBackend(_RedisBackend):\n\n def _params_from_url(self, url, defaults):\n params = super()._params_from_url(url, defaults)\n params.update({\"connection_class\": self.redis.SSLConnection})\n return params\n\n\nclass WarehouseTask(Task):\n\n abstract = True\n\n def __call__(self, *args, **kwargs):\n registry = self.app.pyramid_config.registry\n pyramid_env = scripting.prepare(registry=registry)\n\n try:\n return super().__call__(pyramid_env[\"request\"], *args, **kwargs)\n finally:\n pyramid_env[\"closer\"]()\n\n def apply_async(self, *args, **kwargs):\n # The API design of Celery makes this threadlocal pretty impossible to\n # avoid :(\n request = get_current_request()\n\n # If for whatever reason we were unable to get a request we'll just\n # skip this and call the original method to send this immediately.\n if request is None or not hasattr(request, \"tm\"):\n return super().apply_async(*args, **kwargs)\n\n # This will break things that expect to get an AsyncResult because\n # we're no longer going to be returning an async result from this when\n # called from within a request, response cycle. 
Ideally we shouldn't be\n # waiting for responses in a request/response cycle anyways though.\n request.tm.get().addAfterCommitHook(\n self._after_commit_hook,\n args=args,\n kws=kwargs,\n )\n\n def _after_commit_hook(self, success, *args, **kwargs):\n if success:\n super().apply_async(*args, **kwargs)\n\n\napp = Celery(\"warehouse\")\napp.Task = WarehouseTask\n\n\ntask = app.task\n\n\ndef includeme(config):\n s = config.registry.settings\n app.pyramid_config = config\n app.conf.update(\n BROKER_URL=s[\"celery.broker_url\"],\n BROKER_USE_SSL=s[\"warehouse.env\"] == Environment.production,\n CELERY_DISABLE_RATE_LIMITS=True,\n CELERY_RESULT_BACKEND=s[\"celery.result_url\"],\n CELERY_RESULT_SERIALIZER=\"json\",\n CELERY_TASK_SERIALIZER=\"json\",\n CELERY_ACCEPT_CONTENT=[\"json\", \"msgpack\"],\n CELERY_MESSAGE_COMPRESSION=\"gzip\",\n CELERY_QUEUE_HA_POLICY=\"all\",\n )\n", "path": "warehouse/celery.py"}]} | 1,222 | 158 |
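The Warehouse fix relies on raven's Celery integration. The same wiring can be shown in isolation; this is a minimal sketch assuming a raven client configured with your project's DSN (the DSN below is a placeholder).

```python
# Standalone sketch of raven's Celery integration (placeholder DSN).
from raven import Client
from raven.contrib.celery import register_logger_signal, register_signal

client = Client("https://public:[email protected]/1")

# Forward error-level log records emitted inside tasks to Sentry.
register_logger_signal(client)
# Forward unhandled task exceptions (the task_failure signal) to Sentry.
register_signal(client)
```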
gh_patches_debug_29413 | rasdani/github-patches | git_diff | freedomofpress__securedrop-7045 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
determine post-upgrade failure-mode for a SHA-1-signed submission key
## Description
After #6948 (for #6399), redwood will refuse to encrypt to a submission key with a SHA-1 signature.
After #6928, `securedrop-admin sdconfig` will reject a submission key with a SHA-1 signature. This check guarantees that new and reconfigured instances will comply with #6948.
What will happen to an instance with a SHA-1-signed signature after upgrading to v2.7.0?
## Possible approaches
| Option | Documentation changes | Code changes | Implication |
| --- | --- | --- | --- |
| Fail open, but log | optional | ✓ | Admin must monitor logs and/or OSSEC alerts. |
| Fail open, but document | ✓ | ✗ | Admin must monitor release notes or check documentation. |
| Fail closed | optional | ✓[1] | Admin can contact us for help. |
**Notes:**
1. @legoktm observes that, without a code change to handle this case, Apache will come back up after reboot even if the `postinst` script fails under `unattended-upgrades`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `securedrop/journalist.py`
Content:
```
1 from encryption import EncryptionManager, GpgKeyNotFoundError
2 from execution import asynchronous
3 from journalist_app import create_app
4 from models import Source
5 from sdconfig import SecureDropConfig
6
7 config = SecureDropConfig.get_current()
8 # app is imported by journalist.wsgi
9 app = create_app(config)
10
11
12 @asynchronous
13 def prime_keycache() -> None:
14 """Pre-load the source public keys into Redis."""
15 with app.app_context():
16 encryption_mgr = EncryptionManager.get_default()
17 for source in Source.query.filter_by(pending=False, deleted_at=None).all():
18 try:
19 encryption_mgr.get_source_public_key(source.filesystem_id)
20 except GpgKeyNotFoundError:
21 pass
22
23
24 prime_keycache()
25
26
27 if __name__ == "__main__": # pragma: no cover
28 debug = getattr(config, "env", "prod") != "prod"
29 # nosemgrep: python.flask.security.audit.app-run-param-config.avoid_app_run_with_bad_host
30 app.run(debug=debug, host="0.0.0.0", port=8081)
31
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/securedrop/journalist.py b/securedrop/journalist.py
--- a/securedrop/journalist.py
+++ b/securedrop/journalist.py
@@ -1,9 +1,13 @@
+import sys
+
from encryption import EncryptionManager, GpgKeyNotFoundError
from execution import asynchronous
from journalist_app import create_app
from models import Source
from sdconfig import SecureDropConfig
+import redwood
+
config = SecureDropConfig.get_current()
# app is imported by journalist.wsgi
app = create_app(config)
@@ -21,10 +25,28 @@
pass
-prime_keycache()
+def validate_journalist_key() -> None:
+ """Verify the journalist PGP key is valid"""
+ encryption_mgr = EncryptionManager.get_default()
+ # First check that we can read it
+ try:
+ journalist_key = encryption_mgr.get_journalist_public_key()
+ except Exception as e:
+ print(f"ERROR: Unable to read journalist public key: {e}", file=sys.stderr)
+ app.logger.error(f"ERROR: Unable to read journalist public key: {e}")
+ sys.exit(1)
+ # And then what we read is valid
+ try:
+ redwood.is_valid_public_key(journalist_key)
+ except redwood.RedwoodError as e:
+ print(f"ERROR: Journalist public key is not valid: {e}", file=sys.stderr)
+ app.logger.error(f"ERROR: Journalist public key is not valid: {e}")
+ sys.exit(1)
if __name__ == "__main__": # pragma: no cover
+ validate_journalist_key()
+ prime_keycache()
debug = getattr(config, "env", "prod") != "prod"
# nosemgrep: python.flask.security.audit.app-run-param-config.avoid_app_run_with_bad_host
app.run(debug=debug, host="0.0.0.0", port=8081)
| {"golden_diff": "diff --git a/securedrop/journalist.py b/securedrop/journalist.py\n--- a/securedrop/journalist.py\n+++ b/securedrop/journalist.py\n@@ -1,9 +1,13 @@\n+import sys\n+\n from encryption import EncryptionManager, GpgKeyNotFoundError\n from execution import asynchronous\n from journalist_app import create_app\n from models import Source\n from sdconfig import SecureDropConfig\n \n+import redwood\n+\n config = SecureDropConfig.get_current()\n # app is imported by journalist.wsgi\n app = create_app(config)\n@@ -21,10 +25,28 @@\n pass\n \n \n-prime_keycache()\n+def validate_journalist_key() -> None:\n+ \"\"\"Verify the journalist PGP key is valid\"\"\"\n+ encryption_mgr = EncryptionManager.get_default()\n+ # First check that we can read it\n+ try:\n+ journalist_key = encryption_mgr.get_journalist_public_key()\n+ except Exception as e:\n+ print(f\"ERROR: Unable to read journalist public key: {e}\", file=sys.stderr)\n+ app.logger.error(f\"ERROR: Unable to read journalist public key: {e}\")\n+ sys.exit(1)\n+ # And then what we read is valid\n+ try:\n+ redwood.is_valid_public_key(journalist_key)\n+ except redwood.RedwoodError as e:\n+ print(f\"ERROR: Journalist public key is not valid: {e}\", file=sys.stderr)\n+ app.logger.error(f\"ERROR: Journalist public key is not valid: {e}\")\n+ sys.exit(1)\n \n \n if __name__ == \"__main__\": # pragma: no cover\n+ validate_journalist_key()\n+ prime_keycache()\n debug = getattr(config, \"env\", \"prod\") != \"prod\"\n # nosemgrep: python.flask.security.audit.app-run-param-config.avoid_app_run_with_bad_host\n app.run(debug=debug, host=\"0.0.0.0\", port=8081)\n", "issue": "determine post-upgrade failure-mode for a SHA-1-signed submission key\n## Description\r\n\r\nAfter #6948 (for #6399), redwood will refuse to encrypt to a submission key with a SHA-1 signature.\r\n\r\nAfter #6928, `securedrop-admin sdconfig` will reject a submission key with a SHA-1 signature. This check guarantees that new and reconfigured instances will comply with #6948.\r\n\r\nWhat will happen to an instance with a SHA-1-signed signature after upgrading to v2.7.0?\r\n\r\n## Possible approaches\r\n\r\n| Option | Documentation changes | Code changes | Implication |\r\n| --- | --- | --- | --- |\r\n| Fail open, but log | optional | \u2713 | Admin must monitor logs and/or OSSEC alerts. |\r\n| Fail open, but document | \u2713 | \u2717 | Admin must monitor release notes or check documentation. |\r\n| Fail closed | optional | \u2713[1] | Admin can contact us for help. |\r\n\r\n**Notes:**\r\n1. 
@legoktm observes that, without a code change to handle this case, Apache will come back up after reboot even if the `postinst` script fails under `unattended-upgrades`.\n", "before_files": [{"content": "from encryption import EncryptionManager, GpgKeyNotFoundError\nfrom execution import asynchronous\nfrom journalist_app import create_app\nfrom models import Source\nfrom sdconfig import SecureDropConfig\n\nconfig = SecureDropConfig.get_current()\n# app is imported by journalist.wsgi\napp = create_app(config)\n\n\n@asynchronous\ndef prime_keycache() -> None:\n \"\"\"Pre-load the source public keys into Redis.\"\"\"\n with app.app_context():\n encryption_mgr = EncryptionManager.get_default()\n for source in Source.query.filter_by(pending=False, deleted_at=None).all():\n try:\n encryption_mgr.get_source_public_key(source.filesystem_id)\n except GpgKeyNotFoundError:\n pass\n\n\nprime_keycache()\n\n\nif __name__ == \"__main__\": # pragma: no cover\n debug = getattr(config, \"env\", \"prod\") != \"prod\"\n # nosemgrep: python.flask.security.audit.app-run-param-config.avoid_app_run_with_bad_host\n app.run(debug=debug, host=\"0.0.0.0\", port=8081)\n", "path": "securedrop/journalist.py"}], "after_files": [{"content": "import sys\n\nfrom encryption import EncryptionManager, GpgKeyNotFoundError\nfrom execution import asynchronous\nfrom journalist_app import create_app\nfrom models import Source\nfrom sdconfig import SecureDropConfig\n\nimport redwood\n\nconfig = SecureDropConfig.get_current()\n# app is imported by journalist.wsgi\napp = create_app(config)\n\n\n@asynchronous\ndef prime_keycache() -> None:\n \"\"\"Pre-load the source public keys into Redis.\"\"\"\n with app.app_context():\n encryption_mgr = EncryptionManager.get_default()\n for source in Source.query.filter_by(pending=False, deleted_at=None).all():\n try:\n encryption_mgr.get_source_public_key(source.filesystem_id)\n except GpgKeyNotFoundError:\n pass\n\n\ndef validate_journalist_key() -> None:\n \"\"\"Verify the journalist PGP key is valid\"\"\"\n encryption_mgr = EncryptionManager.get_default()\n # First check that we can read it\n try:\n journalist_key = encryption_mgr.get_journalist_public_key()\n except Exception as e:\n print(f\"ERROR: Unable to read journalist public key: {e}\", file=sys.stderr)\n app.logger.error(f\"ERROR: Unable to read journalist public key: {e}\")\n sys.exit(1)\n # And then what we read is valid\n try:\n redwood.is_valid_public_key(journalist_key)\n except redwood.RedwoodError as e:\n print(f\"ERROR: Journalist public key is not valid: {e}\", file=sys.stderr)\n app.logger.error(f\"ERROR: Journalist public key is not valid: {e}\")\n sys.exit(1)\n\n\nif __name__ == \"__main__\": # pragma: no cover\n validate_journalist_key()\n prime_keycache()\n debug = getattr(config, \"env\", \"prod\") != \"prod\"\n # nosemgrep: python.flask.security.audit.app-run-param-config.avoid_app_run_with_bad_host\n app.run(debug=debug, host=\"0.0.0.0\", port=8081)\n", "path": "securedrop/journalist.py"}]} | 797 | 440 |
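The SecureDrop patch takes the fail-closed option from the issue's table: the journalist key is validated before the app starts serving. The core check is shown in isolation below as a sketch; `redwood` is the project's PGP bridge used in the patch, and how the key is obtained is assumed.

```python
# Sketch of the fail-closed startup check added by the patch.
import sys

import redwood


def ensure_valid_journalist_key(journalist_key: str) -> None:
    try:
        redwood.is_valid_public_key(journalist_key)
    except redwood.RedwoodError as e:
        print(f"ERROR: Journalist public key is not valid: {e}", file=sys.stderr)
        sys.exit(1)
```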
gh_patches_debug_21071 | rasdani/github-patches | git_diff | netbox-community__netbox-14608 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Datasources stuck in sync when using git + ssh from ./manage.py syncdatasource
### NetBox version
v3.6.1
### Python version
3.11
### Steps to Reproduce
In Data Sources
Add
Name: test
Type: git
URL: [email protected]:netbox-community/netbox.git
Create
docker compose exec netbox ./manage.py syncdatasource test
### Expected Behavior
This usually leads to some sort of SSH prompt or failure, and I would expect the exception to set the status to failed, so that I can then hit sync again.
I'm not sure exactly how NetBox works, but looking at one of the exceptions...
core.exceptions.SyncError: Fetching remote data failed (HangupException):
class SyncError(Exception):
pass
Does this mean the status is not being reset correctly when this exception is raised, leaving it stuck as 'syncing'?
### Observed Behavior
datasource.status = syncing in nbshell
'syncing' in gui
Sync option is now greyed out and cannot reset status without manually setting it in nbshell:
for d in DataSource.objects.filter(status='syncing'):
d.status = 'failed'
d.save()
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `netbox/core/management/commands/syncdatasource.py`
Content:
```
1 from django.core.management.base import BaseCommand, CommandError
2
3 from core.models import DataSource
4
5
6 class Command(BaseCommand):
7 help = "Synchronize a data source from its remote upstream"
8
9 def add_arguments(self, parser):
10 parser.add_argument('name', nargs='*', help="Data source(s) to synchronize")
11 parser.add_argument(
12 "--all", action='store_true', dest='sync_all',
13 help="Synchronize all data sources"
14 )
15
16 def handle(self, *args, **options):
17
18 # Find DataSources to sync
19 if options['sync_all']:
20 datasources = DataSource.objects.all()
21 elif options['name']:
22 datasources = DataSource.objects.filter(name__in=options['name'])
23 # Check for invalid names
24 found_names = {ds['name'] for ds in datasources.values('name')}
25 if invalid_names := set(options['name']) - found_names:
26 raise CommandError(f"Invalid data source names: {', '.join(invalid_names)}")
27 else:
28 raise CommandError(f"Must specify at least one data source, or set --all.")
29
30 if len(options['name']) > 1:
31 self.stdout.write(f"Syncing {len(datasources)} data sources.")
32
33 for i, datasource in enumerate(datasources, start=1):
34 self.stdout.write(f"[{i}] Syncing {datasource}... ", ending='')
35 self.stdout.flush()
36 datasource.sync()
37 self.stdout.write(datasource.get_status_display())
38 self.stdout.flush()
39
40 if len(options['name']) > 1:
41 self.stdout.write(f"Finished.")
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/netbox/core/management/commands/syncdatasource.py b/netbox/core/management/commands/syncdatasource.py
--- a/netbox/core/management/commands/syncdatasource.py
+++ b/netbox/core/management/commands/syncdatasource.py
@@ -1,5 +1,6 @@
from django.core.management.base import BaseCommand, CommandError
+from core.choices import DataSourceStatusChoices
from core.models import DataSource
@@ -33,9 +34,13 @@
for i, datasource in enumerate(datasources, start=1):
self.stdout.write(f"[{i}] Syncing {datasource}... ", ending='')
self.stdout.flush()
- datasource.sync()
- self.stdout.write(datasource.get_status_display())
- self.stdout.flush()
+ try:
+ datasource.sync()
+ self.stdout.write(datasource.get_status_display())
+ self.stdout.flush()
+ except Exception as e:
+ DataSource.objects.filter(pk=datasource.pk).update(status=DataSourceStatusChoices.FAILED)
+ raise e
if len(options['name']) > 1:
self.stdout.write(f"Finished.")
| {"golden_diff": "diff --git a/netbox/core/management/commands/syncdatasource.py b/netbox/core/management/commands/syncdatasource.py\n--- a/netbox/core/management/commands/syncdatasource.py\n+++ b/netbox/core/management/commands/syncdatasource.py\n@@ -1,5 +1,6 @@\n from django.core.management.base import BaseCommand, CommandError\n \n+from core.choices import DataSourceStatusChoices\n from core.models import DataSource\n \n \n@@ -33,9 +34,13 @@\n for i, datasource in enumerate(datasources, start=1):\n self.stdout.write(f\"[{i}] Syncing {datasource}... \", ending='')\n self.stdout.flush()\n- datasource.sync()\n- self.stdout.write(datasource.get_status_display())\n- self.stdout.flush()\n+ try:\n+ datasource.sync()\n+ self.stdout.write(datasource.get_status_display())\n+ self.stdout.flush()\n+ except Exception as e:\n+ DataSource.objects.filter(pk=datasource.pk).update(status=DataSourceStatusChoices.FAILED)\n+ raise e\n \n if len(options['name']) > 1:\n self.stdout.write(f\"Finished.\")\n", "issue": "Datasources stuck in sync when using git + ssh from ./manage.py syncdatasource\n### NetBox version\n\nv3.6.1\n\n### Python version\n\n3.11\n\n### Steps to Reproduce\n\nIn Data Sources\r\nAdd\r\nName: test\r\nType: git\r\nURL: [email protected]:netbox-community/netbox.git\r\nCreate\r\n\r\ndocker compose exec netbox ./manage.py syncdatasource test\r\n\r\n\r\n\r\n\n\n### Expected Behavior\n\nUsually leads to some sort of ssh question or failure, and I would expect the exception to set the status to failed, and then be able to hit sync again.\r\n\r\nI'm not sure exactly how NetBox works, but looking at one of the exceptions...\r\ncore.exceptions.SyncError: Fetching remote data failed (HangupException): \r\n\r\nclass SyncError(Exception):\r\n pass\r\n\r\nDoes this mean the status is not being reset correctly due to the status being left as syncing?\r\n\r\n\n\n### Observed Behavior\n\ndatasource.status = syncing in nbshell\r\n'syncing' in gui\r\nSync option is now greyed out and cannot reset status without manually setting it in nbshell:\r\n\r\nfor d in DataSource.objects.filter(status='syncing'):\r\n d.status = 'failed'\r\n d.save()\r\n\n", "before_files": [{"content": "from django.core.management.base import BaseCommand, CommandError\n\nfrom core.models import DataSource\n\n\nclass Command(BaseCommand):\n help = \"Synchronize a data source from its remote upstream\"\n\n def add_arguments(self, parser):\n parser.add_argument('name', nargs='*', help=\"Data source(s) to synchronize\")\n parser.add_argument(\n \"--all\", action='store_true', dest='sync_all',\n help=\"Synchronize all data sources\"\n )\n\n def handle(self, *args, **options):\n\n # Find DataSources to sync\n if options['sync_all']:\n datasources = DataSource.objects.all()\n elif options['name']:\n datasources = DataSource.objects.filter(name__in=options['name'])\n # Check for invalid names\n found_names = {ds['name'] for ds in datasources.values('name')}\n if invalid_names := set(options['name']) - found_names:\n raise CommandError(f\"Invalid data source names: {', '.join(invalid_names)}\")\n else:\n raise CommandError(f\"Must specify at least one data source, or set --all.\")\n\n if len(options['name']) > 1:\n self.stdout.write(f\"Syncing {len(datasources)} data sources.\")\n\n for i, datasource in enumerate(datasources, start=1):\n self.stdout.write(f\"[{i}] Syncing {datasource}... 
\", ending='')\n self.stdout.flush()\n datasource.sync()\n self.stdout.write(datasource.get_status_display())\n self.stdout.flush()\n\n if len(options['name']) > 1:\n self.stdout.write(f\"Finished.\")\n", "path": "netbox/core/management/commands/syncdatasource.py"}], "after_files": [{"content": "from django.core.management.base import BaseCommand, CommandError\n\nfrom core.choices import DataSourceStatusChoices\nfrom core.models import DataSource\n\n\nclass Command(BaseCommand):\n help = \"Synchronize a data source from its remote upstream\"\n\n def add_arguments(self, parser):\n parser.add_argument('name', nargs='*', help=\"Data source(s) to synchronize\")\n parser.add_argument(\n \"--all\", action='store_true', dest='sync_all',\n help=\"Synchronize all data sources\"\n )\n\n def handle(self, *args, **options):\n\n # Find DataSources to sync\n if options['sync_all']:\n datasources = DataSource.objects.all()\n elif options['name']:\n datasources = DataSource.objects.filter(name__in=options['name'])\n # Check for invalid names\n found_names = {ds['name'] for ds in datasources.values('name')}\n if invalid_names := set(options['name']) - found_names:\n raise CommandError(f\"Invalid data source names: {', '.join(invalid_names)}\")\n else:\n raise CommandError(f\"Must specify at least one data source, or set --all.\")\n\n if len(options['name']) > 1:\n self.stdout.write(f\"Syncing {len(datasources)} data sources.\")\n\n for i, datasource in enumerate(datasources, start=1):\n self.stdout.write(f\"[{i}] Syncing {datasource}... \", ending='')\n self.stdout.flush()\n try:\n datasource.sync()\n self.stdout.write(datasource.get_status_display())\n self.stdout.flush()\n except Exception as e:\n DataSource.objects.filter(pk=datasource.pk).update(status=DataSourceStatusChoices.FAILED)\n raise e\n\n if len(options['name']) > 1:\n self.stdout.write(f\"Finished.\")\n", "path": "netbox/core/management/commands/syncdatasource.py"}]} | 936 | 249 |
gh_patches_debug_27449 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-321 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Function to handle deleting schemas
**Problem**
<!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.-->
Users might want to delete schemas. We don't currently support this.
**Proposed solution**
<!-- A clear and concise description of your proposed solution or feature. -->
A function that handles deleting of schemas in the database. We should raise an error if there is anything outside of the schema referencing the schema.
**Additional context**
<!-- Add any other context or screenshots about the feature request here.-->
This should be in the `db` module.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `db/schemas.py`
Content:
```
1 import logging
2 import warnings
3 from sqlalchemy.schema import CreateSchema
4 from sqlalchemy import inspect, MetaData, select, and_, not_, or_, Table
5
6 from db import types
7
8 logger = logging.getLogger(__name__)
9
10 TYPES_SCHEMA = types.base.SCHEMA
11
12 EXCLUDED_SCHEMATA = [TYPES_SCHEMA, "information_schema"]
13
14
15 def get_schema_name_from_oid(oid, engine):
16 return reflect_schema(engine, oid=oid)["name"]
17
18
19 def get_schema_oid_from_name(name, engine):
20 return reflect_schema(engine, name=name)["oid"]
21
22
23 def reflect_schema(engine, name=None, oid=None):
24 # If we have both arguments, the behavior is undefined.
25 try:
26 assert name is None or oid is None
27 except AssertionError as e:
28 logger.error("ERROR: Only one of 'name' or 'oid' can be given!")
29 raise e
30 metadata = MetaData()
31 with warnings.catch_warnings():
32 warnings.filterwarnings("ignore", message="Did not recognize type")
33 pg_namespace = Table("pg_namespace", metadata, autoload_with=engine)
34 sel = (
35 select(pg_namespace.c.oid, pg_namespace.c.nspname.label("name"))
36 .where(or_(pg_namespace.c.nspname == name, pg_namespace.c.oid == oid))
37 )
38 with engine.begin() as conn:
39 schema_info = conn.execute(sel).fetchone()
40 return schema_info
41
42
43 def get_mathesar_schemas(engine):
44 return [schema for schema, _ in get_mathesar_schemas_with_oids(engine)]
45
46
47 def get_mathesar_schemas_with_oids(engine):
48 metadata = MetaData()
49 with warnings.catch_warnings():
50 warnings.filterwarnings("ignore", message="Did not recognize type")
51 pg_namespace = Table("pg_namespace", metadata, autoload_with=engine)
52 sel = (
53 select(pg_namespace.c.nspname.label('schema'), pg_namespace.c.oid)
54 .where(
55 and_(
56 *[pg_namespace.c.nspname != schema for schema in EXCLUDED_SCHEMATA],
57 not_(pg_namespace.c.nspname.like("pg_%"))
58 )
59 )
60 )
61 with engine.begin() as conn:
62 result = conn.execute(sel).fetchall()
63 return result
64
65
66 def get_all_schemas(engine):
67 inspector = inspect(engine)
68 # We don't need to exclude system schemas (i.e., starting with "pg_")
69 # since Inspector.get_schema_names already excludes them. Thus, this
70 # function actually gets all non-pg-reserved schemas.
71 return inspector.get_schema_names()
72
73
74 def create_schema(schema, engine):
75 """
76 This method creates a Postgres schema.
77 """
78 if schema not in get_all_schemas(engine):
79 with engine.begin() as connection:
80 connection.execute(CreateSchema(schema))
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/db/schemas.py b/db/schemas.py
--- a/db/schemas.py
+++ b/db/schemas.py
@@ -1,7 +1,11 @@
import logging
import warnings
-from sqlalchemy.schema import CreateSchema
+from sqlalchemy.schema import CreateSchema, DropSchema
from sqlalchemy import inspect, MetaData, select, and_, not_, or_, Table
+from sqlalchemy.exc import InternalError
+from sqlalchemy.schema import DDLElement
+from sqlalchemy.ext import compiler
+from psycopg2.errors import DependentObjectsStillExist
from db import types
@@ -78,3 +82,42 @@
if schema not in get_all_schemas(engine):
with engine.begin() as connection:
connection.execute(CreateSchema(schema))
+
+
+def delete_schema(schema, engine, cascade=False, if_exists=False):
+ """
+ This method deletes a Postgres schema.
+ """
+ if if_exists and schema not in get_all_schemas(engine):
+ return
+
+ with engine.begin() as connection:
+ try:
+ connection.execute(DropSchema(schema, cascade=cascade))
+ except InternalError as e:
+ if isinstance(e.orig, DependentObjectsStillExist):
+ raise e.orig
+ else:
+ raise e
+
+
+class RenameSchema(DDLElement):
+ def __init__(self, schema, rename_to):
+ self.schema = schema
+ self.rename_to = rename_to
+
+
[email protected](RenameSchema)
+def compile_rename_schema(element, compiler, **_):
+ return "ALTER SCHEMA %s RENAME TO %s" % (
+ element.schema,
+ element.rename_to
+ )
+
+
+def rename_schema(schema, engine, rename_to):
+ """
+ This method renames a Postgres schema.
+ """
+ with engine.begin() as connection:
+ connection.execute(RenameSchema(schema, rename_to))
| {"golden_diff": "diff --git a/db/schemas.py b/db/schemas.py\n--- a/db/schemas.py\n+++ b/db/schemas.py\n@@ -1,7 +1,11 @@\n import logging\n import warnings\n-from sqlalchemy.schema import CreateSchema\n+from sqlalchemy.schema import CreateSchema, DropSchema\n from sqlalchemy import inspect, MetaData, select, and_, not_, or_, Table\n+from sqlalchemy.exc import InternalError\n+from sqlalchemy.schema import DDLElement\n+from sqlalchemy.ext import compiler\n+from psycopg2.errors import DependentObjectsStillExist\n \n from db import types\n \n@@ -78,3 +82,42 @@\n if schema not in get_all_schemas(engine):\n with engine.begin() as connection:\n connection.execute(CreateSchema(schema))\n+\n+\n+def delete_schema(schema, engine, cascade=False, if_exists=False):\n+ \"\"\"\n+ This method deletes a Postgres schema.\n+ \"\"\"\n+ if if_exists and schema not in get_all_schemas(engine):\n+ return\n+\n+ with engine.begin() as connection:\n+ try:\n+ connection.execute(DropSchema(schema, cascade=cascade))\n+ except InternalError as e:\n+ if isinstance(e.orig, DependentObjectsStillExist):\n+ raise e.orig\n+ else:\n+ raise e\n+\n+\n+class RenameSchema(DDLElement):\n+ def __init__(self, schema, rename_to):\n+ self.schema = schema\n+ self.rename_to = rename_to\n+\n+\[email protected](RenameSchema)\n+def compile_rename_schema(element, compiler, **_):\n+ return \"ALTER SCHEMA %s RENAME TO %s\" % (\n+ element.schema,\n+ element.rename_to\n+ )\n+\n+\n+def rename_schema(schema, engine, rename_to):\n+ \"\"\"\n+ This method renames a Postgres schema.\n+ \"\"\"\n+ with engine.begin() as connection:\n+ connection.execute(RenameSchema(schema, rename_to))\n", "issue": "Function to handle deleting schemas\n**Problem**\r\n<!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.-->\r\nUsers might want to delete schemas. We don't currently support this.\r\n\r\n**Proposed solution**\r\n<!-- A clear and concise description of your proposed solution or feature. -->\r\nA function that handles deleting of schemas in the database. 
We should raise an error if there is anything outside of the schema referencing the schema.\r\n\r\n**Additional context**\r\n<!-- Add any other context or screenshots about the feature request here.-->\r\nThis should be in the `db` module.\n", "before_files": [{"content": "import logging\nimport warnings\nfrom sqlalchemy.schema import CreateSchema\nfrom sqlalchemy import inspect, MetaData, select, and_, not_, or_, Table\n\nfrom db import types\n\nlogger = logging.getLogger(__name__)\n\nTYPES_SCHEMA = types.base.SCHEMA\n\nEXCLUDED_SCHEMATA = [TYPES_SCHEMA, \"information_schema\"]\n\n\ndef get_schema_name_from_oid(oid, engine):\n return reflect_schema(engine, oid=oid)[\"name\"]\n\n\ndef get_schema_oid_from_name(name, engine):\n return reflect_schema(engine, name=name)[\"oid\"]\n\n\ndef reflect_schema(engine, name=None, oid=None):\n # If we have both arguments, the behavior is undefined.\n try:\n assert name is None or oid is None\n except AssertionError as e:\n logger.error(\"ERROR: Only one of 'name' or 'oid' can be given!\")\n raise e\n metadata = MetaData()\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", message=\"Did not recognize type\")\n pg_namespace = Table(\"pg_namespace\", metadata, autoload_with=engine)\n sel = (\n select(pg_namespace.c.oid, pg_namespace.c.nspname.label(\"name\"))\n .where(or_(pg_namespace.c.nspname == name, pg_namespace.c.oid == oid))\n )\n with engine.begin() as conn:\n schema_info = conn.execute(sel).fetchone()\n return schema_info\n\n\ndef get_mathesar_schemas(engine):\n return [schema for schema, _ in get_mathesar_schemas_with_oids(engine)]\n\n\ndef get_mathesar_schemas_with_oids(engine):\n metadata = MetaData()\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", message=\"Did not recognize type\")\n pg_namespace = Table(\"pg_namespace\", metadata, autoload_with=engine)\n sel = (\n select(pg_namespace.c.nspname.label('schema'), pg_namespace.c.oid)\n .where(\n and_(\n *[pg_namespace.c.nspname != schema for schema in EXCLUDED_SCHEMATA],\n not_(pg_namespace.c.nspname.like(\"pg_%\"))\n )\n )\n )\n with engine.begin() as conn:\n result = conn.execute(sel).fetchall()\n return result\n\n\ndef get_all_schemas(engine):\n inspector = inspect(engine)\n # We don't need to exclude system schemas (i.e., starting with \"pg_\")\n # since Inspector.get_schema_names already excludes them. 
Thus, this\n # function actually gets all non-pg-reserved schemas.\n return inspector.get_schema_names()\n\n\ndef create_schema(schema, engine):\n \"\"\"\n This method creates a Postgres schema.\n \"\"\"\n if schema not in get_all_schemas(engine):\n with engine.begin() as connection:\n connection.execute(CreateSchema(schema))\n", "path": "db/schemas.py"}], "after_files": [{"content": "import logging\nimport warnings\nfrom sqlalchemy.schema import CreateSchema, DropSchema\nfrom sqlalchemy import inspect, MetaData, select, and_, not_, or_, Table\nfrom sqlalchemy.exc import InternalError\nfrom sqlalchemy.schema import DDLElement\nfrom sqlalchemy.ext import compiler\nfrom psycopg2.errors import DependentObjectsStillExist\n\nfrom db import types\n\nlogger = logging.getLogger(__name__)\n\nTYPES_SCHEMA = types.base.SCHEMA\n\nEXCLUDED_SCHEMATA = [TYPES_SCHEMA, \"information_schema\"]\n\n\ndef get_schema_name_from_oid(oid, engine):\n return reflect_schema(engine, oid=oid)[\"name\"]\n\n\ndef get_schema_oid_from_name(name, engine):\n return reflect_schema(engine, name=name)[\"oid\"]\n\n\ndef reflect_schema(engine, name=None, oid=None):\n # If we have both arguments, the behavior is undefined.\n try:\n assert name is None or oid is None\n except AssertionError as e:\n logger.error(\"ERROR: Only one of 'name' or 'oid' can be given!\")\n raise e\n metadata = MetaData()\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", message=\"Did not recognize type\")\n pg_namespace = Table(\"pg_namespace\", metadata, autoload_with=engine)\n sel = (\n select(pg_namespace.c.oid, pg_namespace.c.nspname.label(\"name\"))\n .where(or_(pg_namespace.c.nspname == name, pg_namespace.c.oid == oid))\n )\n with engine.begin() as conn:\n schema_info = conn.execute(sel).fetchone()\n return schema_info\n\n\ndef get_mathesar_schemas(engine):\n return [schema for schema, _ in get_mathesar_schemas_with_oids(engine)]\n\n\ndef get_mathesar_schemas_with_oids(engine):\n metadata = MetaData()\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", message=\"Did not recognize type\")\n pg_namespace = Table(\"pg_namespace\", metadata, autoload_with=engine)\n sel = (\n select(pg_namespace.c.nspname.label('schema'), pg_namespace.c.oid)\n .where(\n and_(\n *[pg_namespace.c.nspname != schema for schema in EXCLUDED_SCHEMATA],\n not_(pg_namespace.c.nspname.like(\"pg_%\"))\n )\n )\n )\n with engine.begin() as conn:\n result = conn.execute(sel).fetchall()\n return result\n\n\ndef get_all_schemas(engine):\n inspector = inspect(engine)\n # We don't need to exclude system schemas (i.e., starting with \"pg_\")\n # since Inspector.get_schema_names already excludes them. 
Thus, this\n # function actually gets all non-pg-reserved schemas.\n return inspector.get_schema_names()\n\n\ndef create_schema(schema, engine):\n \"\"\"\n This method creates a Postgres schema.\n \"\"\"\n if schema not in get_all_schemas(engine):\n with engine.begin() as connection:\n connection.execute(CreateSchema(schema))\n\n\ndef delete_schema(schema, engine, cascade=False, if_exists=False):\n \"\"\"\n This method deletes a Postgres schema.\n \"\"\"\n if if_exists and schema not in get_all_schemas(engine):\n return\n\n with engine.begin() as connection:\n try:\n connection.execute(DropSchema(schema, cascade=cascade))\n except InternalError as e:\n if isinstance(e.orig, DependentObjectsStillExist):\n raise e.orig\n else:\n raise e\n\n\nclass RenameSchema(DDLElement):\n def __init__(self, schema, rename_to):\n self.schema = schema\n self.rename_to = rename_to\n\n\[email protected](RenameSchema)\ndef compile_rename_schema(element, compiler, **_):\n return \"ALTER SCHEMA %s RENAME TO %s\" % (\n element.schema,\n element.rename_to\n )\n\n\ndef rename_schema(schema, engine, rename_to):\n \"\"\"\n This method renames a Postgres schema.\n \"\"\"\n with engine.begin() as connection:\n connection.execute(RenameSchema(schema, rename_to))\n", "path": "db/schemas.py"}]} | 1,125 | 421 |
gh_patches_debug_4070 | rasdani/github-patches | git_diff | scrapy__scrapy-4033 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
may be 'accessible'?
in the function [request_fingerprint](https://github.com/scrapy/scrapy/blob/master/scrapy/utils/request.py) ,‘accesible’ may be ‘accessible’ in comments. OCD XD..
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/utils/request.py`
Content:
```
1 """
2 This module provides some useful functions for working with
3 scrapy.http.Request objects
4 """
5
6 from __future__ import print_function
7 import hashlib
8 import weakref
9 from six.moves.urllib.parse import urlunparse
10
11 from w3lib.http import basic_auth_header
12 from scrapy.utils.python import to_bytes, to_native_str
13
14 from w3lib.url import canonicalize_url
15 from scrapy.utils.httpobj import urlparse_cached
16
17
18 _fingerprint_cache = weakref.WeakKeyDictionary()
19 def request_fingerprint(request, include_headers=None):
20 """
21 Return the request fingerprint.
22
23 The request fingerprint is a hash that uniquely identifies the resource the
24 request points to. For example, take the following two urls:
25
26 http://www.example.com/query?id=111&cat=222
27 http://www.example.com/query?cat=222&id=111
28
29 Even though those are two different URLs both point to the same resource
30 and are equivalent (ie. they should return the same response).
31
32 Another example are cookies used to store session ids. Suppose the
33 following page is only accesible to authenticated users:
34
35 http://www.example.com/members/offers.html
36
37 Lot of sites use a cookie to store the session id, which adds a random
38 component to the HTTP Request and thus should be ignored when calculating
39 the fingerprint.
40
41 For this reason, request headers are ignored by default when calculating
42 the fingeprint. If you want to include specific headers use the
43 include_headers argument, which is a list of Request headers to include.
44
45 """
46 if include_headers:
47 include_headers = tuple(to_bytes(h.lower())
48 for h in sorted(include_headers))
49 cache = _fingerprint_cache.setdefault(request, {})
50 if include_headers not in cache:
51 fp = hashlib.sha1()
52 fp.update(to_bytes(request.method))
53 fp.update(to_bytes(canonicalize_url(request.url)))
54 fp.update(request.body or b'')
55 if include_headers:
56 for hdr in include_headers:
57 if hdr in request.headers:
58 fp.update(hdr)
59 for v in request.headers.getlist(hdr):
60 fp.update(v)
61 cache[include_headers] = fp.hexdigest()
62 return cache[include_headers]
63
64
65 def request_authenticate(request, username, password):
66 """Autenticate the given request (in place) using the HTTP basic access
67 authentication mechanism (RFC 2617) and the given username and password
68 """
69 request.headers['Authorization'] = basic_auth_header(username, password)
70
71
72 def request_httprepr(request):
73 """Return the raw HTTP representation (as bytes) of the given request.
74 This is provided only for reference since it's not the actual stream of
75 bytes that will be send when performing the request (that's controlled
76 by Twisted).
77 """
78 parsed = urlparse_cached(request)
79 path = urlunparse(('', '', parsed.path or '/', parsed.params, parsed.query, ''))
80 s = to_bytes(request.method) + b" " + to_bytes(path) + b" HTTP/1.1\r\n"
81 s += b"Host: " + to_bytes(parsed.hostname or b'') + b"\r\n"
82 if request.headers:
83 s += request.headers.to_string() + b"\r\n"
84 s += b"\r\n"
85 s += request.body
86 return s
87
88
89 def referer_str(request):
90 """ Return Referer HTTP header suitable for logging. """
91 referrer = request.headers.get('Referer')
92 if referrer is None:
93 return referrer
94 return to_native_str(referrer, errors='replace')
95
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scrapy/utils/request.py b/scrapy/utils/request.py
--- a/scrapy/utils/request.py
+++ b/scrapy/utils/request.py
@@ -30,7 +30,7 @@
and are equivalent (ie. they should return the same response).
Another example are cookies used to store session ids. Suppose the
- following page is only accesible to authenticated users:
+ following page is only accessible to authenticated users:
http://www.example.com/members/offers.html
| {"golden_diff": "diff --git a/scrapy/utils/request.py b/scrapy/utils/request.py\n--- a/scrapy/utils/request.py\n+++ b/scrapy/utils/request.py\n@@ -30,7 +30,7 @@\n and are equivalent (ie. they should return the same response).\n \n Another example are cookies used to store session ids. Suppose the\n- following page is only accesible to authenticated users:\n+ following page is only accessible to authenticated users:\n \n http://www.example.com/members/offers.html\n", "issue": "may be 'accessible'?\nin the function [request_fingerprint](https://github.com/scrapy/scrapy/blob/master/scrapy/utils/request.py) \uff0c\u2018accesible\u2019 may be \u2018accessible\u2019 in comments. OCD XD..\r\n\n", "before_files": [{"content": "\"\"\"\nThis module provides some useful functions for working with\nscrapy.http.Request objects\n\"\"\"\n\nfrom __future__ import print_function\nimport hashlib\nimport weakref\nfrom six.moves.urllib.parse import urlunparse\n\nfrom w3lib.http import basic_auth_header\nfrom scrapy.utils.python import to_bytes, to_native_str\n\nfrom w3lib.url import canonicalize_url\nfrom scrapy.utils.httpobj import urlparse_cached\n\n\n_fingerprint_cache = weakref.WeakKeyDictionary()\ndef request_fingerprint(request, include_headers=None):\n \"\"\"\n Return the request fingerprint.\n\n The request fingerprint is a hash that uniquely identifies the resource the\n request points to. For example, take the following two urls:\n\n http://www.example.com/query?id=111&cat=222\n http://www.example.com/query?cat=222&id=111\n\n Even though those are two different URLs both point to the same resource\n and are equivalent (ie. they should return the same response).\n\n Another example are cookies used to store session ids. Suppose the\n following page is only accesible to authenticated users:\n\n http://www.example.com/members/offers.html\n\n Lot of sites use a cookie to store the session id, which adds a random\n component to the HTTP Request and thus should be ignored when calculating\n the fingerprint.\n\n For this reason, request headers are ignored by default when calculating\n the fingeprint. 
If you want to include specific headers use the\n include_headers argument, which is a list of Request headers to include.\n\n \"\"\"\n if include_headers:\n include_headers = tuple(to_bytes(h.lower())\n for h in sorted(include_headers))\n cache = _fingerprint_cache.setdefault(request, {})\n if include_headers not in cache:\n fp = hashlib.sha1()\n fp.update(to_bytes(request.method))\n fp.update(to_bytes(canonicalize_url(request.url)))\n fp.update(request.body or b'')\n if include_headers:\n for hdr in include_headers:\n if hdr in request.headers:\n fp.update(hdr)\n for v in request.headers.getlist(hdr):\n fp.update(v)\n cache[include_headers] = fp.hexdigest()\n return cache[include_headers]\n\n\ndef request_authenticate(request, username, password):\n \"\"\"Autenticate the given request (in place) using the HTTP basic access\n authentication mechanism (RFC 2617) and the given username and password\n \"\"\"\n request.headers['Authorization'] = basic_auth_header(username, password)\n\n\ndef request_httprepr(request):\n \"\"\"Return the raw HTTP representation (as bytes) of the given request.\n This is provided only for reference since it's not the actual stream of\n bytes that will be send when performing the request (that's controlled\n by Twisted).\n \"\"\"\n parsed = urlparse_cached(request)\n path = urlunparse(('', '', parsed.path or '/', parsed.params, parsed.query, ''))\n s = to_bytes(request.method) + b\" \" + to_bytes(path) + b\" HTTP/1.1\\r\\n\"\n s += b\"Host: \" + to_bytes(parsed.hostname or b'') + b\"\\r\\n\"\n if request.headers:\n s += request.headers.to_string() + b\"\\r\\n\"\n s += b\"\\r\\n\"\n s += request.body\n return s\n\n\ndef referer_str(request):\n \"\"\" Return Referer HTTP header suitable for logging. \"\"\"\n referrer = request.headers.get('Referer')\n if referrer is None:\n return referrer\n return to_native_str(referrer, errors='replace')\n", "path": "scrapy/utils/request.py"}], "after_files": [{"content": "\"\"\"\nThis module provides some useful functions for working with\nscrapy.http.Request objects\n\"\"\"\n\nfrom __future__ import print_function\nimport hashlib\nimport weakref\nfrom six.moves.urllib.parse import urlunparse\n\nfrom w3lib.http import basic_auth_header\nfrom scrapy.utils.python import to_bytes, to_native_str\n\nfrom w3lib.url import canonicalize_url\nfrom scrapy.utils.httpobj import urlparse_cached\n\n\n_fingerprint_cache = weakref.WeakKeyDictionary()\ndef request_fingerprint(request, include_headers=None):\n \"\"\"\n Return the request fingerprint.\n\n The request fingerprint is a hash that uniquely identifies the resource the\n request points to. For example, take the following two urls:\n\n http://www.example.com/query?id=111&cat=222\n http://www.example.com/query?cat=222&id=111\n\n Even though those are two different URLs both point to the same resource\n and are equivalent (ie. they should return the same response).\n\n Another example are cookies used to store session ids. Suppose the\n following page is only accessible to authenticated users:\n\n http://www.example.com/members/offers.html\n\n Lot of sites use a cookie to store the session id, which adds a random\n component to the HTTP Request and thus should be ignored when calculating\n the fingerprint.\n\n For this reason, request headers are ignored by default when calculating\n the fingeprint. 
If you want to include specific headers use the\n include_headers argument, which is a list of Request headers to include.\n\n \"\"\"\n if include_headers:\n include_headers = tuple(to_bytes(h.lower())\n for h in sorted(include_headers))\n cache = _fingerprint_cache.setdefault(request, {})\n if include_headers not in cache:\n fp = hashlib.sha1()\n fp.update(to_bytes(request.method))\n fp.update(to_bytes(canonicalize_url(request.url)))\n fp.update(request.body or b'')\n if include_headers:\n for hdr in include_headers:\n if hdr in request.headers:\n fp.update(hdr)\n for v in request.headers.getlist(hdr):\n fp.update(v)\n cache[include_headers] = fp.hexdigest()\n return cache[include_headers]\n\n\ndef request_authenticate(request, username, password):\n \"\"\"Autenticate the given request (in place) using the HTTP basic access\n authentication mechanism (RFC 2617) and the given username and password\n \"\"\"\n request.headers['Authorization'] = basic_auth_header(username, password)\n\n\ndef request_httprepr(request):\n \"\"\"Return the raw HTTP representation (as bytes) of the given request.\n This is provided only for reference since it's not the actual stream of\n bytes that will be send when performing the request (that's controlled\n by Twisted).\n \"\"\"\n parsed = urlparse_cached(request)\n path = urlunparse(('', '', parsed.path or '/', parsed.params, parsed.query, ''))\n s = to_bytes(request.method) + b\" \" + to_bytes(path) + b\" HTTP/1.1\\r\\n\"\n s += b\"Host: \" + to_bytes(parsed.hostname or b'') + b\"\\r\\n\"\n if request.headers:\n s += request.headers.to_string() + b\"\\r\\n\"\n s += b\"\\r\\n\"\n s += request.body\n return s\n\n\ndef referer_str(request):\n \"\"\" Return Referer HTTP header suitable for logging. \"\"\"\n referrer = request.headers.get('Referer')\n if referrer is None:\n return referrer\n return to_native_str(referrer, errors='replace')\n", "path": "scrapy/utils/request.py"}]} | 1,263 | 109 |
gh_patches_debug_13920 | rasdani/github-patches | git_diff | searx__searx-1135 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Yacy results crash
Getting:
Engines cannot retrieve results:
yacy (unexpected crash)
> ERROR:searx.search:engine yacy : exception : 'url'
> Traceback (most recent call last):
> File "/home/leo/searx/searx/search.py", line 118, in search_one_request_safe
> search_results = search_one_request(engine, query, request_params, start_time, timeout_limit)
> File "/home/leo/searx/searx/search.py", line 110, in search_one_request
> return engine.response(response)
> File "/home/leo/searx/searx/engines/yacy.py", line 80, in response
> results.append({'url': result['url'],
> KeyError: 'url'
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `searx/engines/yacy.py`
Content:
```
1 # Yacy (Web, Images, Videos, Music, Files)
2 #
3 # @website http://yacy.net
4 # @provide-api yes
5 # (http://www.yacy-websuche.de/wiki/index.php/Dev:APIyacysearch)
6 #
7 # @using-api yes
8 # @results JSON
9 # @stable yes
10 # @parse (general) url, title, content, publishedDate
11 # @parse (images) url, title, img_src
12 #
13 # @todo parse video, audio and file results
14
15 from json import loads
16 from dateutil import parser
17 from searx.url_utils import urlencode
18
19 from searx.utils import html_to_text
20
21 # engine dependent config
22 categories = ['general', 'images'] # TODO , 'music', 'videos', 'files'
23 paging = True
24 language_support = True
25 number_of_results = 5
26
27 # search-url
28 base_url = 'http://localhost:8090'
29 search_url = '/yacysearch.json?{query}'\
30 '&startRecord={offset}'\
31 '&maximumRecords={limit}'\
32 '&contentdom={search_type}'\
33 '&resource=global'
34
35 # yacy specific type-definitions
36 search_types = {'general': 'text',
37 'images': 'image',
38 'files': 'app',
39 'music': 'audio',
40 'videos': 'video'}
41
42
43 # do search-request
44 def request(query, params):
45 offset = (params['pageno'] - 1) * number_of_results
46 search_type = search_types.get(params.get('category'), '0')
47
48 params['url'] = base_url +\
49 search_url.format(query=urlencode({'query': query}),
50 offset=offset,
51 limit=number_of_results,
52 search_type=search_type)
53
54 params['url'] += '&lr=lang_' + params['language'].split('-')[0]
55
56 return params
57
58
59 # get response from search-request
60 def response(resp):
61 results = []
62
63 raw_search_results = loads(resp.text)
64
65 # return empty array if there are no results
66 if not raw_search_results:
67 return []
68
69 search_results = raw_search_results.get('channels', [])
70
71 if len(search_results) == 0:
72 return []
73
74 for result in search_results[0].get('items', []):
75 # parse image results
76 if result.get('image'):
77 # append result
78 results.append({'url': result['url'],
79 'title': result['title'],
80 'content': '',
81 'img_src': result['image'],
82 'template': 'images.html'})
83
84 # parse general results
85 else:
86 publishedDate = parser.parse(result['pubDate'])
87
88 # append result
89 results.append({'url': result['link'],
90 'title': result['title'],
91 'content': html_to_text(result['description']),
92 'publishedDate': publishedDate})
93
94 # TODO parse video, audio and file results
95
96 # return results
97 return results
98
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/searx/engines/yacy.py b/searx/engines/yacy.py
--- a/searx/engines/yacy.py
+++ b/searx/engines/yacy.py
@@ -74,8 +74,17 @@
for result in search_results[0].get('items', []):
# parse image results
if result.get('image'):
+
+ result_url = ''
+ if 'url' in result:
+ result_url = result['url']
+ elif 'link' in result:
+ result_url = result['link']
+ else:
+ continue
+
# append result
- results.append({'url': result['url'],
+ results.append({'url': result_url,
'title': result['title'],
'content': '',
'img_src': result['image'],
| {"golden_diff": "diff --git a/searx/engines/yacy.py b/searx/engines/yacy.py\n--- a/searx/engines/yacy.py\n+++ b/searx/engines/yacy.py\n@@ -74,8 +74,17 @@\n for result in search_results[0].get('items', []):\n # parse image results\n if result.get('image'):\n+\n+ result_url = ''\n+ if 'url' in result:\n+ result_url = result['url']\n+ elif 'link' in result:\n+ result_url = result['link']\n+ else:\n+ continue\n+\n # append result\n- results.append({'url': result['url'],\n+ results.append({'url': result_url,\n 'title': result['title'],\n 'content': '',\n 'img_src': result['image'],\n", "issue": "Yacy results crash\nGetting:\r\nEngines cannot retrieve results:\r\nyacy (unexpected crash)\r\n\r\n> ERROR:searx.search:engine yacy : exception : 'url'\r\n> Traceback (most recent call last):\r\n> File \"/home/leo/searx/searx/search.py\", line 118, in search_one_request_safe\r\n> search_results = search_one_request(engine, query, request_params, start_time, timeout_limit)\r\n> File \"/home/leo/searx/searx/search.py\", line 110, in search_one_request\r\n> return engine.response(response)\r\n> File \"/home/leo/searx/searx/engines/yacy.py\", line 80, in response\r\n> results.append({'url': result['url'],\r\n> KeyError: 'url'\n", "before_files": [{"content": "# Yacy (Web, Images, Videos, Music, Files)\n#\n# @website http://yacy.net\n# @provide-api yes\n# (http://www.yacy-websuche.de/wiki/index.php/Dev:APIyacysearch)\n#\n# @using-api yes\n# @results JSON\n# @stable yes\n# @parse (general) url, title, content, publishedDate\n# @parse (images) url, title, img_src\n#\n# @todo parse video, audio and file results\n\nfrom json import loads\nfrom dateutil import parser\nfrom searx.url_utils import urlencode\n\nfrom searx.utils import html_to_text\n\n# engine dependent config\ncategories = ['general', 'images'] # TODO , 'music', 'videos', 'files'\npaging = True\nlanguage_support = True\nnumber_of_results = 5\n\n# search-url\nbase_url = 'http://localhost:8090'\nsearch_url = '/yacysearch.json?{query}'\\\n '&startRecord={offset}'\\\n '&maximumRecords={limit}'\\\n '&contentdom={search_type}'\\\n '&resource=global'\n\n# yacy specific type-definitions\nsearch_types = {'general': 'text',\n 'images': 'image',\n 'files': 'app',\n 'music': 'audio',\n 'videos': 'video'}\n\n\n# do search-request\ndef request(query, params):\n offset = (params['pageno'] - 1) * number_of_results\n search_type = search_types.get(params.get('category'), '0')\n\n params['url'] = base_url +\\\n search_url.format(query=urlencode({'query': query}),\n offset=offset,\n limit=number_of_results,\n search_type=search_type)\n\n params['url'] += '&lr=lang_' + params['language'].split('-')[0]\n\n return params\n\n\n# get response from search-request\ndef response(resp):\n results = []\n\n raw_search_results = loads(resp.text)\n\n # return empty array if there are no results\n if not raw_search_results:\n return []\n\n search_results = raw_search_results.get('channels', [])\n\n if len(search_results) == 0:\n return []\n\n for result in search_results[0].get('items', []):\n # parse image results\n if result.get('image'):\n # append result\n results.append({'url': result['url'],\n 'title': result['title'],\n 'content': '',\n 'img_src': result['image'],\n 'template': 'images.html'})\n\n # parse general results\n else:\n publishedDate = parser.parse(result['pubDate'])\n\n # append result\n results.append({'url': result['link'],\n 'title': result['title'],\n 'content': html_to_text(result['description']),\n 'publishedDate': publishedDate})\n\n # TODO parse video, 
audio and file results\n\n # return results\n return results\n", "path": "searx/engines/yacy.py"}], "after_files": [{"content": "# Yacy (Web, Images, Videos, Music, Files)\n#\n# @website http://yacy.net\n# @provide-api yes\n# (http://www.yacy-websuche.de/wiki/index.php/Dev:APIyacysearch)\n#\n# @using-api yes\n# @results JSON\n# @stable yes\n# @parse (general) url, title, content, publishedDate\n# @parse (images) url, title, img_src\n#\n# @todo parse video, audio and file results\n\nfrom json import loads\nfrom dateutil import parser\nfrom searx.url_utils import urlencode\n\nfrom searx.utils import html_to_text\n\n# engine dependent config\ncategories = ['general', 'images'] # TODO , 'music', 'videos', 'files'\npaging = True\nlanguage_support = True\nnumber_of_results = 5\n\n# search-url\nbase_url = 'http://localhost:8090'\nsearch_url = '/yacysearch.json?{query}'\\\n '&startRecord={offset}'\\\n '&maximumRecords={limit}'\\\n '&contentdom={search_type}'\\\n '&resource=global'\n\n# yacy specific type-definitions\nsearch_types = {'general': 'text',\n 'images': 'image',\n 'files': 'app',\n 'music': 'audio',\n 'videos': 'video'}\n\n\n# do search-request\ndef request(query, params):\n offset = (params['pageno'] - 1) * number_of_results\n search_type = search_types.get(params.get('category'), '0')\n\n params['url'] = base_url +\\\n search_url.format(query=urlencode({'query': query}),\n offset=offset,\n limit=number_of_results,\n search_type=search_type)\n\n params['url'] += '&lr=lang_' + params['language'].split('-')[0]\n\n return params\n\n\n# get response from search-request\ndef response(resp):\n results = []\n\n raw_search_results = loads(resp.text)\n\n # return empty array if there are no results\n if not raw_search_results:\n return []\n\n search_results = raw_search_results.get('channels', [])\n\n if len(search_results) == 0:\n return []\n\n for result in search_results[0].get('items', []):\n # parse image results\n if result.get('image'):\n\n result_url = ''\n if 'url' in result:\n result_url = result['url']\n elif 'link' in result:\n result_url = result['link']\n else:\n continue\n\n # append result\n results.append({'url': result_url,\n 'title': result['title'],\n 'content': '',\n 'img_src': result['image'],\n 'template': 'images.html'})\n\n # parse general results\n else:\n publishedDate = parser.parse(result['pubDate'])\n\n # append result\n results.append({'url': result['link'],\n 'title': result['title'],\n 'content': html_to_text(result['description']),\n 'publishedDate': publishedDate})\n\n # TODO parse video, audio and file results\n\n # return results\n return results\n", "path": "searx/engines/yacy.py"}]} | 1,275 | 189 |
gh_patches_debug_12988 | rasdani/github-patches | git_diff | elastic__ecs-1488 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`doc_values` parameter not set in Beats artifact
Certain fields have `index: false` and `doc_values: false` in their ECS definition, like `event.original`:
https://github.com/elastic/ecs/blob/master/schemas/event.yml#L577-L599
When `doc_values: false` is defined in the field definition, it's not being added to the maintained Beats fields YAML artifact:
https://github.com/elastic/ecs/blob/master/generated/beats/fields.ecs.yml#L1737-L1750
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/generators/beats.py`
Content:
```
1 from os.path import join
2 from collections import OrderedDict
3 from generators import ecs_helpers
4
5
6 def generate(ecs_nested, ecs_version, out_dir):
7 # Load temporary allowlist for default_fields workaround.
8 df_allowlist = ecs_helpers.yaml_load('scripts/generators/beats_default_fields_allowlist.yml')
9
10 # base first
11 beats_fields = fieldset_field_array(ecs_nested['base']['fields'], df_allowlist, ecs_nested['base']['prefix'])
12
13 allowed_fieldset_keys = ['name', 'title', 'group', 'description', 'footnote', 'type']
14 # other fieldsets
15 for fieldset_name in sorted(ecs_nested):
16 if 'base' == fieldset_name:
17 continue
18 fieldset = ecs_nested[fieldset_name]
19
20 # Handle when `root:true`
21 if fieldset.get('root', False):
22 beats_fields.extend(fieldset_field_array(fieldset['fields'], df_allowlist, fieldset['prefix']))
23 continue
24
25 beats_field = ecs_helpers.dict_copy_keys_ordered(fieldset, allowed_fieldset_keys)
26 beats_field['fields'] = fieldset_field_array(fieldset['fields'], df_allowlist, fieldset['prefix'])
27 beats_fields.append(beats_field)
28
29 beats_file = OrderedDict()
30 beats_file['key'] = 'ecs'
31 beats_file['title'] = 'ECS'
32 beats_file['description'] = 'ECS Fields.'
33 beats_file['fields'] = beats_fields
34
35 write_beats_yaml(beats_file, ecs_version, out_dir)
36
37
38 def fieldset_field_array(source_fields, df_allowlist, fieldset_prefix):
39 allowed_keys = ['name', 'level', 'required', 'type', 'object_type',
40 'ignore_above', 'multi_fields', 'format', 'input_format',
41 'output_format', 'output_precision', 'description',
42 'example', 'enabled', 'index', 'path', 'scaling_factor']
43 multi_fields_allowed_keys = ['name', 'type', 'norms', 'default_field', 'normalizer', 'ignore_above']
44
45 fields = []
46 for nested_field_name in source_fields:
47 ecs_field = source_fields[nested_field_name]
48 beats_field = ecs_helpers.dict_copy_keys_ordered(ecs_field, allowed_keys)
49 if '' == fieldset_prefix:
50 contextual_name = nested_field_name
51 else:
52 contextual_name = '.'.join(nested_field_name.split('.')[1:])
53
54 cleaned_multi_fields = []
55 if 'multi_fields' in ecs_field:
56 for mf in ecs_field['multi_fields']:
57 # Set default_field if necessary. Avoid adding the key if the parent
58 # field already is marked with default_field: false.
59 if not mf['flat_name'] in df_allowlist and ecs_field['flat_name'] in df_allowlist:
60 mf['default_field'] = False
61 cleaned_multi_fields.append(
62 ecs_helpers.dict_copy_keys_ordered(mf, multi_fields_allowed_keys))
63 beats_field['multi_fields'] = cleaned_multi_fields
64
65 beats_field['name'] = contextual_name
66
67 if not ecs_field['flat_name'] in df_allowlist:
68 beats_field['default_field'] = False
69
70 fields.append(beats_field)
71 return sorted(fields, key=lambda x: x['name'])
72
73 # Helpers
74
75
76 def write_beats_yaml(beats_file, ecs_version, out_dir):
77 ecs_helpers.make_dirs(join(out_dir, 'beats'))
78 warning = file_header().format(version=ecs_version)
79 ecs_helpers.yaml_dump(join(out_dir, 'beats/fields.ecs.yml'), [beats_file], preamble=warning)
80
81
82 # Templates
83
84
85 def file_header():
86 return """
87 # WARNING! Do not edit this file directly, it was generated by the ECS project,
88 # based on ECS version {version}.
89 # Please visit https://github.com/elastic/ecs to suggest changes to ECS fields.
90
91 """.lstrip()
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scripts/generators/beats.py b/scripts/generators/beats.py
--- a/scripts/generators/beats.py
+++ b/scripts/generators/beats.py
@@ -39,7 +39,8 @@
allowed_keys = ['name', 'level', 'required', 'type', 'object_type',
'ignore_above', 'multi_fields', 'format', 'input_format',
'output_format', 'output_precision', 'description',
- 'example', 'enabled', 'index', 'path', 'scaling_factor']
+ 'example', 'enabled', 'index', 'doc_values', 'path',
+ 'scaling_factor']
multi_fields_allowed_keys = ['name', 'type', 'norms', 'default_field', 'normalizer', 'ignore_above']
fields = []
| {"golden_diff": "diff --git a/scripts/generators/beats.py b/scripts/generators/beats.py\n--- a/scripts/generators/beats.py\n+++ b/scripts/generators/beats.py\n@@ -39,7 +39,8 @@\n allowed_keys = ['name', 'level', 'required', 'type', 'object_type',\n 'ignore_above', 'multi_fields', 'format', 'input_format',\n 'output_format', 'output_precision', 'description',\n- 'example', 'enabled', 'index', 'path', 'scaling_factor']\n+ 'example', 'enabled', 'index', 'doc_values', 'path',\n+ 'scaling_factor']\n multi_fields_allowed_keys = ['name', 'type', 'norms', 'default_field', 'normalizer', 'ignore_above']\n \n fields = []\n", "issue": "`doc_values` parameter not set in Beats artifact \nCertain fields have `index: false` and `doc_values: false` in their ECS definition, like `event.original`:\r\n\r\nhttps://github.com/elastic/ecs/blob/master/schemas/event.yml#L577-L599\r\n\r\nWhen `doc_values: false` is defined in the field definition, it's not being added to the maintained Beats fields YAML artifact:\r\n\r\nhttps://github.com/elastic/ecs/blob/master/generated/beats/fields.ecs.yml#L1737-L1750\n", "before_files": [{"content": "from os.path import join\nfrom collections import OrderedDict\nfrom generators import ecs_helpers\n\n\ndef generate(ecs_nested, ecs_version, out_dir):\n # Load temporary allowlist for default_fields workaround.\n df_allowlist = ecs_helpers.yaml_load('scripts/generators/beats_default_fields_allowlist.yml')\n\n # base first\n beats_fields = fieldset_field_array(ecs_nested['base']['fields'], df_allowlist, ecs_nested['base']['prefix'])\n\n allowed_fieldset_keys = ['name', 'title', 'group', 'description', 'footnote', 'type']\n # other fieldsets\n for fieldset_name in sorted(ecs_nested):\n if 'base' == fieldset_name:\n continue\n fieldset = ecs_nested[fieldset_name]\n\n # Handle when `root:true`\n if fieldset.get('root', False):\n beats_fields.extend(fieldset_field_array(fieldset['fields'], df_allowlist, fieldset['prefix']))\n continue\n\n beats_field = ecs_helpers.dict_copy_keys_ordered(fieldset, allowed_fieldset_keys)\n beats_field['fields'] = fieldset_field_array(fieldset['fields'], df_allowlist, fieldset['prefix'])\n beats_fields.append(beats_field)\n\n beats_file = OrderedDict()\n beats_file['key'] = 'ecs'\n beats_file['title'] = 'ECS'\n beats_file['description'] = 'ECS Fields.'\n beats_file['fields'] = beats_fields\n\n write_beats_yaml(beats_file, ecs_version, out_dir)\n\n\ndef fieldset_field_array(source_fields, df_allowlist, fieldset_prefix):\n allowed_keys = ['name', 'level', 'required', 'type', 'object_type',\n 'ignore_above', 'multi_fields', 'format', 'input_format',\n 'output_format', 'output_precision', 'description',\n 'example', 'enabled', 'index', 'path', 'scaling_factor']\n multi_fields_allowed_keys = ['name', 'type', 'norms', 'default_field', 'normalizer', 'ignore_above']\n\n fields = []\n for nested_field_name in source_fields:\n ecs_field = source_fields[nested_field_name]\n beats_field = ecs_helpers.dict_copy_keys_ordered(ecs_field, allowed_keys)\n if '' == fieldset_prefix:\n contextual_name = nested_field_name\n else:\n contextual_name = '.'.join(nested_field_name.split('.')[1:])\n\n cleaned_multi_fields = []\n if 'multi_fields' in ecs_field:\n for mf in ecs_field['multi_fields']:\n # Set default_field if necessary. 
Avoid adding the key if the parent\n # field already is marked with default_field: false.\n if not mf['flat_name'] in df_allowlist and ecs_field['flat_name'] in df_allowlist:\n mf['default_field'] = False\n cleaned_multi_fields.append(\n ecs_helpers.dict_copy_keys_ordered(mf, multi_fields_allowed_keys))\n beats_field['multi_fields'] = cleaned_multi_fields\n\n beats_field['name'] = contextual_name\n\n if not ecs_field['flat_name'] in df_allowlist:\n beats_field['default_field'] = False\n\n fields.append(beats_field)\n return sorted(fields, key=lambda x: x['name'])\n\n# Helpers\n\n\ndef write_beats_yaml(beats_file, ecs_version, out_dir):\n ecs_helpers.make_dirs(join(out_dir, 'beats'))\n warning = file_header().format(version=ecs_version)\n ecs_helpers.yaml_dump(join(out_dir, 'beats/fields.ecs.yml'), [beats_file], preamble=warning)\n\n\n# Templates\n\n\ndef file_header():\n return \"\"\"\n# WARNING! Do not edit this file directly, it was generated by the ECS project,\n# based on ECS version {version}.\n# Please visit https://github.com/elastic/ecs to suggest changes to ECS fields.\n\n\"\"\".lstrip()\n", "path": "scripts/generators/beats.py"}], "after_files": [{"content": "from os.path import join\nfrom collections import OrderedDict\nfrom generators import ecs_helpers\n\n\ndef generate(ecs_nested, ecs_version, out_dir):\n # Load temporary allowlist for default_fields workaround.\n df_allowlist = ecs_helpers.yaml_load('scripts/generators/beats_default_fields_allowlist.yml')\n\n # base first\n beats_fields = fieldset_field_array(ecs_nested['base']['fields'], df_allowlist, ecs_nested['base']['prefix'])\n\n allowed_fieldset_keys = ['name', 'title', 'group', 'description', 'footnote', 'type']\n # other fieldsets\n for fieldset_name in sorted(ecs_nested):\n if 'base' == fieldset_name:\n continue\n fieldset = ecs_nested[fieldset_name]\n\n # Handle when `root:true`\n if fieldset.get('root', False):\n beats_fields.extend(fieldset_field_array(fieldset['fields'], df_allowlist, fieldset['prefix']))\n continue\n\n beats_field = ecs_helpers.dict_copy_keys_ordered(fieldset, allowed_fieldset_keys)\n beats_field['fields'] = fieldset_field_array(fieldset['fields'], df_allowlist, fieldset['prefix'])\n beats_fields.append(beats_field)\n\n beats_file = OrderedDict()\n beats_file['key'] = 'ecs'\n beats_file['title'] = 'ECS'\n beats_file['description'] = 'ECS Fields.'\n beats_file['fields'] = beats_fields\n\n write_beats_yaml(beats_file, ecs_version, out_dir)\n\n\ndef fieldset_field_array(source_fields, df_allowlist, fieldset_prefix):\n allowed_keys = ['name', 'level', 'required', 'type', 'object_type',\n 'ignore_above', 'multi_fields', 'format', 'input_format',\n 'output_format', 'output_precision', 'description',\n 'example', 'enabled', 'index', 'doc_values', 'path',\n 'scaling_factor']\n multi_fields_allowed_keys = ['name', 'type', 'norms', 'default_field', 'normalizer', 'ignore_above']\n\n fields = []\n for nested_field_name in source_fields:\n ecs_field = source_fields[nested_field_name]\n beats_field = ecs_helpers.dict_copy_keys_ordered(ecs_field, allowed_keys)\n if '' == fieldset_prefix:\n contextual_name = nested_field_name\n else:\n contextual_name = '.'.join(nested_field_name.split('.')[1:])\n\n cleaned_multi_fields = []\n if 'multi_fields' in ecs_field:\n for mf in ecs_field['multi_fields']:\n # Set default_field if necessary. 
Avoid adding the key if the parent\n # field already is marked with default_field: false.\n if not mf['flat_name'] in df_allowlist and ecs_field['flat_name'] in df_allowlist:\n mf['default_field'] = False\n cleaned_multi_fields.append(\n ecs_helpers.dict_copy_keys_ordered(mf, multi_fields_allowed_keys))\n beats_field['multi_fields'] = cleaned_multi_fields\n\n beats_field['name'] = contextual_name\n\n if not ecs_field['flat_name'] in df_allowlist:\n beats_field['default_field'] = False\n\n fields.append(beats_field)\n return sorted(fields, key=lambda x: x['name'])\n\n# Helpers\n\n\ndef write_beats_yaml(beats_file, ecs_version, out_dir):\n ecs_helpers.make_dirs(join(out_dir, 'beats'))\n warning = file_header().format(version=ecs_version)\n ecs_helpers.yaml_dump(join(out_dir, 'beats/fields.ecs.yml'), [beats_file], preamble=warning)\n\n\n# Templates\n\n\ndef file_header():\n return \"\"\"\n# WARNING! Do not edit this file directly, it was generated by the ECS project,\n# based on ECS version {version}.\n# Please visit https://github.com/elastic/ecs to suggest changes to ECS fields.\n\n\"\"\".lstrip()\n", "path": "scripts/generators/beats.py"}]} | 1,380 | 175 |
gh_patches_debug_8516 | rasdani/github-patches | git_diff | iterative__dvc-10005 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
exp save: Short option for --message is -M, but for dvc exp run it is -m
It would be nice if the short options of `dvc exp run` and `dvc exp save` for specifying a commit message would be identical. Also, best to use the same options as one would use for `git commit`, i.e., `-m` instead of `-M`.
```
usage: dvc experiments save [-h] [-q | -v] [-f] [--json] [-n <name>] [-I <path>] [-M MESSAGE]
Save current workspace as an experiment.
Documentation: <https://man.dvc.org/exp/save>
options:
-h, --help show this help message and exit
-q, --quiet Be quiet.
-v, --verbose Be verbose.
-f, --force Replace experiment if it already exists.
--json Show output in JSON format.
-n <name>, --name <name>
Human-readable experiment name. If not specified, a name will be auto-generated.
-I <path>, --include-untracked <path>
List of untracked paths to include in the experiment.
-M MESSAGE, --message MESSAGE
Custom commit message to use when committing the experiment.
```
DVC CLI v3.22.1
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dvc/commands/experiments/save.py`
Content:
```
1 import argparse
2 import logging
3
4 from dvc.cli.command import CmdBase
5 from dvc.cli.utils import append_doc_link
6 from dvc.exceptions import DvcException
7 from dvc.ui import ui
8
9 logger = logging.getLogger(__name__)
10
11
12 class CmdExperimentsSave(CmdBase):
13 def run(self):
14 try:
15 ref = self.repo.experiments.save(
16 name=self.args.name,
17 force=self.args.force,
18 include_untracked=self.args.include_untracked,
19 message=self.args.message,
20 )
21 except DvcException:
22 logger.exception("failed to save experiment")
23 return 1
24
25 if self.args.json:
26 ui.write_json({"ref": ref})
27 else:
28 name = self.repo.experiments.get_exact_name([ref])[ref]
29 ui.write(f"Experiment has been saved as: {name}")
30
31 return 0
32
33
34 def add_parser(experiments_subparsers, parent_parser):
35 EXPERIMENTS_SAVE_HELP = "Save current workspace as an experiment."
36 save_parser = experiments_subparsers.add_parser(
37 "save",
38 parents=[parent_parser],
39 description=append_doc_link(EXPERIMENTS_SAVE_HELP, "exp/save"),
40 help=EXPERIMENTS_SAVE_HELP,
41 formatter_class=argparse.RawDescriptionHelpFormatter,
42 )
43 save_parser.add_argument(
44 "-f",
45 "--force",
46 action="store_true",
47 default=False,
48 help="Replace experiment if it already exists.",
49 )
50 save_parser.add_argument(
51 "--json",
52 action="store_true",
53 default=False,
54 help="Show output in JSON format.",
55 )
56 save_parser.add_argument(
57 "-n",
58 "--name",
59 default=None,
60 help=(
61 "Human-readable experiment name. If not specified, a name will "
62 "be auto-generated."
63 ),
64 metavar="<name>",
65 )
66 save_parser.add_argument(
67 "-I",
68 "--include-untracked",
69 action="append",
70 default=[],
71 help="List of untracked paths to include in the experiment.",
72 metavar="<path>",
73 )
74 save_parser.add_argument(
75 "-M",
76 "--message",
77 type=str,
78 default=None,
79 help="Custom commit message to use when committing the experiment.",
80 )
81 save_parser.set_defaults(func=CmdExperimentsSave)
82
```
Path: `dvc/commands/experiments/exec_run.py`
Content:
```
1 import logging
2
3 from dvc.cli.command import CmdBaseNoRepo
4
5 logger = logging.getLogger(__name__)
6
7
8 class CmdExecutorRun(CmdBaseNoRepo):
9 """Run an experiment executor."""
10
11 def run(self):
12 from dvc.repo.experiments.executor.base import BaseExecutor, ExecutorInfo
13 from dvc.utils.serialize import load_json
14
15 info = ExecutorInfo.from_dict(load_json(self.args.infofile))
16 BaseExecutor.reproduce(
17 info=info,
18 rev="",
19 queue=None,
20 log_level=logger.getEffectiveLevel(),
21 infofile=self.args.infofile,
22 copy_paths=self.args.copy_paths,
23 message=self.args.message,
24 )
25 return 0
26
27
28 def add_parser(experiments_subparsers, parent_parser):
29 EXEC_RUN_HELP = "Run an experiment executor."
30 exec_run_parser = experiments_subparsers.add_parser(
31 "exec-run",
32 parents=[parent_parser],
33 description=EXEC_RUN_HELP,
34 add_help=False,
35 )
36 exec_run_parser.add_argument(
37 "--infofile",
38 help="Path to executor info file",
39 default=None,
40 )
41 exec_run_parser.add_argument(
42 "-C",
43 "--copy-paths",
44 action="append",
45 default=[],
46 help=(
47 "List of ignored or untracked paths to copy into the temp directory."
48 " Only used if `--temp` or `--queue` is specified."
49 ),
50 )
51 exec_run_parser.add_argument(
52 "-M",
53 "--message",
54 type=str,
55 default=None,
56 help="Custom commit message to use when committing the experiment.",
57 )
58 exec_run_parser.set_defaults(func=CmdExecutorRun)
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dvc/commands/experiments/exec_run.py b/dvc/commands/experiments/exec_run.py
--- a/dvc/commands/experiments/exec_run.py
+++ b/dvc/commands/experiments/exec_run.py
@@ -49,7 +49,7 @@
),
)
exec_run_parser.add_argument(
- "-M",
+ "-m",
"--message",
type=str,
default=None,
diff --git a/dvc/commands/experiments/save.py b/dvc/commands/experiments/save.py
--- a/dvc/commands/experiments/save.py
+++ b/dvc/commands/experiments/save.py
@@ -72,7 +72,7 @@
metavar="<path>",
)
save_parser.add_argument(
- "-M",
+ "-m",
"--message",
type=str,
default=None,
| {"golden_diff": "diff --git a/dvc/commands/experiments/exec_run.py b/dvc/commands/experiments/exec_run.py\n--- a/dvc/commands/experiments/exec_run.py\n+++ b/dvc/commands/experiments/exec_run.py\n@@ -49,7 +49,7 @@\n ),\n )\n exec_run_parser.add_argument(\n- \"-M\",\n+ \"-m\",\n \"--message\",\n type=str,\n default=None,\ndiff --git a/dvc/commands/experiments/save.py b/dvc/commands/experiments/save.py\n--- a/dvc/commands/experiments/save.py\n+++ b/dvc/commands/experiments/save.py\n@@ -72,7 +72,7 @@\n metavar=\"<path>\",\n )\n save_parser.add_argument(\n- \"-M\",\n+ \"-m\",\n \"--message\",\n type=str,\n default=None,\n", "issue": "exp save: Short option for --message is -M, but for dvc exp run it is -m\nIt would be nice if the short options of `dvc exp run` and `dvc exp save` for specifying a commit message would be identical. Also, best to use the same options as one would use for `git commit`, i.e., `-m` instead of `-M`.\r\n\r\n```\r\nusage: dvc experiments save [-h] [-q | -v] [-f] [--json] [-n <name>] [-I <path>] [-M MESSAGE]\r\n\r\nSave current workspace as an experiment.\r\nDocumentation: <https://man.dvc.org/exp/save>\r\n\r\noptions:\r\n -h, --help show this help message and exit\r\n -q, --quiet Be quiet.\r\n -v, --verbose Be verbose.\r\n -f, --force Replace experiment if it already exists.\r\n --json Show output in JSON format.\r\n -n <name>, --name <name>\r\n Human-readable experiment name. If not specified, a name will be auto-generated.\r\n -I <path>, --include-untracked <path>\r\n List of untracked paths to include in the experiment.\r\n -M MESSAGE, --message MESSAGE\r\n Custom commit message to use when committing the experiment.\r\n```\r\n\r\nDVC CLI v3.22.1\n", "before_files": [{"content": "import argparse\nimport logging\n\nfrom dvc.cli.command import CmdBase\nfrom dvc.cli.utils import append_doc_link\nfrom dvc.exceptions import DvcException\nfrom dvc.ui import ui\n\nlogger = logging.getLogger(__name__)\n\n\nclass CmdExperimentsSave(CmdBase):\n def run(self):\n try:\n ref = self.repo.experiments.save(\n name=self.args.name,\n force=self.args.force,\n include_untracked=self.args.include_untracked,\n message=self.args.message,\n )\n except DvcException:\n logger.exception(\"failed to save experiment\")\n return 1\n\n if self.args.json:\n ui.write_json({\"ref\": ref})\n else:\n name = self.repo.experiments.get_exact_name([ref])[ref]\n ui.write(f\"Experiment has been saved as: {name}\")\n\n return 0\n\n\ndef add_parser(experiments_subparsers, parent_parser):\n EXPERIMENTS_SAVE_HELP = \"Save current workspace as an experiment.\"\n save_parser = experiments_subparsers.add_parser(\n \"save\",\n parents=[parent_parser],\n description=append_doc_link(EXPERIMENTS_SAVE_HELP, \"exp/save\"),\n help=EXPERIMENTS_SAVE_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n save_parser.add_argument(\n \"-f\",\n \"--force\",\n action=\"store_true\",\n default=False,\n help=\"Replace experiment if it already exists.\",\n )\n save_parser.add_argument(\n \"--json\",\n action=\"store_true\",\n default=False,\n help=\"Show output in JSON format.\",\n )\n save_parser.add_argument(\n \"-n\",\n \"--name\",\n default=None,\n help=(\n \"Human-readable experiment name. 
If not specified, a name will \"\n \"be auto-generated.\"\n ),\n metavar=\"<name>\",\n )\n save_parser.add_argument(\n \"-I\",\n \"--include-untracked\",\n action=\"append\",\n default=[],\n help=\"List of untracked paths to include in the experiment.\",\n metavar=\"<path>\",\n )\n save_parser.add_argument(\n \"-M\",\n \"--message\",\n type=str,\n default=None,\n help=\"Custom commit message to use when committing the experiment.\",\n )\n save_parser.set_defaults(func=CmdExperimentsSave)\n", "path": "dvc/commands/experiments/save.py"}, {"content": "import logging\n\nfrom dvc.cli.command import CmdBaseNoRepo\n\nlogger = logging.getLogger(__name__)\n\n\nclass CmdExecutorRun(CmdBaseNoRepo):\n \"\"\"Run an experiment executor.\"\"\"\n\n def run(self):\n from dvc.repo.experiments.executor.base import BaseExecutor, ExecutorInfo\n from dvc.utils.serialize import load_json\n\n info = ExecutorInfo.from_dict(load_json(self.args.infofile))\n BaseExecutor.reproduce(\n info=info,\n rev=\"\",\n queue=None,\n log_level=logger.getEffectiveLevel(),\n infofile=self.args.infofile,\n copy_paths=self.args.copy_paths,\n message=self.args.message,\n )\n return 0\n\n\ndef add_parser(experiments_subparsers, parent_parser):\n EXEC_RUN_HELP = \"Run an experiment executor.\"\n exec_run_parser = experiments_subparsers.add_parser(\n \"exec-run\",\n parents=[parent_parser],\n description=EXEC_RUN_HELP,\n add_help=False,\n )\n exec_run_parser.add_argument(\n \"--infofile\",\n help=\"Path to executor info file\",\n default=None,\n )\n exec_run_parser.add_argument(\n \"-C\",\n \"--copy-paths\",\n action=\"append\",\n default=[],\n help=(\n \"List of ignored or untracked paths to copy into the temp directory.\"\n \" Only used if `--temp` or `--queue` is specified.\"\n ),\n )\n exec_run_parser.add_argument(\n \"-M\",\n \"--message\",\n type=str,\n default=None,\n help=\"Custom commit message to use when committing the experiment.\",\n )\n exec_run_parser.set_defaults(func=CmdExecutorRun)\n", "path": "dvc/commands/experiments/exec_run.py"}], "after_files": [{"content": "import argparse\nimport logging\n\nfrom dvc.cli.command import CmdBase\nfrom dvc.cli.utils import append_doc_link\nfrom dvc.exceptions import DvcException\nfrom dvc.ui import ui\n\nlogger = logging.getLogger(__name__)\n\n\nclass CmdExperimentsSave(CmdBase):\n def run(self):\n try:\n ref = self.repo.experiments.save(\n name=self.args.name,\n force=self.args.force,\n include_untracked=self.args.include_untracked,\n message=self.args.message,\n )\n except DvcException:\n logger.exception(\"failed to save experiment\")\n return 1\n\n if self.args.json:\n ui.write_json({\"ref\": ref})\n else:\n name = self.repo.experiments.get_exact_name([ref])[ref]\n ui.write(f\"Experiment has been saved as: {name}\")\n\n return 0\n\n\ndef add_parser(experiments_subparsers, parent_parser):\n EXPERIMENTS_SAVE_HELP = \"Save current workspace as an experiment.\"\n save_parser = experiments_subparsers.add_parser(\n \"save\",\n parents=[parent_parser],\n description=append_doc_link(EXPERIMENTS_SAVE_HELP, \"exp/save\"),\n help=EXPERIMENTS_SAVE_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n save_parser.add_argument(\n \"-f\",\n \"--force\",\n action=\"store_true\",\n default=False,\n help=\"Replace experiment if it already exists.\",\n )\n save_parser.add_argument(\n \"--json\",\n action=\"store_true\",\n default=False,\n help=\"Show output in JSON format.\",\n )\n save_parser.add_argument(\n \"-n\",\n \"--name\",\n default=None,\n help=(\n \"Human-readable 
experiment name. If not specified, a name will \"\n \"be auto-generated.\"\n ),\n metavar=\"<name>\",\n )\n save_parser.add_argument(\n \"-I\",\n \"--include-untracked\",\n action=\"append\",\n default=[],\n help=\"List of untracked paths to include in the experiment.\",\n metavar=\"<path>\",\n )\n save_parser.add_argument(\n \"-m\",\n \"--message\",\n type=str,\n default=None,\n help=\"Custom commit message to use when committing the experiment.\",\n )\n save_parser.set_defaults(func=CmdExperimentsSave)\n", "path": "dvc/commands/experiments/save.py"}, {"content": "import logging\n\nfrom dvc.cli.command import CmdBaseNoRepo\n\nlogger = logging.getLogger(__name__)\n\n\nclass CmdExecutorRun(CmdBaseNoRepo):\n \"\"\"Run an experiment executor.\"\"\"\n\n def run(self):\n from dvc.repo.experiments.executor.base import BaseExecutor, ExecutorInfo\n from dvc.utils.serialize import load_json\n\n info = ExecutorInfo.from_dict(load_json(self.args.infofile))\n BaseExecutor.reproduce(\n info=info,\n rev=\"\",\n queue=None,\n log_level=logger.getEffectiveLevel(),\n infofile=self.args.infofile,\n copy_paths=self.args.copy_paths,\n message=self.args.message,\n )\n return 0\n\n\ndef add_parser(experiments_subparsers, parent_parser):\n EXEC_RUN_HELP = \"Run an experiment executor.\"\n exec_run_parser = experiments_subparsers.add_parser(\n \"exec-run\",\n parents=[parent_parser],\n description=EXEC_RUN_HELP,\n add_help=False,\n )\n exec_run_parser.add_argument(\n \"--infofile\",\n help=\"Path to executor info file\",\n default=None,\n )\n exec_run_parser.add_argument(\n \"-C\",\n \"--copy-paths\",\n action=\"append\",\n default=[],\n help=(\n \"List of ignored or untracked paths to copy into the temp directory.\"\n \" Only used if `--temp` or `--queue` is specified.\"\n ),\n )\n exec_run_parser.add_argument(\n \"-m\",\n \"--message\",\n type=str,\n default=None,\n help=\"Custom commit message to use when committing the experiment.\",\n )\n exec_run_parser.set_defaults(func=CmdExecutorRun)\n", "path": "dvc/commands/experiments/exec_run.py"}]} | 1,663 | 184 |
gh_patches_debug_22467 | rasdani/github-patches | git_diff | pre-commit__pre-commit-400 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Stashed changes lost if hook fails with non-UTF-8 diff containing trailing whitespace
Hi,
A colleague almost lost all the changes she was working on after launching a `git commit` (with zero file added) and `pre-commit` crashing without restoring its [patch](https://github.com/pre-commit/pre-commit/blob/master/pre_commit/staged_files_only.py#L15).
Here is the terminal message she got:
```
[WARNING] Stashed changes conflicted with hook auto-fixes... Rolling back fixes...
An unexpected error has occurred: CalledProcessError: Command: ['git', 'apply', 'C:\\Users\\toto\\.pre-commit\\patch1471341002']
```
This seems very similar to a past solved issue:
https://github.com/pre-commit/pre-commit/issues/176
I think it had to do with CRLF conversion.
I'm going to try to reproduce this.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/staged_files_only.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import contextlib
4 import io
5 import logging
6 import time
7
8 from pre_commit.util import CalledProcessError
9
10
11 logger = logging.getLogger('pre_commit')
12
13
14 @contextlib.contextmanager
15 def staged_files_only(cmd_runner):
16 """Clear any unstaged changes from the git working directory inside this
17 context.
18
19 Args:
20 cmd_runner - PrefixedCommandRunner
21 """
22 # Determine if there are unstaged files
23 retcode, diff_stdout_binary, _ = cmd_runner.run(
24 [
25 'git', 'diff', '--ignore-submodules', '--binary', '--exit-code',
26 '--no-color',
27 ],
28 retcode=None,
29 encoding=None,
30 )
31 if retcode and diff_stdout_binary.strip():
32 patch_filename = cmd_runner.path('patch{0}'.format(int(time.time())))
33 logger.warning('Unstaged files detected.')
34 logger.info(
35 'Stashing unstaged files to {0}.'.format(patch_filename),
36 )
37 # Save the current unstaged changes as a patch
38 with io.open(patch_filename, 'wb') as patch_file:
39 patch_file.write(diff_stdout_binary)
40
41 # Clear the working directory of unstaged changes
42 cmd_runner.run(['git', 'checkout', '--', '.'])
43 try:
44 yield
45 finally:
46 # Try to apply the patch we saved
47 try:
48 cmd_runner.run(['git', 'apply', patch_filename])
49 except CalledProcessError:
50 logger.warning(
51 'Stashed changes conflicted with hook auto-fixes... '
52 'Rolling back fixes...'
53 )
54 # We failed to apply the patch, presumably due to fixes made
55 # by hooks.
56 # Roll back the changes made by hooks.
57 cmd_runner.run(['git', 'checkout', '--', '.'])
58 cmd_runner.run(['git', 'apply', patch_filename])
59 logger.info('Restored changes from {0}.'.format(patch_filename))
60 else:
61 # There weren't any staged files so we don't need to do anything
62 # special
63 yield
64
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pre_commit/staged_files_only.py b/pre_commit/staged_files_only.py
--- a/pre_commit/staged_files_only.py
+++ b/pre_commit/staged_files_only.py
@@ -45,7 +45,7 @@
finally:
# Try to apply the patch we saved
try:
- cmd_runner.run(['git', 'apply', patch_filename])
+ cmd_runner.run(('git', 'apply', patch_filename), encoding=None)
except CalledProcessError:
logger.warning(
'Stashed changes conflicted with hook auto-fixes... '
@@ -55,7 +55,7 @@
# by hooks.
# Roll back the changes made by hooks.
cmd_runner.run(['git', 'checkout', '--', '.'])
- cmd_runner.run(['git', 'apply', patch_filename])
+ cmd_runner.run(('git', 'apply', patch_filename), encoding=None)
logger.info('Restored changes from {0}.'.format(patch_filename))
else:
# There weren't any staged files so we don't need to do anything
| {"golden_diff": "diff --git a/pre_commit/staged_files_only.py b/pre_commit/staged_files_only.py\n--- a/pre_commit/staged_files_only.py\n+++ b/pre_commit/staged_files_only.py\n@@ -45,7 +45,7 @@\n finally:\n # Try to apply the patch we saved\n try:\n- cmd_runner.run(['git', 'apply', patch_filename])\n+ cmd_runner.run(('git', 'apply', patch_filename), encoding=None)\n except CalledProcessError:\n logger.warning(\n 'Stashed changes conflicted with hook auto-fixes... '\n@@ -55,7 +55,7 @@\n # by hooks.\n # Roll back the changes made by hooks.\n cmd_runner.run(['git', 'checkout', '--', '.'])\n- cmd_runner.run(['git', 'apply', patch_filename])\n+ cmd_runner.run(('git', 'apply', patch_filename), encoding=None)\n logger.info('Restored changes from {0}.'.format(patch_filename))\n else:\n # There weren't any staged files so we don't need to do anything\n", "issue": "Stashed changes lost if hook fails with non-UTF-8 diff containing trailing whitespace\nHi,\n\nA colleague almost lost all the changes she was working on after launching a `git commit` (with zero file added) and `pre-commit` crashing without restoring its [patch](https://github.com/pre-commit/pre-commit/blob/master/pre_commit/staged_files_only.py#L15).\n\nHere is the terminal message she got:\n\n```\n[WARNING] Stashed changes conflicted with hook auto-fixes... Rolling back fixes...\nAn unexpected error has occurred: CalledProcessError: Command: ['git', 'apply', 'C:\\\\Users\\\\toto\\\\.pre-commit\\\\patch1471341002']\n```\n\nThis seems very similar to a past solved issue:\nhttps://github.com/pre-commit/pre-commit/issues/176\n\nI think it had to do with CRLF conversion.\nI'm going to try to reproduce this.\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport io\nimport logging\nimport time\n\nfrom pre_commit.util import CalledProcessError\n\n\nlogger = logging.getLogger('pre_commit')\n\n\[email protected]\ndef staged_files_only(cmd_runner):\n \"\"\"Clear any unstaged changes from the git working directory inside this\n context.\n\n Args:\n cmd_runner - PrefixedCommandRunner\n \"\"\"\n # Determine if there are unstaged files\n retcode, diff_stdout_binary, _ = cmd_runner.run(\n [\n 'git', 'diff', '--ignore-submodules', '--binary', '--exit-code',\n '--no-color',\n ],\n retcode=None,\n encoding=None,\n )\n if retcode and diff_stdout_binary.strip():\n patch_filename = cmd_runner.path('patch{0}'.format(int(time.time())))\n logger.warning('Unstaged files detected.')\n logger.info(\n 'Stashing unstaged files to {0}.'.format(patch_filename),\n )\n # Save the current unstaged changes as a patch\n with io.open(patch_filename, 'wb') as patch_file:\n patch_file.write(diff_stdout_binary)\n\n # Clear the working directory of unstaged changes\n cmd_runner.run(['git', 'checkout', '--', '.'])\n try:\n yield\n finally:\n # Try to apply the patch we saved\n try:\n cmd_runner.run(['git', 'apply', patch_filename])\n except CalledProcessError:\n logger.warning(\n 'Stashed changes conflicted with hook auto-fixes... 
'\n 'Rolling back fixes...'\n )\n # We failed to apply the patch, presumably due to fixes made\n # by hooks.\n # Roll back the changes made by hooks.\n cmd_runner.run(['git', 'checkout', '--', '.'])\n cmd_runner.run(['git', 'apply', patch_filename])\n logger.info('Restored changes from {0}.'.format(patch_filename))\n else:\n # There weren't any staged files so we don't need to do anything\n # special\n yield\n", "path": "pre_commit/staged_files_only.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport io\nimport logging\nimport time\n\nfrom pre_commit.util import CalledProcessError\n\n\nlogger = logging.getLogger('pre_commit')\n\n\[email protected]\ndef staged_files_only(cmd_runner):\n \"\"\"Clear any unstaged changes from the git working directory inside this\n context.\n\n Args:\n cmd_runner - PrefixedCommandRunner\n \"\"\"\n # Determine if there are unstaged files\n retcode, diff_stdout_binary, _ = cmd_runner.run(\n [\n 'git', 'diff', '--ignore-submodules', '--binary', '--exit-code',\n '--no-color',\n ],\n retcode=None,\n encoding=None,\n )\n if retcode and diff_stdout_binary.strip():\n patch_filename = cmd_runner.path('patch{0}'.format(int(time.time())))\n logger.warning('Unstaged files detected.')\n logger.info(\n 'Stashing unstaged files to {0}.'.format(patch_filename),\n )\n # Save the current unstaged changes as a patch\n with io.open(patch_filename, 'wb') as patch_file:\n patch_file.write(diff_stdout_binary)\n\n # Clear the working directory of unstaged changes\n cmd_runner.run(['git', 'checkout', '--', '.'])\n try:\n yield\n finally:\n # Try to apply the patch we saved\n try:\n cmd_runner.run(('git', 'apply', patch_filename), encoding=None)\n except CalledProcessError:\n logger.warning(\n 'Stashed changes conflicted with hook auto-fixes... '\n 'Rolling back fixes...'\n )\n # We failed to apply the patch, presumably due to fixes made\n # by hooks.\n # Roll back the changes made by hooks.\n cmd_runner.run(['git', 'checkout', '--', '.'])\n cmd_runner.run(('git', 'apply', patch_filename), encoding=None)\n logger.info('Restored changes from {0}.'.format(patch_filename))\n else:\n # There weren't any staged files so we don't need to do anything\n # special\n yield\n", "path": "pre_commit/staged_files_only.py"}]} | 1,013 | 231 |
gh_patches_debug_65236 | rasdani/github-patches | git_diff | streamlink__streamlink-5698 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
plugins.btv: No playable streams found
### Checklist
- [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
Your Streamlink version (6.4.2+1.g7e722ec1) is up to date!
### Description
The plugin does not display video. It fails with the errors shown in the log below.
### Debug log
```text
streamlink --loglevel=debug "https://btvplus.bg/live/" best
[cli][debug] OS: Linux-6.2.0-35-generic-x86_64-with-glibc2.35
[cli][debug] Python: 3.10.12
[cli][debug] OpenSSL: OpenSSL 3.0.2 15 Mar 2022
[cli][debug] Streamlink: 6.4.2+1.g7e722ec1
[cli][debug] Dependencies:
[cli][debug] certifi: 2023.5.7
[cli][debug] isodate: 0.6.1
[cli][debug] lxml: 4.8.0
[cli][debug] pycountry: 20.7.3
[cli][debug] pycryptodome: 3.17
[cli][debug] PySocks: 1.7.1
[cli][debug] requests: 2.31.0
[cli][debug] trio: 0.22.2
[cli][debug] trio-websocket: 0.10.3
[cli][debug] typing-extensions: 4.7.1
[cli][debug] urllib3: 1.26.16
[cli][debug] websocket-client: 1.2.3
[cli][debug] Arguments:
[cli][debug] url=https://btvplus.bg/live/
[cli][debug] stream=['best']
[cli][debug] --loglevel=debug
[cli][info] Found matching plugin btv for URL https://btvplus.bg/live/
[cli][info] Available streams: live (worst, best)
[cli][info] Opening stream: live (hls)
[cli][info] Starting player: /usr/bin/vlc
[stream.hls][debug] Reloading playlist
[cli][debug] Pre-buffering 8192 bytes
[stream.hls][error] Attempted to play a variant playlist, use 'hls://https://cdn.bweb.bg/live/PhRBlmfjy0uVGxaj1_BMiw/1701627017/61065646.m3u8' instead
[stream.segmented][debug] Closing worker thread
[stream.segmented][debug] Closing writer thread
[cli][error] Try 1/1: Could not open stream <HLSStream ['hls', 'https://cdn.bweb.bg/live/PhRBlmfjy0uVGxaj1_BMiw/1701627017/61065646.m3u8']> (No data returned from stream)
error: Could not open stream <HLSStream ['hls', 'https://cdn.bweb.bg/live/PhRBlmfjy0uVGxaj1_BMiw/1701627017/61065646.m3u8']>, tried 1 times, exiting
[cli][info] Closing currently open stream...
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/btv.py`
Content:
```
1 """
2 $description A privately owned Bulgarian live TV channel.
3 $url btvplus.bg
4 $type live
5 $region Bulgaria
6 """
7
8 import logging
9 import re
10
11 from streamlink.plugin import Plugin, pluginmatcher
12 from streamlink.plugin.api import validate
13 from streamlink.stream.hls import HLSStream
14
15
16 log = logging.getLogger(__name__)
17
18
19 @pluginmatcher(re.compile(
20 r"https?://(?:www\.)?btvplus\.bg/live/?",
21 ))
22 class BTV(Plugin):
23 URL_API = "https://btvplus.bg/lbin/v3/btvplus/player_config.php"
24
25 def _get_streams(self):
26 media_id = self.session.http.get(self.url, schema=validate.Schema(
27 re.compile(r"media_id=(\d+)"),
28 validate.any(None, validate.get(1)),
29 ))
30 if media_id is None:
31 return
32
33 stream_url = self.session.http.get(
34 self.URL_API,
35 params={
36 "media_id": media_id,
37 },
38 schema=validate.Schema(
39 validate.any(
40 validate.all(
41 validate.regex(re.compile(r"geo_blocked_stream")),
42 validate.get(0),
43 ),
44 validate.all(
45 validate.parse_json(),
46 {
47 "status": "ok",
48 "info": {
49 "file": validate.url(path=validate.endswith(".m3u8")),
50 },
51 },
52 validate.get(("info", "file")),
53 ),
54 ),
55 ),
56 )
57 if not stream_url:
58 return
59
60 if stream_url == "geo_blocked_stream":
61 log.error("The content is not available in your region")
62 return
63
64 return {"live": HLSStream(self.session, stream_url)}
65
66
67 __plugin__ = BTV
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/streamlink/plugins/btv.py b/src/streamlink/plugins/btv.py
--- a/src/streamlink/plugins/btv.py
+++ b/src/streamlink/plugins/btv.py
@@ -61,7 +61,7 @@
log.error("The content is not available in your region")
return
- return {"live": HLSStream(self.session, stream_url)}
+ return HLSStream.parse_variant_playlist(self.session, stream_url)
__plugin__ = BTV
| {"golden_diff": "diff --git a/src/streamlink/plugins/btv.py b/src/streamlink/plugins/btv.py\n--- a/src/streamlink/plugins/btv.py\n+++ b/src/streamlink/plugins/btv.py\n@@ -61,7 +61,7 @@\n log.error(\"The content is not available in your region\")\n return\n \n- return {\"live\": HLSStream(self.session, stream_url)}\n+ return HLSStream.parse_variant_playlist(self.session, stream_url)\n \n \n __plugin__ = BTV\n", "issue": "plugins.btv: No playable streams found\n### Checklist\n\n- [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\nYour Streamlink version (6.4.2+1.g7e722ec1) is up to date!\n\n### Description\n\nThe plug-in does not display video. It displays errors shown in the logs below.\r\n\n\n### Debug log\n\n```text\nstreamlink --loglevel=debug \"https://btvplus.bg/live/\" best\r\n[cli][debug] OS: Linux-6.2.0-35-generic-x86_64-with-glibc2.35\r\n[cli][debug] Python: 3.10.12\r\n[cli][debug] OpenSSL: OpenSSL 3.0.2 15 Mar 2022\r\n[cli][debug] Streamlink: 6.4.2+1.g7e722ec1\r\n[cli][debug] Dependencies:\r\n[cli][debug] certifi: 2023.5.7\r\n[cli][debug] isodate: 0.6.1\r\n[cli][debug] lxml: 4.8.0\r\n[cli][debug] pycountry: 20.7.3\r\n[cli][debug] pycryptodome: 3.17\r\n[cli][debug] PySocks: 1.7.1\r\n[cli][debug] requests: 2.31.0\r\n[cli][debug] trio: 0.22.2\r\n[cli][debug] trio-websocket: 0.10.3\r\n[cli][debug] typing-extensions: 4.7.1\r\n[cli][debug] urllib3: 1.26.16\r\n[cli][debug] websocket-client: 1.2.3\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://btvplus.bg/live/\r\n[cli][debug] stream=['best']\r\n[cli][debug] --loglevel=debug\r\n[cli][info] Found matching plugin btv for URL https://btvplus.bg/live/\r\n[cli][info] Available streams: live (worst, best)\r\n[cli][info] Opening stream: live (hls)\r\n[cli][info] Starting player: /usr/bin/vlc\r\n[stream.hls][debug] Reloading playlist\r\n[cli][debug] Pre-buffering 8192 bytes\r\n[stream.hls][error] Attempted to play a variant playlist, use 'hls://https://cdn.bweb.bg/live/PhRBlmfjy0uVGxaj1_BMiw/1701627017/61065646.m3u8' instead\r\n[stream.segmented][debug] Closing worker thread\r\n[stream.segmented][debug] Closing writer thread\r\n[cli][error] Try 1/1: Could not open stream <HLSStream ['hls', 'https://cdn.bweb.bg/live/PhRBlmfjy0uVGxaj1_BMiw/1701627017/61065646.m3u8']> (No data returned from stream)\r\nerror: Could not open stream <HLSStream ['hls', 'https://cdn.bweb.bg/live/PhRBlmfjy0uVGxaj1_BMiw/1701627017/61065646.m3u8']>, tried 1 times, exiting\r\n[cli][info] Closing currently open stream...\n```\n\n", "before_files": [{"content": "\"\"\"\n$description A privately owned Bulgarian live TV channel.\n$url btvplus.bg\n$type live\n$region Bulgaria\n\"\"\"\n\nimport logging\nimport re\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.hls import HLSStream\n\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"https?://(?:www\\.)?btvplus\\.bg/live/?\",\n))\nclass BTV(Plugin):\n URL_API = 
\"https://btvplus.bg/lbin/v3/btvplus/player_config.php\"\n\n def _get_streams(self):\n media_id = self.session.http.get(self.url, schema=validate.Schema(\n re.compile(r\"media_id=(\\d+)\"),\n validate.any(None, validate.get(1)),\n ))\n if media_id is None:\n return\n\n stream_url = self.session.http.get(\n self.URL_API,\n params={\n \"media_id\": media_id,\n },\n schema=validate.Schema(\n validate.any(\n validate.all(\n validate.regex(re.compile(r\"geo_blocked_stream\")),\n validate.get(0),\n ),\n validate.all(\n validate.parse_json(),\n {\n \"status\": \"ok\",\n \"info\": {\n \"file\": validate.url(path=validate.endswith(\".m3u8\")),\n },\n },\n validate.get((\"info\", \"file\")),\n ),\n ),\n ),\n )\n if not stream_url:\n return\n\n if stream_url == \"geo_blocked_stream\":\n log.error(\"The content is not available in your region\")\n return\n\n return {\"live\": HLSStream(self.session, stream_url)}\n\n\n__plugin__ = BTV\n", "path": "src/streamlink/plugins/btv.py"}], "after_files": [{"content": "\"\"\"\n$description A privately owned Bulgarian live TV channel.\n$url btvplus.bg\n$type live\n$region Bulgaria\n\"\"\"\n\nimport logging\nimport re\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.hls import HLSStream\n\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"https?://(?:www\\.)?btvplus\\.bg/live/?\",\n))\nclass BTV(Plugin):\n URL_API = \"https://btvplus.bg/lbin/v3/btvplus/player_config.php\"\n\n def _get_streams(self):\n media_id = self.session.http.get(self.url, schema=validate.Schema(\n re.compile(r\"media_id=(\\d+)\"),\n validate.any(None, validate.get(1)),\n ))\n if media_id is None:\n return\n\n stream_url = self.session.http.get(\n self.URL_API,\n params={\n \"media_id\": media_id,\n },\n schema=validate.Schema(\n validate.any(\n validate.all(\n validate.regex(re.compile(r\"geo_blocked_stream\")),\n validate.get(0),\n ),\n validate.all(\n validate.parse_json(),\n {\n \"status\": \"ok\",\n \"info\": {\n \"file\": validate.url(path=validate.endswith(\".m3u8\")),\n },\n },\n validate.get((\"info\", \"file\")),\n ),\n ),\n ),\n )\n if not stream_url:\n return\n\n if stream_url == \"geo_blocked_stream\":\n log.error(\"The content is not available in your region\")\n return\n\n return HLSStream.parse_variant_playlist(self.session, stream_url)\n\n\n__plugin__ = BTV\n", "path": "src/streamlink/plugins/btv.py"}]} | 1,695 | 104 |
gh_patches_debug_7467 | rasdani/github-patches | git_diff | sublimelsp__LSP-660 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
when cancelling the symbols panel, the last symbol is selected
https://github.com/tomv564/LSP/blob/be904c56fddf35f724486de405a168786ed4ffeb/plugin/symbols.py#L82-L92
```diff
def on_symbol_selected(self, symbol_index):
+ if symbol_index == -1:
+ return
selected_symbol = self.symbols[symbol_index]
range = selected_symbol.get('location', selected_symbol.get('range'))
range = range.get('range', range)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugin/symbols.py`
Content:
```
1 from .core.logging import debug
2 from .core.protocol import Request, Range
3 from .core.protocol import SymbolKind
4 from .core.registry import client_for_view, LspTextCommand
5 from .core.url import filename_to_uri
6 from .core.views import range_to_region
7
8 try:
9 from typing import List, Optional, Any
10 assert List and Optional and Any
11 except ImportError:
12 pass
13
14 symbol_kind_names = {
15 SymbolKind.File: "file",
16 SymbolKind.Module: "module",
17 SymbolKind.Namespace: "namespace",
18 SymbolKind.Package: "package",
19 SymbolKind.Class: "class",
20 SymbolKind.Method: "method",
21 SymbolKind.Property: "property",
22 SymbolKind.Field: "field",
23 SymbolKind.Constructor: "constructor",
24 SymbolKind.Enum: "enum",
25 SymbolKind.Interface: "interface",
26 SymbolKind.Function: "function",
27 SymbolKind.Variable: "variable",
28 SymbolKind.Constant: "constant",
29 SymbolKind.String: "string",
30 SymbolKind.Number: "number",
31 SymbolKind.Boolean: "boolean",
32 SymbolKind.Array: "array",
33 SymbolKind.Object: "object",
34 SymbolKind.Key: "key",
35 SymbolKind.Null: "null",
36 SymbolKind.EnumMember: "enum member",
37 SymbolKind.Struct: "struct",
38 SymbolKind.Event: "event",
39 SymbolKind.Operator: "operator",
40 SymbolKind.TypeParameter: "type parameter"
41 }
42
43
44 def format_symbol_kind(kind):
45 return symbol_kind_names.get(kind, str(kind))
46
47
48 def format_symbol(item):
49 """
50 items may be a list of strings, or a list of string lists.
51 In the latter case, each entry in the quick panel will show multiple rows
52 """
53 prefix = item.get("containerName", "")
54 label = prefix + "." + item.get("name") if prefix else item.get("name")
55 return [label, format_symbol_kind(item.get("kind"))]
56
57
58 class LspDocumentSymbolsCommand(LspTextCommand):
59 def __init__(self, view):
60 super().__init__(view)
61
62 def is_enabled(self, event=None):
63 return self.has_client_with_capability('documentSymbolProvider')
64
65 def run(self, edit) -> None:
66 client = client_for_view(self.view)
67 if client:
68 params = {
69 "textDocument": {
70 "uri": filename_to_uri(self.view.file_name())
71 }
72 }
73 request = Request.documentSymbols(params)
74 client.send_request(request, self.handle_response)
75
76 def handle_response(self, response: 'Optional[List]') -> None:
77 response_list = response or []
78 symbols = list(format_symbol(item) for item in response_list)
79 self.symbols = response_list
80 self.view.window().show_quick_panel(symbols, self.on_symbol_selected)
81
82 def on_symbol_selected(self, symbol_index):
83 selected_symbol = self.symbols[symbol_index]
84 range = selected_symbol.get('location', selected_symbol.get('range'))
85 range = range.get('range', range)
86 if not range:
87 debug('could not recognize the type: expected either SymbolInformation or DocumentSymbol')
88 return
89 region = range_to_region(Range.from_lsp(range), self.view)
90 self.view.show_at_center(region)
91 self.view.sel().clear()
92 self.view.sel().add(region)
93
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugin/symbols.py b/plugin/symbols.py
--- a/plugin/symbols.py
+++ b/plugin/symbols.py
@@ -80,6 +80,8 @@
self.view.window().show_quick_panel(symbols, self.on_symbol_selected)
def on_symbol_selected(self, symbol_index):
+ if symbol_index == -1:
+ return
selected_symbol = self.symbols[symbol_index]
range = selected_symbol.get('location', selected_symbol.get('range'))
range = range.get('range', range)
| {"golden_diff": "diff --git a/plugin/symbols.py b/plugin/symbols.py\n--- a/plugin/symbols.py\n+++ b/plugin/symbols.py\n@@ -80,6 +80,8 @@\n self.view.window().show_quick_panel(symbols, self.on_symbol_selected)\n \n def on_symbol_selected(self, symbol_index):\n+ if symbol_index == -1:\n+ return\n selected_symbol = self.symbols[symbol_index]\n range = selected_symbol.get('location', selected_symbol.get('range'))\n range = range.get('range', range)\n", "issue": "when cancelling the symbols panel, the last symbol is selected\nhttps://github.com/tomv564/LSP/blob/be904c56fddf35f724486de405a168786ed4ffeb/plugin/symbols.py#L82-L92\r\n```diff\r\n def on_symbol_selected(self, symbol_index):\r\n+ if symbol_index == -1:\r\n+ return\r\n selected_symbol = self.symbols[symbol_index]\r\n range = selected_symbol.get('location', selected_symbol.get('range'))\r\n range = range.get('range', range)\r\n```\n", "before_files": [{"content": "from .core.logging import debug\nfrom .core.protocol import Request, Range\nfrom .core.protocol import SymbolKind\nfrom .core.registry import client_for_view, LspTextCommand\nfrom .core.url import filename_to_uri\nfrom .core.views import range_to_region\n\ntry:\n from typing import List, Optional, Any\n assert List and Optional and Any\nexcept ImportError:\n pass\n\nsymbol_kind_names = {\n SymbolKind.File: \"file\",\n SymbolKind.Module: \"module\",\n SymbolKind.Namespace: \"namespace\",\n SymbolKind.Package: \"package\",\n SymbolKind.Class: \"class\",\n SymbolKind.Method: \"method\",\n SymbolKind.Property: \"property\",\n SymbolKind.Field: \"field\",\n SymbolKind.Constructor: \"constructor\",\n SymbolKind.Enum: \"enum\",\n SymbolKind.Interface: \"interface\",\n SymbolKind.Function: \"function\",\n SymbolKind.Variable: \"variable\",\n SymbolKind.Constant: \"constant\",\n SymbolKind.String: \"string\",\n SymbolKind.Number: \"number\",\n SymbolKind.Boolean: \"boolean\",\n SymbolKind.Array: \"array\",\n SymbolKind.Object: \"object\",\n SymbolKind.Key: \"key\",\n SymbolKind.Null: \"null\",\n SymbolKind.EnumMember: \"enum member\",\n SymbolKind.Struct: \"struct\",\n SymbolKind.Event: \"event\",\n SymbolKind.Operator: \"operator\",\n SymbolKind.TypeParameter: \"type parameter\"\n}\n\n\ndef format_symbol_kind(kind):\n return symbol_kind_names.get(kind, str(kind))\n\n\ndef format_symbol(item):\n \"\"\"\n items may be a list of strings, or a list of string lists.\n In the latter case, each entry in the quick panel will show multiple rows\n \"\"\"\n prefix = item.get(\"containerName\", \"\")\n label = prefix + \".\" + item.get(\"name\") if prefix else item.get(\"name\")\n return [label, format_symbol_kind(item.get(\"kind\"))]\n\n\nclass LspDocumentSymbolsCommand(LspTextCommand):\n def __init__(self, view):\n super().__init__(view)\n\n def is_enabled(self, event=None):\n return self.has_client_with_capability('documentSymbolProvider')\n\n def run(self, edit) -> None:\n client = client_for_view(self.view)\n if client:\n params = {\n \"textDocument\": {\n \"uri\": filename_to_uri(self.view.file_name())\n }\n }\n request = Request.documentSymbols(params)\n client.send_request(request, self.handle_response)\n\n def handle_response(self, response: 'Optional[List]') -> None:\n response_list = response or []\n symbols = list(format_symbol(item) for item in response_list)\n self.symbols = response_list\n self.view.window().show_quick_panel(symbols, self.on_symbol_selected)\n\n def on_symbol_selected(self, symbol_index):\n selected_symbol = self.symbols[symbol_index]\n range = 
selected_symbol.get('location', selected_symbol.get('range'))\n range = range.get('range', range)\n if not range:\n debug('could not recognize the type: expected either SymbolInformation or DocumentSymbol')\n return\n region = range_to_region(Range.from_lsp(range), self.view)\n self.view.show_at_center(region)\n self.view.sel().clear()\n self.view.sel().add(region)\n", "path": "plugin/symbols.py"}], "after_files": [{"content": "from .core.logging import debug\nfrom .core.protocol import Request, Range\nfrom .core.protocol import SymbolKind\nfrom .core.registry import client_for_view, LspTextCommand\nfrom .core.url import filename_to_uri\nfrom .core.views import range_to_region\n\ntry:\n from typing import List, Optional, Any\n assert List and Optional and Any\nexcept ImportError:\n pass\n\nsymbol_kind_names = {\n SymbolKind.File: \"file\",\n SymbolKind.Module: \"module\",\n SymbolKind.Namespace: \"namespace\",\n SymbolKind.Package: \"package\",\n SymbolKind.Class: \"class\",\n SymbolKind.Method: \"method\",\n SymbolKind.Property: \"property\",\n SymbolKind.Field: \"field\",\n SymbolKind.Constructor: \"constructor\",\n SymbolKind.Enum: \"enum\",\n SymbolKind.Interface: \"interface\",\n SymbolKind.Function: \"function\",\n SymbolKind.Variable: \"variable\",\n SymbolKind.Constant: \"constant\",\n SymbolKind.String: \"string\",\n SymbolKind.Number: \"number\",\n SymbolKind.Boolean: \"boolean\",\n SymbolKind.Array: \"array\",\n SymbolKind.Object: \"object\",\n SymbolKind.Key: \"key\",\n SymbolKind.Null: \"null\",\n SymbolKind.EnumMember: \"enum member\",\n SymbolKind.Struct: \"struct\",\n SymbolKind.Event: \"event\",\n SymbolKind.Operator: \"operator\",\n SymbolKind.TypeParameter: \"type parameter\"\n}\n\n\ndef format_symbol_kind(kind):\n return symbol_kind_names.get(kind, str(kind))\n\n\ndef format_symbol(item):\n \"\"\"\n items may be a list of strings, or a list of string lists.\n In the latter case, each entry in the quick panel will show multiple rows\n \"\"\"\n prefix = item.get(\"containerName\", \"\")\n label = prefix + \".\" + item.get(\"name\") if prefix else item.get(\"name\")\n return [label, format_symbol_kind(item.get(\"kind\"))]\n\n\nclass LspDocumentSymbolsCommand(LspTextCommand):\n def __init__(self, view):\n super().__init__(view)\n\n def is_enabled(self, event=None):\n return self.has_client_with_capability('documentSymbolProvider')\n\n def run(self, edit) -> None:\n client = client_for_view(self.view)\n if client:\n params = {\n \"textDocument\": {\n \"uri\": filename_to_uri(self.view.file_name())\n }\n }\n request = Request.documentSymbols(params)\n client.send_request(request, self.handle_response)\n\n def handle_response(self, response: 'Optional[List]') -> None:\n response_list = response or []\n symbols = list(format_symbol(item) for item in response_list)\n self.symbols = response_list\n self.view.window().show_quick_panel(symbols, self.on_symbol_selected)\n\n def on_symbol_selected(self, symbol_index):\n if symbol_index == -1:\n return\n selected_symbol = self.symbols[symbol_index]\n range = selected_symbol.get('location', selected_symbol.get('range'))\n range = range.get('range', range)\n if not range:\n debug('could not recognize the type: expected either SymbolInformation or DocumentSymbol')\n return\n region = range_to_region(Range.from_lsp(range), self.view)\n self.view.show_at_center(region)\n self.view.sel().clear()\n self.view.sel().add(region)\n", "path": "plugin/symbols.py"}]} | 1,270 | 116 |
gh_patches_debug_8479 | rasdani/github-patches | git_diff | spacetelescope__jwql-92 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update environment.yml to update Django version
When running the Django web server (on the `laurenmarietta/web-app-dev` branch) from the `jwql` environment on the VM, I had to update Django from 1.11.8 to the latest version (2.0.5) to get rid of an error with Django.
The version of Django in `environment.yml` should be specified as >=2.0.5 in the future.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import numpy as np
2 from setuptools import setup
3 from setuptools import find_packages
4
5 VERSION = '0.4.0'
6
7 AUTHORS = 'Matthew Bourque, Sara Ogaz, Joe Filippazzo, Bryan Hilbert, Misty Cracraft, Graham Kanarek'
8 AUTHORS += 'Johannes Sahlmann, Lauren Chambers, Catherine Martlin'
9
10 REQUIRES = ['astropy', 'astroquery', 'bokeh==0.12.5', 'django', 'matplotlib', 'numpy', 'python-dateutil', 'sphinx', 'sphinx-automodapi', 'sqlalchemy']
11
12 setup(
13 name='jwql',
14 version=VERSION,
15 description='The JWST Quicklook Project',
16 url='https://github.com/spacetelescope/jwql.git',
17 author=AUTHORS,
18 author_email='[email protected]',
19 license='BSD',
20 keywords=['astronomy', 'python'],
21 classifiers=['Programming Language :: Python'],
22 packages=find_packages(),
23 install_requires=REQUIRES,
24 include_package_data=True,
25 include_dirs=[np.get_include()],
26 )
27
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -7,7 +7,7 @@
AUTHORS = 'Matthew Bourque, Sara Ogaz, Joe Filippazzo, Bryan Hilbert, Misty Cracraft, Graham Kanarek'
AUTHORS += 'Johannes Sahlmann, Lauren Chambers, Catherine Martlin'
-REQUIRES = ['astropy', 'astroquery', 'bokeh==0.12.5', 'django', 'matplotlib', 'numpy', 'python-dateutil', 'sphinx', 'sphinx-automodapi', 'sqlalchemy']
+REQUIRES = ['astropy', 'astroquery', 'bokeh==0.12.5', 'django==2.0.5', 'matplotlib', 'numpy', 'python-dateutil', 'sphinx', 'sphinx-automodapi', 'sqlalchemy']
setup(
name='jwql',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -7,7 +7,7 @@\n AUTHORS = 'Matthew Bourque, Sara Ogaz, Joe Filippazzo, Bryan Hilbert, Misty Cracraft, Graham Kanarek'\n AUTHORS += 'Johannes Sahlmann, Lauren Chambers, Catherine Martlin'\n \n-REQUIRES = ['astropy', 'astroquery', 'bokeh==0.12.5', 'django', 'matplotlib', 'numpy', 'python-dateutil', 'sphinx', 'sphinx-automodapi', 'sqlalchemy']\n+REQUIRES = ['astropy', 'astroquery', 'bokeh==0.12.5', 'django==2.0.5', 'matplotlib', 'numpy', 'python-dateutil', 'sphinx', 'sphinx-automodapi', 'sqlalchemy']\n \n setup(\n name='jwql',\n", "issue": "Update environment.yml to update Django version\nWhen running the Django web server (on the `laurenmarietta/web-app-dev branch`) from the `jwql` environment on the VM, and I had to update Django from 1.11.8 to the latest version (2.0.5) to get rid of an error with Django.\r\n\r\nThe version of Django in `environment.yml` should be specified to >=2.0.5 in the environment file in the future.\n", "before_files": [{"content": "import numpy as np\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nVERSION = '0.4.0'\n\nAUTHORS = 'Matthew Bourque, Sara Ogaz, Joe Filippazzo, Bryan Hilbert, Misty Cracraft, Graham Kanarek'\nAUTHORS += 'Johannes Sahlmann, Lauren Chambers, Catherine Martlin'\n\nREQUIRES = ['astropy', 'astroquery', 'bokeh==0.12.5', 'django', 'matplotlib', 'numpy', 'python-dateutil', 'sphinx', 'sphinx-automodapi', 'sqlalchemy']\n\nsetup(\n name='jwql',\n version=VERSION,\n description='The JWST Quicklook Project',\n url='https://github.com/spacetelescope/jwql.git',\n author=AUTHORS,\n author_email='[email protected]',\n license='BSD',\n keywords=['astronomy', 'python'],\n classifiers=['Programming Language :: Python'],\n packages=find_packages(),\n install_requires=REQUIRES,\n include_package_data=True,\n include_dirs=[np.get_include()],\n )\n", "path": "setup.py"}], "after_files": [{"content": "import numpy as np\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nVERSION = '0.4.0'\n\nAUTHORS = 'Matthew Bourque, Sara Ogaz, Joe Filippazzo, Bryan Hilbert, Misty Cracraft, Graham Kanarek'\nAUTHORS += 'Johannes Sahlmann, Lauren Chambers, Catherine Martlin'\n\nREQUIRES = ['astropy', 'astroquery', 'bokeh==0.12.5', 'django==2.0.5', 'matplotlib', 'numpy', 'python-dateutil', 'sphinx', 'sphinx-automodapi', 'sqlalchemy']\n\nsetup(\n name='jwql',\n version=VERSION,\n description='The JWST Quicklook Project',\n url='https://github.com/spacetelescope/jwql.git',\n author=AUTHORS,\n author_email='[email protected]',\n license='BSD',\n keywords=['astronomy', 'python'],\n classifiers=['Programming Language :: Python'],\n packages=find_packages(),\n install_requires=REQUIRES,\n include_package_data=True,\n include_dirs=[np.get_include()],\n )\n", "path": "setup.py"}]} | 639 | 201 |
gh_patches_debug_26629 | rasdani/github-patches | git_diff | GoogleCloudPlatform__PerfKitBenchmarker-73 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The cluster boot benchmark should use the num_cpus function in parallel
The cluster boot benchmark has the following code:
> for vm in vms:
> metadata = {'machine_type': vm.machine_type, 'num_cpus': vm.num_cpus,
> 'machine_instance': vm_number}
> value = vm.TimeToBoot()
This looks great until you realize vm.num_cpus is a method on the virtual machine which in turn calls RemoteCommand, leading to an ssh. When a large number of VMs boot, the result is a long set of serially run SSH calls to each VM. This could be done a lot faster by moving the code into a method and then using RunThreaded.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `perfkitbenchmarker/benchmarks/cluster_boot_benchmark.py`
Content:
```
1 # Copyright 2014 Google Inc. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Runs a cluster boot benchmark."""
16
17 import logging
18
19 from perfkitbenchmarker import flags
20
21 FLAGS = flags.FLAGS
22 BENCHMARK_INFO = {'name': 'cluster boot',
23 'description': 'Create a cluster, record all times to boot',
24 'scratch_disk': False,
25 'num_machines': None} # Set in GetInfo()
26
27
28 def GetInfo():
29 BENCHMARK_INFO['num_machines'] = FLAGS.num_vms
30 return BENCHMARK_INFO
31
32
33 def Prepare(unused_benchmark_spec):
34 pass
35
36
37 def Run(benchmark_spec):
38 """Measure the boot time for all VMs.
39
40 Args:
41 benchmark_spec: The benchmark specification. Contains all data that is
42 required to run the benchmark.
43
44 Returns:
45 A list of samples in the form of 3 or 4 tuples. The tuples contain
46 the sample metric (string), value (float), and unit (string).
47 If a 4th element is included, it is a dictionary of sample
48 metadata.
49 """
50
51 samples = []
52 vm_number = 0
53 logging.info('Boot Results:')
54 vms = benchmark_spec.vms
55 for vm in vms:
56 metadata = {'machine_type': vm.machine_type, 'num_cpus': vm.num_cpus,
57 'machine_instance': vm_number}
58 value = vm.TimeToBoot()
59 assert value is not None
60 samples.append(('Boot Time', value, 'seconds', metadata))
61 vm_number += 1
62 logging.info(samples)
63 assert vm_number == benchmark_spec.num_vms
64 return samples
65
66
67 def Cleanup(unused_benchmark_spec):
68 pass
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/perfkitbenchmarker/benchmarks/cluster_boot_benchmark.py b/perfkitbenchmarker/benchmarks/cluster_boot_benchmark.py
--- a/perfkitbenchmarker/benchmarks/cluster_boot_benchmark.py
+++ b/perfkitbenchmarker/benchmarks/cluster_boot_benchmark.py
@@ -17,6 +17,7 @@
import logging
from perfkitbenchmarker import flags
+from perfkitbenchmarker import vm_util
FLAGS = flags.FLAGS
BENCHMARK_INFO = {'name': 'cluster boot',
@@ -34,6 +35,14 @@
pass
+def _GetTimeToBoot(vm, vm_index, result_list):
+ metadata = {'machine_type': vm.machine_type, 'num_cpus': vm.num_cpus,
+ 'machine_instance': vm_index}
+ value = vm.TimeToBoot()
+ assert value is not None
+ result_list.append(('Boot Time', value, 'seconds', metadata))
+
+
def Run(benchmark_spec):
"""Measure the boot time for all VMs.
@@ -49,18 +58,12 @@
"""
samples = []
- vm_number = 0
logging.info('Boot Results:')
vms = benchmark_spec.vms
- for vm in vms:
- metadata = {'machine_type': vm.machine_type, 'num_cpus': vm.num_cpus,
- 'machine_instance': vm_number}
- value = vm.TimeToBoot()
- assert value is not None
- samples.append(('Boot Time', value, 'seconds', metadata))
- vm_number += 1
+ params = [((vm, i, samples), {}) for i, vm in enumerate(vms)]
+ vm_util.RunThreaded(_GetTimeToBoot, params)
logging.info(samples)
- assert vm_number == benchmark_spec.num_vms
+ assert len(samples) == benchmark_spec.num_vms
return samples
| {"golden_diff": "diff --git a/perfkitbenchmarker/benchmarks/cluster_boot_benchmark.py b/perfkitbenchmarker/benchmarks/cluster_boot_benchmark.py\n--- a/perfkitbenchmarker/benchmarks/cluster_boot_benchmark.py\n+++ b/perfkitbenchmarker/benchmarks/cluster_boot_benchmark.py\n@@ -17,6 +17,7 @@\n import logging\n \n from perfkitbenchmarker import flags\n+from perfkitbenchmarker import vm_util\n \n FLAGS = flags.FLAGS\n BENCHMARK_INFO = {'name': 'cluster boot',\n@@ -34,6 +35,14 @@\n pass\n \n \n+def _GetTimeToBoot(vm, vm_index, result_list):\n+ metadata = {'machine_type': vm.machine_type, 'num_cpus': vm.num_cpus,\n+ 'machine_instance': vm_index}\n+ value = vm.TimeToBoot()\n+ assert value is not None\n+ result_list.append(('Boot Time', value, 'seconds', metadata))\n+\n+\n def Run(benchmark_spec):\n \"\"\"Measure the boot time for all VMs.\n \n@@ -49,18 +58,12 @@\n \"\"\"\n \n samples = []\n- vm_number = 0\n logging.info('Boot Results:')\n vms = benchmark_spec.vms\n- for vm in vms:\n- metadata = {'machine_type': vm.machine_type, 'num_cpus': vm.num_cpus,\n- 'machine_instance': vm_number}\n- value = vm.TimeToBoot()\n- assert value is not None\n- samples.append(('Boot Time', value, 'seconds', metadata))\n- vm_number += 1\n+ params = [((vm, i, samples), {}) for i, vm in enumerate(vms)]\n+ vm_util.RunThreaded(_GetTimeToBoot, params)\n logging.info(samples)\n- assert vm_number == benchmark_spec.num_vms\n+ assert len(samples) == benchmark_spec.num_vms\n return samples\n", "issue": "The cluster boot benchmark should the num_cpus function in parallel\nThe cluster boot benchmark has the following code:\n\n> for vm in vms:\n> metadata = {'machine_type': vm.machine_type, 'num_cpus': vm.num_cpus,\n> 'machine_instance': vm_number}\n> value = vm.TimeToBoot()\n\nThis looks great until you realize vm.num_cpus is a method on the virtual machine which in turn calls RemoteCommand leading to an ssh. When large number of VM's boot the result is a long set of serially run ssh's to each VM. This could be done a lot faster by moving the code into a method and then using RunThreaded.\n\n", "before_files": [{"content": "# Copyright 2014 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Runs a cluster boot benchmark.\"\"\"\n\nimport logging\n\nfrom perfkitbenchmarker import flags\n\nFLAGS = flags.FLAGS\nBENCHMARK_INFO = {'name': 'cluster boot',\n 'description': 'Create a cluster, record all times to boot',\n 'scratch_disk': False,\n 'num_machines': None} # Set in GetInfo()\n\n\ndef GetInfo():\n BENCHMARK_INFO['num_machines'] = FLAGS.num_vms\n return BENCHMARK_INFO\n\n\ndef Prepare(unused_benchmark_spec):\n pass\n\n\ndef Run(benchmark_spec):\n \"\"\"Measure the boot time for all VMs.\n\n Args:\n benchmark_spec: The benchmark specification. Contains all data that is\n required to run the benchmark.\n\n Returns:\n A list of samples in the form of 3 or 4 tuples. 
The tuples contain\n the sample metric (string), value (float), and unit (string).\n If a 4th element is included, it is a dictionary of sample\n metadata.\n \"\"\"\n\n samples = []\n vm_number = 0\n logging.info('Boot Results:')\n vms = benchmark_spec.vms\n for vm in vms:\n metadata = {'machine_type': vm.machine_type, 'num_cpus': vm.num_cpus,\n 'machine_instance': vm_number}\n value = vm.TimeToBoot()\n assert value is not None\n samples.append(('Boot Time', value, 'seconds', metadata))\n vm_number += 1\n logging.info(samples)\n assert vm_number == benchmark_spec.num_vms\n return samples\n\n\ndef Cleanup(unused_benchmark_spec):\n pass\n", "path": "perfkitbenchmarker/benchmarks/cluster_boot_benchmark.py"}], "after_files": [{"content": "# Copyright 2014 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Runs a cluster boot benchmark.\"\"\"\n\nimport logging\n\nfrom perfkitbenchmarker import flags\nfrom perfkitbenchmarker import vm_util\n\nFLAGS = flags.FLAGS\nBENCHMARK_INFO = {'name': 'cluster boot',\n 'description': 'Create a cluster, record all times to boot',\n 'scratch_disk': False,\n 'num_machines': None} # Set in GetInfo()\n\n\ndef GetInfo():\n BENCHMARK_INFO['num_machines'] = FLAGS.num_vms\n return BENCHMARK_INFO\n\n\ndef Prepare(unused_benchmark_spec):\n pass\n\n\ndef _GetTimeToBoot(vm, vm_index, result_list):\n metadata = {'machine_type': vm.machine_type, 'num_cpus': vm.num_cpus,\n 'machine_instance': vm_index}\n value = vm.TimeToBoot()\n assert value is not None\n result_list.append(('Boot Time', value, 'seconds', metadata))\n\n\ndef Run(benchmark_spec):\n \"\"\"Measure the boot time for all VMs.\n\n Args:\n benchmark_spec: The benchmark specification. Contains all data that is\n required to run the benchmark.\n\n Returns:\n A list of samples in the form of 3 or 4 tuples. The tuples contain\n the sample metric (string), value (float), and unit (string).\n If a 4th element is included, it is a dictionary of sample\n metadata.\n \"\"\"\n\n samples = []\n logging.info('Boot Results:')\n vms = benchmark_spec.vms\n params = [((vm, i, samples), {}) for i, vm in enumerate(vms)]\n vm_util.RunThreaded(_GetTimeToBoot, params)\n logging.info(samples)\n assert len(samples) == benchmark_spec.num_vms\n return samples\n\n\ndef Cleanup(unused_benchmark_spec):\n pass\n", "path": "perfkitbenchmarker/benchmarks/cluster_boot_benchmark.py"}]} | 1,023 | 425 |
gh_patches_debug_6992 | rasdani/github-patches | git_diff | ansible__awx-13627 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unable to use CCP lookup plugin with empty webservice_id
### Please confirm the following
- [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).
- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.
- [X] I understand that AWX is open source software provided for free and that I might not receive a timely response.
### Bug Summary
When job uses the `CyberArk Central Credential Provider Lookup` credential plugin with an empty web service id, it fails with the exception:
```
Traceback (most recent call last):
File "/awx_devel/awx/main/tasks/jobs.py", line 508, in run
args = self.build_args(self.instance, private_data_dir, passwords)
File "/awx_devel/awx/main/tasks/jobs.py", line 941, in build_args
ssh_username = creds.get_input('username', default='')
File "/awx_devel/awx/main/models/credential/__init__.py", line 275, in get_input
return self._get_dynamic_input(field_name)
File "/awx_devel/awx/main/models/credential/__init__.py", line 309, in _get_dynamic_input
return input_source.get_input_value()
File "/awx_devel/awx/main/models/credential/__init__.py", line 1250, in get_input_value
return backend(**backend_kwargs)
File "/awx_devel/awx/main/credential_plugins/aim.py", line 73, in aim_backend
webservice_id = kwargs['webservice_id']
KeyError: 'webservice_id'
```
The issue is only reproducible if we create a CCP lookup credential using the API and do not provide the `webservice_id` key as an input. If you create the CCP lookup with the UI, everything works fine.
### AWX version
devel
### Select the relevant components
- [ ] UI
- [X] API
- [ ] Docs
- [ ] Collection
- [ ] CLI
- [ ] Other
### Installation method
docker development environment
### Modifications
no
### Ansible version
_No response_
### Operating system
_No response_
### Web browser
_No response_
### Steps to reproduce
1. Create a CyberArk Central Credential Provider Lookup credential. Do not provide the WebService ID value; keep it empty. I used the API to create the credential, and the webservice_id was missing in the inputs:
```
inputs = {
'url': url,
'app_id': app_id,
'client_key': client_key,
'client_cert': client_cert,
'verify': verify
}
payload = factories.credential.payload(
name=fauxfactory.gen_utf8(),
description=fauxfactory.gen_utf8(),
credential_type=cred_type,
inputs=inputs
)
```
2. Create Machine credential that uses the CCP lookup credential. Set proper Object query.
3. Create Job Template that uses this credential. Run the job.
### Expected results
The lookup should use default webservice id: `AIMWebService`
### Actual results
Exception occured. See description.
### Additional information
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `awx/main/credential_plugins/aim.py`
Content:
```
1 from .plugin import CredentialPlugin, CertFiles, raise_for_status
2
3 from urllib.parse import quote, urlencode, urljoin
4
5 from django.utils.translation import gettext_lazy as _
6 import requests
7
8 aim_inputs = {
9 'fields': [
10 {
11 'id': 'url',
12 'label': _('CyberArk CCP URL'),
13 'type': 'string',
14 'format': 'url',
15 },
16 {
17 'id': 'webservice_id',
18 'label': _('Web Service ID'),
19 'type': 'string',
20 'help_text': _('The CCP Web Service ID. Leave blank to default to AIMWebService.'),
21 },
22 {
23 'id': 'app_id',
24 'label': _('Application ID'),
25 'type': 'string',
26 'secret': True,
27 },
28 {
29 'id': 'client_key',
30 'label': _('Client Key'),
31 'type': 'string',
32 'secret': True,
33 'multiline': True,
34 },
35 {
36 'id': 'client_cert',
37 'label': _('Client Certificate'),
38 'type': 'string',
39 'secret': True,
40 'multiline': True,
41 },
42 {
43 'id': 'verify',
44 'label': _('Verify SSL Certificates'),
45 'type': 'boolean',
46 'default': True,
47 },
48 ],
49 'metadata': [
50 {
51 'id': 'object_query',
52 'label': _('Object Query'),
53 'type': 'string',
54 'help_text': _('Lookup query for the object. Ex: Safe=TestSafe;Object=testAccountName123'),
55 },
56 {'id': 'object_query_format', 'label': _('Object Query Format'), 'type': 'string', 'default': 'Exact', 'choices': ['Exact', 'Regexp']},
57 {
58 'id': 'reason',
59 'label': _('Reason'),
60 'type': 'string',
61 'help_text': _('Object request reason. This is only needed if it is required by the object\'s policy.'),
62 },
63 ],
64 'required': ['url', 'app_id', 'object_query'],
65 }
66
67
68 def aim_backend(**kwargs):
69 url = kwargs['url']
70 client_cert = kwargs.get('client_cert', None)
71 client_key = kwargs.get('client_key', None)
72 verify = kwargs['verify']
73 webservice_id = kwargs['webservice_id']
74 app_id = kwargs['app_id']
75 object_query = kwargs['object_query']
76 object_query_format = kwargs['object_query_format']
77 reason = kwargs.get('reason', None)
78 if webservice_id == '':
79 webservice_id = 'AIMWebService'
80
81 query_params = {
82 'AppId': app_id,
83 'Query': object_query,
84 'QueryFormat': object_query_format,
85 }
86 if reason:
87 query_params['reason'] = reason
88
89 request_qs = '?' + urlencode(query_params, quote_via=quote)
90 request_url = urljoin(url, '/'.join([webservice_id, 'api', 'Accounts']))
91
92 with CertFiles(client_cert, client_key) as cert:
93 res = requests.get(
94 request_url + request_qs,
95 timeout=30,
96 cert=cert,
97 verify=verify,
98 allow_redirects=False,
99 )
100 raise_for_status(res)
101 return res.json()['Content']
102
103
104 aim_plugin = CredentialPlugin('CyberArk Central Credential Provider Lookup', inputs=aim_inputs, backend=aim_backend)
105
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/awx/main/credential_plugins/aim.py b/awx/main/credential_plugins/aim.py
--- a/awx/main/credential_plugins/aim.py
+++ b/awx/main/credential_plugins/aim.py
@@ -70,7 +70,7 @@
client_cert = kwargs.get('client_cert', None)
client_key = kwargs.get('client_key', None)
verify = kwargs['verify']
- webservice_id = kwargs['webservice_id']
+ webservice_id = kwargs.get('webservice_id', '')
app_id = kwargs['app_id']
object_query = kwargs['object_query']
object_query_format = kwargs['object_query_format']
| {"golden_diff": "diff --git a/awx/main/credential_plugins/aim.py b/awx/main/credential_plugins/aim.py\n--- a/awx/main/credential_plugins/aim.py\n+++ b/awx/main/credential_plugins/aim.py\n@@ -70,7 +70,7 @@\n client_cert = kwargs.get('client_cert', None)\n client_key = kwargs.get('client_key', None)\n verify = kwargs['verify']\n- webservice_id = kwargs['webservice_id']\n+ webservice_id = kwargs.get('webservice_id', '')\n app_id = kwargs['app_id']\n object_query = kwargs['object_query']\n object_query_format = kwargs['object_query_format']\n", "issue": "Unable to use CCP lookup plugin with empty webservice_id\n### Please confirm the following\r\n\r\n- [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).\r\n- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.\r\n- [X] I understand that AWX is open source software provided for free and that I might not receive a timely response.\r\n\r\n### Bug Summary\r\n\r\nWhen job uses the `CyberArk Central Credential Provider Lookup` credential plugin with an empty web service id, it fails with the exception: \r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/awx_devel/awx/main/tasks/jobs.py\", line 508, in run\r\n args = self.build_args(self.instance, private_data_dir, passwords)\r\n File \"/awx_devel/awx/main/tasks/jobs.py\", line 941, in build_args\r\n ssh_username = creds.get_input('username', default='')\r\n File \"/awx_devel/awx/main/models/credential/__init__.py\", line 275, in get_input\r\n return self._get_dynamic_input(field_name)\r\n File \"/awx_devel/awx/main/models/credential/__init__.py\", line 309, in _get_dynamic_input\r\n return input_source.get_input_value()\r\n File \"/awx_devel/awx/main/models/credential/__init__.py\", line 1250, in get_input_value\r\n return backend(**backend_kwargs)\r\n File \"/awx_devel/awx/main/credential_plugins/aim.py\", line 73, in aim_backend\r\n webservice_id = kwargs['webservice_id']\r\nKeyError: 'webservice_id'\r\n```\r\n\r\nThe issue is only reproducible if we create a CCP lookup credential using API and we do not provide the `webservice_id` key as the input. If you create CCP lookup with UI - everything works fine. \r\n\r\n### AWX version\r\n\r\ndevel\r\n\r\n### Select the relevant components\r\n\r\n- [ ] UI\r\n- [X] API\r\n- [ ] Docs\r\n- [ ] Collection\r\n- [ ] CLI\r\n- [ ] Other\r\n\r\n### Installation method\r\n\r\ndocker development environment\r\n\r\n### Modifications\r\n\r\nno\r\n\r\n### Ansible version\r\n\r\n_No response_\r\n\r\n### Operating system\r\n\r\n_No response_\r\n\r\n### Web browser\r\n\r\n_No response_\r\n\r\n### Steps to reproduce\r\n\r\n1. Create CyberArk Central Credential Provider Lookup credential. Do not provide the WebService ID value, keep it empty. I used API to create credetnail and the webservice_id was missing in the inputs: \r\n\r\n```\r\ninputs = {\r\n 'url': url,\r\n 'app_id': app_id,\r\n 'client_key': client_key,\r\n 'client_cert': client_cert,\r\n 'verify': verify\r\n}\r\n\r\npayload = factories.credential.payload(\r\n name=fauxfactory.gen_utf8(),\r\n description=fauxfactory.gen_utf8(),\r\n credential_type=cred_type,\r\n inputs=inputs\r\n)\r\n```\r\n\r\n2. Create Machine credential that uses the CCP lookup credential. Set proper Object query. \r\n3. Create Job Template that uses this credential. Run the job. 
\r\n\r\n\r\n\r\n### Expected results\r\n\r\nThe lookup should use default webservice id: `AIMWebService`\r\n\r\n### Actual results\r\n\r\nException occured. See description. \r\n\r\n\r\n\r\n### Additional information\r\n\r\n_No response_\n", "before_files": [{"content": "from .plugin import CredentialPlugin, CertFiles, raise_for_status\n\nfrom urllib.parse import quote, urlencode, urljoin\n\nfrom django.utils.translation import gettext_lazy as _\nimport requests\n\naim_inputs = {\n 'fields': [\n {\n 'id': 'url',\n 'label': _('CyberArk CCP URL'),\n 'type': 'string',\n 'format': 'url',\n },\n {\n 'id': 'webservice_id',\n 'label': _('Web Service ID'),\n 'type': 'string',\n 'help_text': _('The CCP Web Service ID. Leave blank to default to AIMWebService.'),\n },\n {\n 'id': 'app_id',\n 'label': _('Application ID'),\n 'type': 'string',\n 'secret': True,\n },\n {\n 'id': 'client_key',\n 'label': _('Client Key'),\n 'type': 'string',\n 'secret': True,\n 'multiline': True,\n },\n {\n 'id': 'client_cert',\n 'label': _('Client Certificate'),\n 'type': 'string',\n 'secret': True,\n 'multiline': True,\n },\n {\n 'id': 'verify',\n 'label': _('Verify SSL Certificates'),\n 'type': 'boolean',\n 'default': True,\n },\n ],\n 'metadata': [\n {\n 'id': 'object_query',\n 'label': _('Object Query'),\n 'type': 'string',\n 'help_text': _('Lookup query for the object. Ex: Safe=TestSafe;Object=testAccountName123'),\n },\n {'id': 'object_query_format', 'label': _('Object Query Format'), 'type': 'string', 'default': 'Exact', 'choices': ['Exact', 'Regexp']},\n {\n 'id': 'reason',\n 'label': _('Reason'),\n 'type': 'string',\n 'help_text': _('Object request reason. This is only needed if it is required by the object\\'s policy.'),\n },\n ],\n 'required': ['url', 'app_id', 'object_query'],\n}\n\n\ndef aim_backend(**kwargs):\n url = kwargs['url']\n client_cert = kwargs.get('client_cert', None)\n client_key = kwargs.get('client_key', None)\n verify = kwargs['verify']\n webservice_id = kwargs['webservice_id']\n app_id = kwargs['app_id']\n object_query = kwargs['object_query']\n object_query_format = kwargs['object_query_format']\n reason = kwargs.get('reason', None)\n if webservice_id == '':\n webservice_id = 'AIMWebService'\n\n query_params = {\n 'AppId': app_id,\n 'Query': object_query,\n 'QueryFormat': object_query_format,\n }\n if reason:\n query_params['reason'] = reason\n\n request_qs = '?' + urlencode(query_params, quote_via=quote)\n request_url = urljoin(url, '/'.join([webservice_id, 'api', 'Accounts']))\n\n with CertFiles(client_cert, client_key) as cert:\n res = requests.get(\n request_url + request_qs,\n timeout=30,\n cert=cert,\n verify=verify,\n allow_redirects=False,\n )\n raise_for_status(res)\n return res.json()['Content']\n\n\naim_plugin = CredentialPlugin('CyberArk Central Credential Provider Lookup', inputs=aim_inputs, backend=aim_backend)\n", "path": "awx/main/credential_plugins/aim.py"}], "after_files": [{"content": "from .plugin import CredentialPlugin, CertFiles, raise_for_status\n\nfrom urllib.parse import quote, urlencode, urljoin\n\nfrom django.utils.translation import gettext_lazy as _\nimport requests\n\naim_inputs = {\n 'fields': [\n {\n 'id': 'url',\n 'label': _('CyberArk CCP URL'),\n 'type': 'string',\n 'format': 'url',\n },\n {\n 'id': 'webservice_id',\n 'label': _('Web Service ID'),\n 'type': 'string',\n 'help_text': _('The CCP Web Service ID. 
Leave blank to default to AIMWebService.'),\n },\n {\n 'id': 'app_id',\n 'label': _('Application ID'),\n 'type': 'string',\n 'secret': True,\n },\n {\n 'id': 'client_key',\n 'label': _('Client Key'),\n 'type': 'string',\n 'secret': True,\n 'multiline': True,\n },\n {\n 'id': 'client_cert',\n 'label': _('Client Certificate'),\n 'type': 'string',\n 'secret': True,\n 'multiline': True,\n },\n {\n 'id': 'verify',\n 'label': _('Verify SSL Certificates'),\n 'type': 'boolean',\n 'default': True,\n },\n ],\n 'metadata': [\n {\n 'id': 'object_query',\n 'label': _('Object Query'),\n 'type': 'string',\n 'help_text': _('Lookup query for the object. Ex: Safe=TestSafe;Object=testAccountName123'),\n },\n {'id': 'object_query_format', 'label': _('Object Query Format'), 'type': 'string', 'default': 'Exact', 'choices': ['Exact', 'Regexp']},\n {\n 'id': 'reason',\n 'label': _('Reason'),\n 'type': 'string',\n 'help_text': _('Object request reason. This is only needed if it is required by the object\\'s policy.'),\n },\n ],\n 'required': ['url', 'app_id', 'object_query'],\n}\n\n\ndef aim_backend(**kwargs):\n url = kwargs['url']\n client_cert = kwargs.get('client_cert', None)\n client_key = kwargs.get('client_key', None)\n verify = kwargs['verify']\n webservice_id = kwargs.get('webservice_id', '')\n app_id = kwargs['app_id']\n object_query = kwargs['object_query']\n object_query_format = kwargs['object_query_format']\n reason = kwargs.get('reason', None)\n if webservice_id == '':\n webservice_id = 'AIMWebService'\n\n query_params = {\n 'AppId': app_id,\n 'Query': object_query,\n 'QueryFormat': object_query_format,\n }\n if reason:\n query_params['reason'] = reason\n\n request_qs = '?' + urlencode(query_params, quote_via=quote)\n request_url = urljoin(url, '/'.join([webservice_id, 'api', 'Accounts']))\n\n with CertFiles(client_cert, client_key) as cert:\n res = requests.get(\n request_url + request_qs,\n timeout=30,\n cert=cert,\n verify=verify,\n allow_redirects=False,\n )\n raise_for_status(res)\n return res.json()['Content']\n\n\naim_plugin = CredentialPlugin('CyberArk Central Credential Provider Lookup', inputs=aim_inputs, backend=aim_backend)\n", "path": "awx/main/credential_plugins/aim.py"}]} | 1,921 | 150 |
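Aside on the record above: the patch swaps the hard `kwargs['webservice_id']` lookup for `kwargs.get('webservice_id', '')`, and the existing empty-string check then supplies the default. A minimal self-contained sketch of that behaviour; `resolve_webservice_id` is a hypothetical helper written only for illustration, not a function in the AWX codebase:

```python
def resolve_webservice_id(**kwargs):
    # A missing or empty 'webservice_id' no longer raises KeyError;
    # it falls back to CyberArk CCP's default service name instead.
    webservice_id = kwargs.get('webservice_id', '')
    if webservice_id == '':
        webservice_id = 'AIMWebService'
    return webservice_id


assert resolve_webservice_id(app_id='x', object_query='Safe=TestSafe') == 'AIMWebService'
assert resolve_webservice_id(webservice_id='MyCCP') == 'MyCCP'
```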
gh_patches_debug_2021 | rasdani/github-patches | git_diff | zigpy__zha-device-handlers-112 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Ikea group support bind method doesn't return status as expected
https://github.com/dmulcahey/zha-device-handlers/blob/b5b383939944ff541ee38a94c7f4d6cf3edc611f/zhaquirks/ikea/__init__.py#L25
https://github.com/home-assistant/home-assistant/blob/a30c37017b7782473294d7999e85d7a369a0539a/homeassistant/components/zha/core/helpers.py#L56
reported by @Adminiuga
We should return the status in a list (`[ ]`) so the bind helper in HA is happy.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zhaquirks/ikea/__init__.py`
Content:
```
1 """Ikea module."""
2 import logging
3 from zigpy.zcl.clusters.lightlink import LightLink
4 from zigpy.quirks import CustomCluster
5
6 _LOGGER = logging.getLogger(__name__)
7
8
9 class LightLinkCluster(CustomCluster, LightLink):
10 """Ikea LightLink cluster."""
11
12 async def bind(self):
13 """Bind LightLink cluster to coordinator."""
14 application = self._endpoint.device.application
15 try:
16 coordinator = application.get_device(application.ieee)
17 except KeyError:
18 _LOGGER.warning(
19 "Aborting - unable to locate required coordinator device."
20 )
21 return
22 group_list = await self.get_group_identifiers(0)
23 group_record = group_list[2]
24 group_id = group_record[0].group_id
25 await coordinator.add_to_group(group_id)
26
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zhaquirks/ikea/__init__.py b/zhaquirks/ikea/__init__.py
--- a/zhaquirks/ikea/__init__.py
+++ b/zhaquirks/ikea/__init__.py
@@ -22,4 +22,5 @@
group_list = await self.get_group_identifiers(0)
group_record = group_list[2]
group_id = group_record[0].group_id
- await coordinator.add_to_group(group_id)
+ status = await coordinator.add_to_group(group_id)
+ return [status]
| {"golden_diff": "diff --git a/zhaquirks/ikea/__init__.py b/zhaquirks/ikea/__init__.py\n--- a/zhaquirks/ikea/__init__.py\n+++ b/zhaquirks/ikea/__init__.py\n@@ -22,4 +22,5 @@\n group_list = await self.get_group_identifiers(0)\n group_record = group_list[2]\n group_id = group_record[0].group_id\n- await coordinator.add_to_group(group_id)\n+ status = await coordinator.add_to_group(group_id)\n+ return [status]\n", "issue": "Ikea group support bind method doesn't return status as expected\nhttps://github.com/dmulcahey/zha-device-handlers/blob/b5b383939944ff541ee38a94c7f4d6cf3edc611f/zhaquirks/ikea/__init__.py#L25\r\n\r\nhttps://github.com/home-assistant/home-assistant/blob/a30c37017b7782473294d7999e85d7a369a0539a/homeassistant/components/zha/core/helpers.py#L56\r\n\r\nreported by @Adminiuga \r\n\r\nwe should return the status in [ ] so the bind helper in HA is happy.\n", "before_files": [{"content": "\"\"\"Ikea module.\"\"\"\nimport logging\nfrom zigpy.zcl.clusters.lightlink import LightLink\nfrom zigpy.quirks import CustomCluster\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass LightLinkCluster(CustomCluster, LightLink):\n \"\"\"Ikea LightLink cluster.\"\"\"\n\n async def bind(self):\n \"\"\"Bind LightLink cluster to coordinator.\"\"\"\n application = self._endpoint.device.application\n try:\n coordinator = application.get_device(application.ieee)\n except KeyError:\n _LOGGER.warning(\n \"Aborting - unable to locate required coordinator device.\"\n )\n return\n group_list = await self.get_group_identifiers(0)\n group_record = group_list[2]\n group_id = group_record[0].group_id\n await coordinator.add_to_group(group_id)\n", "path": "zhaquirks/ikea/__init__.py"}], "after_files": [{"content": "\"\"\"Ikea module.\"\"\"\nimport logging\nfrom zigpy.zcl.clusters.lightlink import LightLink\nfrom zigpy.quirks import CustomCluster\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass LightLinkCluster(CustomCluster, LightLink):\n \"\"\"Ikea LightLink cluster.\"\"\"\n\n async def bind(self):\n \"\"\"Bind LightLink cluster to coordinator.\"\"\"\n application = self._endpoint.device.application\n try:\n coordinator = application.get_device(application.ieee)\n except KeyError:\n _LOGGER.warning(\n \"Aborting - unable to locate required coordinator device.\"\n )\n return\n group_list = await self.get_group_identifiers(0)\n group_record = group_list[2]\n group_id = group_record[0].group_id\n status = await coordinator.add_to_group(group_id)\n return [status]\n", "path": "zhaquirks/ikea/__init__.py"}]} | 643 | 130 |
gh_patches_debug_22275 | rasdani/github-patches | git_diff | wemake-services__wemake-python-styleguide-39 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Refactor how version is defined
Currently we just have a legacy `version.py` file with the version inside it.
It duplicates the version information from `pyproject.toml`.
That's how it should be: https://github.com/sdispater/poetry/issues/273#issuecomment-401983643
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wemake_python_styleguide/version.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 __version__ = '0.0.2' # noqa
4 # TODO: resolve after https://github.com/sdispater/poetry/issues/273
5
```
Path: `wemake_python_styleguide/checker.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 from ast import Module
4 from typing import Generator, Tuple
5
6 from wemake_python_styleguide.version import __version__
7 from wemake_python_styleguide.visitors.high_complexity import ComplexityVisitor
8 from wemake_python_styleguide.visitors.wrong_function_call import (
9 WrongFunctionCallVisitor,
10 )
11 from wemake_python_styleguide.visitors.wrong_import import WrongImportVisitor
12 from wemake_python_styleguide.visitors.wrong_keyword import (
13 WrongKeywordVisitor,
14 WrongRaiseVisitor,
15 )
16 from wemake_python_styleguide.visitors.wrong_name import (
17 WrongModuleMetadataVisitor,
18 WrongNameVisitor,
19 )
20 from wemake_python_styleguide.visitors.wrong_nested import WrongNestedVisitor
21
22 CheckResult = Tuple[int, int, str, type]
23
24
25 class Checker(object):
26 """
27 Main checker class.
28
29 Runs all possible checks.
30 """
31
32 name = 'wemake-python-styleguide'
33 version = __version__
34
35 def __init__(self, tree: Module, filename: str = '-') -> None:
36 """Creates new checker instance."""
37 self.tree = tree
38 self.filename = filename
39
40 self._visitors = (
41 WrongRaiseVisitor,
42 WrongFunctionCallVisitor,
43 WrongImportVisitor,
44 WrongKeywordVisitor,
45 WrongNestedVisitor,
46 ComplexityVisitor,
47 WrongNameVisitor,
48 WrongModuleMetadataVisitor,
49 )
50
51 def run(self) -> Generator[CheckResult, None, None]:
52 """
53 Runs the checker.
54
55 This method is used by `flake8` API.
56 """
57 for visitor_class in self._visitors:
58 visiter = visitor_class()
59 visiter.visit(self.tree)
60
61 for error in visiter.errors:
62 lineno, col_offset, message = error.node_items()
63 yield lineno, col_offset, message, type(self)
64
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wemake_python_styleguide/checker.py b/wemake_python_styleguide/checker.py
--- a/wemake_python_styleguide/checker.py
+++ b/wemake_python_styleguide/checker.py
@@ -3,7 +3,7 @@
from ast import Module
from typing import Generator, Tuple
-from wemake_python_styleguide.version import __version__
+from wemake_python_styleguide.version import version
from wemake_python_styleguide.visitors.high_complexity import ComplexityVisitor
from wemake_python_styleguide.visitors.wrong_function_call import (
WrongFunctionCallVisitor,
@@ -30,7 +30,7 @@
"""
name = 'wemake-python-styleguide'
- version = __version__
+ version = version
def __init__(self, tree: Module, filename: str = '-') -> None:
"""Creates new checker instance."""
diff --git a/wemake_python_styleguide/version.py b/wemake_python_styleguide/version.py
--- a/wemake_python_styleguide/version.py
+++ b/wemake_python_styleguide/version.py
@@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
-__version__ = '0.0.2' # noqa
-# TODO: resolve after https://github.com/sdispater/poetry/issues/273
+import pkg_resources
+
+version = pkg_resources.get_distribution('wemake-python-styleguide').version
| {"golden_diff": "diff --git a/wemake_python_styleguide/checker.py b/wemake_python_styleguide/checker.py\n--- a/wemake_python_styleguide/checker.py\n+++ b/wemake_python_styleguide/checker.py\n@@ -3,7 +3,7 @@\n from ast import Module\n from typing import Generator, Tuple\n \n-from wemake_python_styleguide.version import __version__\n+from wemake_python_styleguide.version import version\n from wemake_python_styleguide.visitors.high_complexity import ComplexityVisitor\n from wemake_python_styleguide.visitors.wrong_function_call import (\n WrongFunctionCallVisitor,\n@@ -30,7 +30,7 @@\n \"\"\"\n \n name = 'wemake-python-styleguide'\n- version = __version__\n+ version = version\n \n def __init__(self, tree: Module, filename: str = '-') -> None:\n \"\"\"Creates new checker instance.\"\"\"\ndiff --git a/wemake_python_styleguide/version.py b/wemake_python_styleguide/version.py\n--- a/wemake_python_styleguide/version.py\n+++ b/wemake_python_styleguide/version.py\n@@ -1,4 +1,5 @@\n # -*- coding: utf-8 -*-\n \n-__version__ = '0.0.2' # noqa\n-# TODO: resolve after https://github.com/sdispater/poetry/issues/273\n+import pkg_resources\n+\n+version = pkg_resources.get_distribution('wemake-python-styleguide').version\n", "issue": "Refactor how version is defined\nCurrently we just have a legacy `version.py` file with version inside it.\r\nIt duplicates the version information from `pyproject.toml`.\r\n\r\nThat's how it should be: https://github.com/sdispater/poetry/issues/273#issuecomment-401983643\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n__version__ = '0.0.2' # noqa\n# TODO: resolve after https://github.com/sdispater/poetry/issues/273\n", "path": "wemake_python_styleguide/version.py"}, {"content": "# -*- coding: utf-8 -*-\n\nfrom ast import Module\nfrom typing import Generator, Tuple\n\nfrom wemake_python_styleguide.version import __version__\nfrom wemake_python_styleguide.visitors.high_complexity import ComplexityVisitor\nfrom wemake_python_styleguide.visitors.wrong_function_call import (\n WrongFunctionCallVisitor,\n)\nfrom wemake_python_styleguide.visitors.wrong_import import WrongImportVisitor\nfrom wemake_python_styleguide.visitors.wrong_keyword import (\n WrongKeywordVisitor,\n WrongRaiseVisitor,\n)\nfrom wemake_python_styleguide.visitors.wrong_name import (\n WrongModuleMetadataVisitor,\n WrongNameVisitor,\n)\nfrom wemake_python_styleguide.visitors.wrong_nested import WrongNestedVisitor\n\nCheckResult = Tuple[int, int, str, type]\n\n\nclass Checker(object):\n \"\"\"\n Main checker class.\n\n Runs all possible checks.\n \"\"\"\n\n name = 'wemake-python-styleguide'\n version = __version__\n\n def __init__(self, tree: Module, filename: str = '-') -> None:\n \"\"\"Creates new checker instance.\"\"\"\n self.tree = tree\n self.filename = filename\n\n self._visitors = (\n WrongRaiseVisitor,\n WrongFunctionCallVisitor,\n WrongImportVisitor,\n WrongKeywordVisitor,\n WrongNestedVisitor,\n ComplexityVisitor,\n WrongNameVisitor,\n WrongModuleMetadataVisitor,\n )\n\n def run(self) -> Generator[CheckResult, None, None]:\n \"\"\"\n Runs the checker.\n\n This method is used by `flake8` API.\n \"\"\"\n for visitor_class in self._visitors:\n visiter = visitor_class()\n visiter.visit(self.tree)\n\n for error in visiter.errors:\n lineno, col_offset, message = error.node_items()\n yield lineno, col_offset, message, type(self)\n", "path": "wemake_python_styleguide/checker.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport pkg_resources\n\nversion = 
pkg_resources.get_distribution('wemake-python-styleguide').version\n", "path": "wemake_python_styleguide/version.py"}, {"content": "# -*- coding: utf-8 -*-\n\nfrom ast import Module\nfrom typing import Generator, Tuple\n\nfrom wemake_python_styleguide.version import version\nfrom wemake_python_styleguide.visitors.high_complexity import ComplexityVisitor\nfrom wemake_python_styleguide.visitors.wrong_function_call import (\n WrongFunctionCallVisitor,\n)\nfrom wemake_python_styleguide.visitors.wrong_import import WrongImportVisitor\nfrom wemake_python_styleguide.visitors.wrong_keyword import (\n WrongKeywordVisitor,\n WrongRaiseVisitor,\n)\nfrom wemake_python_styleguide.visitors.wrong_name import (\n WrongModuleMetadataVisitor,\n WrongNameVisitor,\n)\nfrom wemake_python_styleguide.visitors.wrong_nested import WrongNestedVisitor\n\nCheckResult = Tuple[int, int, str, type]\n\n\nclass Checker(object):\n \"\"\"\n Main checker class.\n\n Runs all possible checks.\n \"\"\"\n\n name = 'wemake-python-styleguide'\n version = version\n\n def __init__(self, tree: Module, filename: str = '-') -> None:\n \"\"\"Creates new checker instance.\"\"\"\n self.tree = tree\n self.filename = filename\n\n self._visitors = (\n WrongRaiseVisitor,\n WrongFunctionCallVisitor,\n WrongImportVisitor,\n WrongKeywordVisitor,\n WrongNestedVisitor,\n ComplexityVisitor,\n WrongNameVisitor,\n WrongModuleMetadataVisitor,\n )\n\n def run(self) -> Generator[CheckResult, None, None]:\n \"\"\"\n Runs the checker.\n\n This method is used by `flake8` API.\n \"\"\"\n for visitor_class in self._visitors:\n visiter = visitor_class()\n visiter.visit(self.tree)\n\n for error in visiter.errors:\n lineno, col_offset, message = error.node_items()\n yield lineno, col_offset, message, type(self)\n", "path": "wemake_python_styleguide/checker.py"}]} | 918 | 320 |
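Aside on the record above: the whole patched `version.py` reduces to one metadata lookup against the installed distribution. Reproduced standalone below; it only works once the package is installed, otherwise `pkg_resources.DistributionNotFound` is raised:

```python
import pkg_resources

# Read the version from the installed package metadata instead of a
# hard-coded string, so pyproject.toml stays the single source of truth.
version = pkg_resources.get_distribution('wemake-python-styleguide').version
print(version)
```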
gh_patches_debug_4756 | rasdani/github-patches | git_diff | keras-team__keras-core-579 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Using torch backend
Using PyTorch backend.
Epoch 1/3
32/32 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - mean_absolute_error: 0.4083 - loss: 0.2566
Epoch 2/3
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - mean_absolute_error: 0.3805 - loss: 0.2151
Epoch 3/3
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - mean_absolute_error: 0.3704 - loss: 0.2056
Epoch 1/5
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.2699 - mae: 0.4200
Epoch 2/5
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.2409 - mae: 0.3940
Epoch 3/5
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.2271 - mae: 0.3856
Epoch 4/5
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.2174 - mae: 0.3785
Epoch 5/5
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.2120 - mae: 0.3699
Epoch 1/3
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - mean_absolute_error: 0.7020 - loss: 0.3334
Epoch 2/3
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - mean_absolute_error: 0.4075 - loss: 0.1271
Epoch 3/3
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - mean_absolute_error: 0.3776 - loss: 0.1010
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - mean_absolute_error: 0.8608 - loss: 0.9672
Traceback (most recent call last):
File "E:\custom_train_step_in_torch.py", line 483, in <module>
gan.fit(dataloader, epochs=1)
File "C:\Python_310\lib\site-packages\keras_core\src\utils\traceback_utils.py", line 123, in error_handler
raise e.with_traceback(filtered_tb) from None
File "C:\Python_310\lib\site-packages\keras_core\src\utils\module_utils.py", line 26, in initialize
raise ImportError(
ImportError: This requires the tensorflow module. You can install it via `pip install tensorflow`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `keras_core/utils/module_utils.py`
Content:
```
1 import importlib
2
3
4 class LazyModule:
5 def __init__(self, name, pip_name=None):
6 self.name = name
7 pip_name = pip_name or name
8 self.pip_name = pip_name
9 self.module = None
10 self._available = None
11
12 @property
13 def available(self):
14 if self._available is None:
15 try:
16 self.initialize()
17 except ImportError:
18 self._available = False
19 self._available = True
20 return self._available
21
22 def initialize(self):
23 try:
24 self.module = importlib.import_module(self.name)
25 except ImportError:
26 raise ImportError(
27 f"This requires the {self.name} module. "
28 f"You can install it via `pip install {self.pip_name}`"
29 )
30
31 def __getattr__(self, name):
32 if self.module is None:
33 self.initialize()
34 return getattr(self.module, name)
35
36
37 tensorflow = LazyModule("tensorflow")
38 gfile = LazyModule("tensorflow.io.gfile")
39
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/keras_core/utils/module_utils.py b/keras_core/utils/module_utils.py
--- a/keras_core/utils/module_utils.py
+++ b/keras_core/utils/module_utils.py
@@ -14,9 +14,9 @@
if self._available is None:
try:
self.initialize()
+ self._available = True
except ImportError:
self._available = False
- self._available = True
return self._available
def initialize(self):
| {"golden_diff": "diff --git a/keras_core/utils/module_utils.py b/keras_core/utils/module_utils.py\n--- a/keras_core/utils/module_utils.py\n+++ b/keras_core/utils/module_utils.py\n@@ -14,9 +14,9 @@\n if self._available is None:\n try:\n self.initialize()\n+ self._available = True\n except ImportError:\n self._available = False\n- self._available = True\n return self._available\n \n def initialize(self):\n", "issue": "Using torch backend\nUsing PyTorch backend.\r\nEpoch 1/3\r\n32/32 \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501 1s 4ms/step - mean_absolute_error: 0.4083 - loss: 0.2566\r\nEpoch 2/3\r\n32/32 \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501 0s 4ms/step - mean_absolute_error: 0.3805 - loss: 0.2151\r\nEpoch 3/3\r\n32/32 \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501 0s 4ms/step - mean_absolute_error: 0.3704 - loss: 0.2056\r\nEpoch 1/5\r\n32/32 \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501 0s 4ms/step - loss: 0.2699 - mae: 0.4200\r\nEpoch 2/5\r\n32/32 \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501 0s 4ms/step - loss: 0.2409 - mae: 0.3940\r\nEpoch 3/5\r\n32/32 \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501 0s 4ms/step - loss: 0.2271 - mae: 0.3856\r\nEpoch 4/5\r\n32/32 \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501 0s 4ms/step - loss: 0.2174 - mae: 0.3785\r\nEpoch 5/5\r\n32/32 \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501 0s 4ms/step - loss: 0.2120 - mae: 0.3699\r\nEpoch 1/3\r\n32/32 \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501 0s 4ms/step - mean_absolute_error: 0.7020 - loss: 0.3334\r\nEpoch 2/3\r\n32/32 \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501 0s 4ms/step - mean_absolute_error: 0.4075 - loss: 0.1271\r\nEpoch 3/3\r\n32/32 \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501 0s 4ms/step - mean_absolute_error: 0.3776 - loss: 0.1010\r\n32/32 \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501 0s 2ms/step - mean_absolute_error: 0.8608 - loss: 0.9672\r\nTraceback (most recent call last):\r\n File \"E:\\custom_train_step_in_torch.py\", line 483, in <module>\r\n gan.fit(dataloader, epochs=1)\r\n File \"C:\\Python_310\\lib\\site-packages\\keras_core\\src\\utils\\traceback_utils.py\", line 123, in error_handler\r\n raise e.with_traceback(filtered_tb) from None\r\n File \"C:\\Python_310\\lib\\site-packages\\keras_core\\src\\utils\\module_utils.py\", line 26, in initialize\r\n raise ImportError(\r\nImportError: This requires the tensorflow module. 
You can install it via `pip install tensorflow`\n", "before_files": [{"content": "import importlib\n\n\nclass LazyModule:\n def __init__(self, name, pip_name=None):\n self.name = name\n pip_name = pip_name or name\n self.pip_name = pip_name\n self.module = None\n self._available = None\n\n @property\n def available(self):\n if self._available is None:\n try:\n self.initialize()\n except ImportError:\n self._available = False\n self._available = True\n return self._available\n\n def initialize(self):\n try:\n self.module = importlib.import_module(self.name)\n except ImportError:\n raise ImportError(\n f\"This requires the {self.name} module. \"\n f\"You can install it via `pip install {self.pip_name}`\"\n )\n\n def __getattr__(self, name):\n if self.module is None:\n self.initialize()\n return getattr(self.module, name)\n\n\ntensorflow = LazyModule(\"tensorflow\")\ngfile = LazyModule(\"tensorflow.io.gfile\")\n", "path": "keras_core/utils/module_utils.py"}], "after_files": [{"content": "import importlib\n\n\nclass LazyModule:\n def __init__(self, name, pip_name=None):\n self.name = name\n pip_name = pip_name or name\n self.pip_name = pip_name\n self.module = None\n self._available = None\n\n @property\n def available(self):\n if self._available is None:\n try:\n self.initialize()\n self._available = True\n except ImportError:\n self._available = False\n return self._available\n\n def initialize(self):\n try:\n self.module = importlib.import_module(self.name)\n except ImportError:\n raise ImportError(\n f\"This requires the {self.name} module. \"\n f\"You can install it via `pip install {self.pip_name}`\"\n )\n\n def __getattr__(self, name):\n if self.module is None:\n self.initialize()\n return getattr(self.module, name)\n\n\ntensorflow = LazyModule(\"tensorflow\")\ngfile = LazyModule(\"tensorflow.io.gfile\")\n", "path": "keras_core/utils/module_utils.py"}]} | 1,345 | 109 |
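Aside on the record above: the bug is that `self._available = True` ran unconditionally after the `try`/`except`, so a failed import still reported the module as available and later code went on to touch TensorFlow. A trimmed-down sketch of the corrected property; it drops `pip_name` and module caching from the real class to stay short:

```python
import importlib


class LazyModule:
    def __init__(self, name):
        self.name = name
        self._available = None

    @property
    def available(self):
        if self._available is None:
            try:
                importlib.import_module(self.name)
                self._available = True   # set only after a successful import
            except ImportError:
                self._available = False
        return self._available


assert LazyModule("math").available is True
assert LazyModule("surely_not_installed_xyz").available is False
```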
gh_patches_debug_25289 | rasdani/github-patches | git_diff | easybuilders__easybuild-framework-4551 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Environment variable change in module cray-libsci of CPE 23.12
Hi, I report a bug affecting EasyBuild on Cray systems (file [libsci.py](https://github.com/easybuilders/easybuild-framework/blob/develop/easybuild/toolchains/linalg/libsci.py)) with the Cray Programming Environment (CPE) 23.12. The bug should be fixed in CPE 24.03 according to HPE/Cray staff, therefore the impact is limited:
- The environment variable name referenced in [line 68](https://github.com/easybuilders/easybuild-framework/blob/e4524c1c70e496e5886de7d4848bb8147eea84bd/easybuild/toolchains/linalg/libsci.py#L68) changed from `CRAY_LIBSCI_PREFIX_DIR` to `CRAY_PE_LIBSCI_PREFIX_DIR`
- I have manually fixed [line 69](https://github.com/easybuilders/easybuild-framework/blob/e4524c1c70e496e5886de7d4848bb8147eea84bd/easybuild/toolchains/linalg/libsci.py#L69) using the workaround below:
`root = os.getenv('CRAY_LIBSCI_PREFIX_DIR', None) or os.getenv('CRAY_PE_LIBSCI_PREFIX_DIR', None)`
The environment variable name should be fixed back to the original one in CPE 24.03 (I did not have the chance to test it yet, though). Since CPE variable names change sometimes, it might be useful to give the option to read the `prefix` of the external module `cray-libsci` from a [metadata file](https://docs.easybuild.io/using-external-modules/?h=metadata#using_external_modules_metadata) instead of having it hard coded.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `easybuild/toolchains/linalg/libsci.py`
Content:
```
1 ##
2 # Copyright 2014-2024 Ghent University
3 #
4 # This file is part of EasyBuild,
5 # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
6 # with support of Ghent University (http://ugent.be/hpc),
7 # the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
8 # Flemish Research Foundation (FWO) (http://www.fwo.be/en)
9 # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
10 #
11 # https://github.com/easybuilders/easybuild
12 #
13 # EasyBuild is free software: you can redistribute it and/or modify
14 # it under the terms of the GNU General Public License as published by
15 # the Free Software Foundation v2.
16 #
17 # EasyBuild is distributed in the hope that it will be useful,
18 # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 # GNU General Public License for more details.
21 #
22 # You should have received a copy of the GNU General Public License
23 # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
24 ##
25 """
26 Support for Cray's LibSci library, which provides BLAS/LAPACK support.
27 cfr. https://www.nersc.gov/users/software/programming-libraries/math-libraries/libsci/
28
29 Authors:
30
31 * Petar Forai (IMP/IMBA, Austria)
32 * Kenneth Hoste (Ghent University)
33 """
34 import os
35
36 from easybuild.tools.build_log import EasyBuildError
37 from easybuild.tools.toolchain.linalg import LinAlg
38
39
40 CRAY_LIBSCI_MODULE_NAME = 'cray-libsci'
41 TC_CONSTANT_CRAY_LIBSCI = 'CrayLibSci'
42
43
44 class LibSci(LinAlg):
45 """Support for Cray's LibSci library, which provides BLAS/LAPACK support."""
46 # BLAS/LAPACK support
47 # via cray-libsci module, which gets loaded via the PrgEnv module
48 # see https://www.nersc.gov/users/software/programming-libraries/math-libraries/libsci/
49 BLAS_MODULE_NAME = [CRAY_LIBSCI_MODULE_NAME]
50
51 # no need to specify libraries, compiler driver takes care of linking the right libraries
52 # FIXME: need to revisit this, on numpy we ended up with a serial BLAS through the wrapper.
53 BLAS_LIB = ['']
54 BLAS_LIB_MT = ['']
55 BLAS_FAMILY = TC_CONSTANT_CRAY_LIBSCI
56
57 LAPACK_MODULE_NAME = [CRAY_LIBSCI_MODULE_NAME]
58 LAPACK_IS_BLAS = True
59 LAPACK_FAMILY = TC_CONSTANT_CRAY_LIBSCI
60
61 BLACS_MODULE_NAME = []
62 SCALAPACK_MODULE_NAME = []
63
64 def _get_software_root(self, name, required=True):
65 """Get install prefix for specified software name; special treatment for Cray modules."""
66 if name == 'cray-libsci':
67 # Cray-provided LibSci module
68 env_var = 'CRAY_LIBSCI_PREFIX_DIR'
69 root = os.getenv(env_var, None)
70 if root is None:
71 if required:
72 raise EasyBuildError("Failed to determine install prefix for %s via $%s", name, env_var)
73 else:
74 self.log.debug("Obtained install prefix for %s via $%s: %s", name, env_var, root)
75 else:
76 root = super(LibSci, self)._get_software_root(name, required=required)
77
78 return root
79
80 def _set_blacs_variables(self):
81 """Skip setting BLACS related variables"""
82 pass
83
84 def _set_scalapack_variables(self):
85 """Skip setting ScaLAPACK related variables"""
86 pass
87
88 def definition(self):
89 """
90 Filter BLAS module from toolchain definition.
91 The cray-libsci module is loaded indirectly (and versionless) via the PrgEnv module,
92 and thus is not a direct toolchain component.
93 """
94 tc_def = super(LibSci, self).definition()
95 tc_def['BLAS'] = []
96 tc_def['LAPACK'] = []
97 return tc_def
98
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/easybuild/toolchains/linalg/libsci.py b/easybuild/toolchains/linalg/libsci.py
--- a/easybuild/toolchains/linalg/libsci.py
+++ b/easybuild/toolchains/linalg/libsci.py
@@ -65,13 +65,20 @@
"""Get install prefix for specified software name; special treatment for Cray modules."""
if name == 'cray-libsci':
# Cray-provided LibSci module
- env_var = 'CRAY_LIBSCI_PREFIX_DIR'
- root = os.getenv(env_var, None)
+ root = None
+ # consider both $CRAY_LIBSCI_PREFIX_DIR and $CRAY_PE_LIBSCI_PREFIX_DIR,
+ # cfr. https://github.com/easybuilders/easybuild-framework/issues/4536
+ env_vars = ('CRAY_LIBSCI_PREFIX_DIR', 'CRAY_PE_LIBSCI_PREFIX_DIR')
+ for env_var in env_vars:
+ root = os.getenv(env_var, None)
+ if root is not None:
+ self.log.debug("Obtained install prefix for %s via $%s: %s", name, env_var, root)
+ break
+
if root is None:
if required:
- raise EasyBuildError("Failed to determine install prefix for %s via $%s", name, env_var)
- else:
- self.log.debug("Obtained install prefix for %s via $%s: %s", name, env_var, root)
+ env_vars_str = ', '.join('$' + e for e in env_vars)
+ raise EasyBuildError("Failed to determine install prefix for %s via $%s", name, env_vars_str)
else:
root = super(LibSci, self)._get_software_root(name, required=required)
| {"golden_diff": "diff --git a/easybuild/toolchains/linalg/libsci.py b/easybuild/toolchains/linalg/libsci.py\n--- a/easybuild/toolchains/linalg/libsci.py\n+++ b/easybuild/toolchains/linalg/libsci.py\n@@ -65,13 +65,20 @@\n \"\"\"Get install prefix for specified software name; special treatment for Cray modules.\"\"\"\n if name == 'cray-libsci':\n # Cray-provided LibSci module\n- env_var = 'CRAY_LIBSCI_PREFIX_DIR'\n- root = os.getenv(env_var, None)\n+ root = None\n+ # consider both $CRAY_LIBSCI_PREFIX_DIR and $CRAY_PE_LIBSCI_PREFIX_DIR,\n+ # cfr. https://github.com/easybuilders/easybuild-framework/issues/4536\n+ env_vars = ('CRAY_LIBSCI_PREFIX_DIR', 'CRAY_PE_LIBSCI_PREFIX_DIR')\n+ for env_var in env_vars:\n+ root = os.getenv(env_var, None)\n+ if root is not None:\n+ self.log.debug(\"Obtained install prefix for %s via $%s: %s\", name, env_var, root)\n+ break\n+\n if root is None:\n if required:\n- raise EasyBuildError(\"Failed to determine install prefix for %s via $%s\", name, env_var)\n- else:\n- self.log.debug(\"Obtained install prefix for %s via $%s: %s\", name, env_var, root)\n+ env_vars_str = ', '.join('$' + e for e in env_vars)\n+ raise EasyBuildError(\"Failed to determine install prefix for %s via $%s\", name, env_vars_str)\n else:\n root = super(LibSci, self)._get_software_root(name, required=required)\n", "issue": "Environment variable change in module cray-libsci of CPE 23.12\nHi, I report a bug affecting EasyBuild on Cray systems (file [libsci.py](https://github.com/easybuilders/easybuild-framework/blob/develop/easybuild/toolchains/linalg/libsci.py)) with the Cray Programming Environment (CPE) 23.12. The bug should be fixed in CPE 24.03 according to HPE/Cray staff, therefore the impact is limited:\r\n- The environment variable name referenced in [line 68](https://github.com/easybuilders/easybuild-framework/blob/e4524c1c70e496e5886de7d4848bb8147eea84bd/easybuild/toolchains/linalg/libsci.py#L68) changed from `CRAY_LIBSCI_PREFIX_DIR` to `CRAY_PE_LIBSCI_PREFIX_DIR`\r\n- I have manually fixed [line 69](https://github.com/easybuilders/easybuild-framework/blob/e4524c1c70e496e5886de7d4848bb8147eea84bd/easybuild/toolchains/linalg/libsci.py#L69) using the workaround below:\r\n `root = os.getenv('CRAY_LIBSCI_PREFIX_DIR', None) or os.getenv('CRAY_PE_LIBSCI_PREFIX_DIR', None)`\r\n\r\nThe environment variable name should be fixed back to the original one in CPE 24.03 (I did not have the chance to test it yet, though). 
Since CPE variable names change sometimes, it might be useful to give the option to read the `prefix` of the external module `cray-libsci` from a [metadata file](https://docs.easybuild.io/using-external-modules/?h=metadata#using_external_modules_metadata) instead of having it hard coded.\r\n\n", "before_files": [{"content": "##\n# Copyright 2014-2024 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nSupport for Cray's LibSci library, which provides BLAS/LAPACK support.\ncfr. https://www.nersc.gov/users/software/programming-libraries/math-libraries/libsci/\n\nAuthors:\n\n* Petar Forai (IMP/IMBA, Austria)\n* Kenneth Hoste (Ghent University)\n\"\"\"\nimport os\n\nfrom easybuild.tools.build_log import EasyBuildError\nfrom easybuild.tools.toolchain.linalg import LinAlg\n\n\nCRAY_LIBSCI_MODULE_NAME = 'cray-libsci'\nTC_CONSTANT_CRAY_LIBSCI = 'CrayLibSci'\n\n\nclass LibSci(LinAlg):\n \"\"\"Support for Cray's LibSci library, which provides BLAS/LAPACK support.\"\"\"\n # BLAS/LAPACK support\n # via cray-libsci module, which gets loaded via the PrgEnv module\n # see https://www.nersc.gov/users/software/programming-libraries/math-libraries/libsci/\n BLAS_MODULE_NAME = [CRAY_LIBSCI_MODULE_NAME]\n\n # no need to specify libraries, compiler driver takes care of linking the right libraries\n # FIXME: need to revisit this, on numpy we ended up with a serial BLAS through the wrapper.\n BLAS_LIB = ['']\n BLAS_LIB_MT = ['']\n BLAS_FAMILY = TC_CONSTANT_CRAY_LIBSCI\n\n LAPACK_MODULE_NAME = [CRAY_LIBSCI_MODULE_NAME]\n LAPACK_IS_BLAS = True\n LAPACK_FAMILY = TC_CONSTANT_CRAY_LIBSCI\n\n BLACS_MODULE_NAME = []\n SCALAPACK_MODULE_NAME = []\n\n def _get_software_root(self, name, required=True):\n \"\"\"Get install prefix for specified software name; special treatment for Cray modules.\"\"\"\n if name == 'cray-libsci':\n # Cray-provided LibSci module\n env_var = 'CRAY_LIBSCI_PREFIX_DIR'\n root = os.getenv(env_var, None)\n if root is None:\n if required:\n raise EasyBuildError(\"Failed to determine install prefix for %s via $%s\", name, env_var)\n else:\n self.log.debug(\"Obtained install prefix for %s via $%s: %s\", name, env_var, root)\n else:\n root = super(LibSci, self)._get_software_root(name, required=required)\n\n return root\n\n def _set_blacs_variables(self):\n \"\"\"Skip setting BLACS related variables\"\"\"\n pass\n\n def _set_scalapack_variables(self):\n \"\"\"Skip setting ScaLAPACK related variables\"\"\"\n pass\n\n def definition(self):\n \"\"\"\n Filter BLAS module from toolchain definition.\n The cray-libsci module is 
loaded indirectly (and versionless) via the PrgEnv module,\n and thus is not a direct toolchain component.\n \"\"\"\n tc_def = super(LibSci, self).definition()\n tc_def['BLAS'] = []\n tc_def['LAPACK'] = []\n return tc_def\n", "path": "easybuild/toolchains/linalg/libsci.py"}], "after_files": [{"content": "##\n# Copyright 2014-2024 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nSupport for Cray's LibSci library, which provides BLAS/LAPACK support.\ncfr. https://www.nersc.gov/users/software/programming-libraries/math-libraries/libsci/\n\nAuthors:\n\n* Petar Forai (IMP/IMBA, Austria)\n* Kenneth Hoste (Ghent University)\n\"\"\"\nimport os\n\nfrom easybuild.tools.build_log import EasyBuildError\nfrom easybuild.tools.toolchain.linalg import LinAlg\n\n\nCRAY_LIBSCI_MODULE_NAME = 'cray-libsci'\nTC_CONSTANT_CRAY_LIBSCI = 'CrayLibSci'\n\n\nclass LibSci(LinAlg):\n \"\"\"Support for Cray's LibSci library, which provides BLAS/LAPACK support.\"\"\"\n # BLAS/LAPACK support\n # via cray-libsci module, which gets loaded via the PrgEnv module\n # see https://www.nersc.gov/users/software/programming-libraries/math-libraries/libsci/\n BLAS_MODULE_NAME = [CRAY_LIBSCI_MODULE_NAME]\n\n # no need to specify libraries, compiler driver takes care of linking the right libraries\n # FIXME: need to revisit this, on numpy we ended up with a serial BLAS through the wrapper.\n BLAS_LIB = ['']\n BLAS_LIB_MT = ['']\n BLAS_FAMILY = TC_CONSTANT_CRAY_LIBSCI\n\n LAPACK_MODULE_NAME = [CRAY_LIBSCI_MODULE_NAME]\n LAPACK_IS_BLAS = True\n LAPACK_FAMILY = TC_CONSTANT_CRAY_LIBSCI\n\n BLACS_MODULE_NAME = []\n SCALAPACK_MODULE_NAME = []\n\n def _get_software_root(self, name, required=True):\n \"\"\"Get install prefix for specified software name; special treatment for Cray modules.\"\"\"\n if name == 'cray-libsci':\n # Cray-provided LibSci module\n root = None\n # consider both $CRAY_LIBSCI_PREFIX_DIR and $CRAY_PE_LIBSCI_PREFIX_DIR,\n # cfr. 
https://github.com/easybuilders/easybuild-framework/issues/4536\n env_vars = ('CRAY_LIBSCI_PREFIX_DIR', 'CRAY_PE_LIBSCI_PREFIX_DIR')\n for env_var in env_vars:\n root = os.getenv(env_var, None)\n if root is not None:\n self.log.debug(\"Obtained install prefix for %s via $%s: %s\", name, env_var, root)\n break\n\n if root is None:\n if required:\n env_vars_str = ', '.join('$' + e for e in env_vars)\n raise EasyBuildError(\"Failed to determine install prefix for %s via $%s\", name, env_vars_str)\n else:\n root = super(LibSci, self)._get_software_root(name, required=required)\n\n return root\n\n def _set_blacs_variables(self):\n \"\"\"Skip setting BLACS related variables\"\"\"\n pass\n\n def _set_scalapack_variables(self):\n \"\"\"Skip setting ScaLAPACK related variables\"\"\"\n pass\n\n def definition(self):\n \"\"\"\n Filter BLAS module from toolchain definition.\n The cray-libsci module is loaded indirectly (and versionless) via the PrgEnv module,\n and thus is not a direct toolchain component.\n \"\"\"\n tc_def = super(LibSci, self).definition()\n tc_def['BLAS'] = []\n tc_def['LAPACK'] = []\n return tc_def\n", "path": "easybuild/toolchains/linalg/libsci.py"}]} | 1,774 | 399 |
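Aside on the record above: the fix simply probes both spellings of the environment variable before giving up. A standalone sketch of that lookup; `get_libsci_prefix` and the plain `RuntimeError` are illustrative stand-ins for the framework's `_get_software_root` and `EasyBuildError`:

```python
import os


def get_libsci_prefix(required=True):
    # CPE 23.12 renamed the variable; try the old name first, then the new one.
    env_vars = ('CRAY_LIBSCI_PREFIX_DIR', 'CRAY_PE_LIBSCI_PREFIX_DIR')
    for env_var in env_vars:
        root = os.getenv(env_var)
        if root is not None:
            return root
    if required:
        raise RuntimeError("Failed to determine install prefix via $" + ", $".join(env_vars))
    return None
```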
gh_patches_debug_1251 | rasdani/github-patches | git_diff | chainer__chainer-987 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix the shape of return value of F.det
Currently, the return value of `det` is an `xp.array` whose shape is `(1,)`, not a scalar.
```
In [16]: a = chainer.Variable(numpy.random.uniform(-1, 1, (3, 3)).astype(numpy.float32))
In [17]: chainer.functions.det(a).data
Out[17]: array([-0.80874199], dtype=float32)
```
But the documentation says the return value should be a `chainer.Variable` whose data has the shape `()`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/functions/math/det.py`
Content:
```
1 import numpy
2
3 from chainer import cuda
4 from chainer import function
5 from chainer.functions.array import reshape
6 from chainer.functions.math import inv
7 from chainer.functions.math import matmul
8 from chainer import utils
9 from chainer.utils import type_check
10
11
12 def _det_gpu(b):
13 # We do a batched LU decomposition on the GPU to compute
14 # and compute the determinant by multiplying the diagonal.
15 # Change the shape of the array to be size=1 minibatch if necessary.
16 # Also copy the matrix as the elments will be modified in-place.
17 a = matmul._as_batch_mat(b).copy()
18 n = a.shape[1]
19 n_matrices = len(a)
20 # Pivot array
21 p = cuda.cupy.zeros((n_matrices, n), dtype='int32')
22 # Output array
23 # These arrays hold information on the execution success
24 # or if the matrix was singular.
25 info1 = cuda.cupy.zeros(n_matrices, dtype=numpy.intp)
26 ap = matmul._mat_ptrs(a)
27 _, lda = matmul._get_ld(a)
28 cuda.cublas.sgetrfBatched(cuda.Device().cublas_handle, n, ap.data.ptr, lda,
29 p.data.ptr, info1.data.ptr, n_matrices)
30 det = cuda.cupy.prod(a.diagonal(axis1=1, axis2=2), axis=1)
31 # The determinant is equal to the product of the diagonal entries
32 # of `a` where the sign of `a` is flipped depending on whether
33 # the pivot array is equal to its index.
34 rng = cuda.cupy.arange(1, n + 1, dtype='int32')
35 parity = cuda.cupy.sum(p != rng, axis=1) % 2
36 sign = 1. - 2. * parity.astype('float32')
37 success = cuda.cupy.all(info1 == 0)
38 return det * sign, success
39
40
41 class BatchDet(function.Function):
42
43 @property
44 def label(self):
45 return 'det'
46
47 def check_type_forward(self, in_types):
48 type_check.expect(in_types.size() == 1)
49 a_type, = in_types
50 a_type = matmul._convert_type(a_type)
51 type_check.expect(a_type.dtype.kind == 'f')
52 # Only a minibatch of 2D array shapes allowed.
53 type_check.expect(a_type.ndim == 3)
54 # Matrix inversion only allowed for square matrices
55 # so assert the last two dimensions are equal.
56 type_check.expect(a_type.shape[-1] == a_type.shape[-2])
57
58 def forward_cpu(self, x):
59 self.detx = utils.force_array(numpy.linalg.det(x[0]))
60 return self.detx,
61
62 def forward_gpu(self, x):
63 self.detx, success = _det_gpu(x[0])
64 if not success:
65 raise ValueError('Singular Matrix')
66 return self.detx,
67
68 def backward_cpu(self, x, gy):
69 x, = x
70 gy, = gy
71 grad = (gy[:, None, None] * self.detx[:, None, None] *
72 numpy.linalg.inv(x.transpose((0, 2, 1))))
73 return utils.force_array(grad),
74
75 def backward_gpu(self, x, gy):
76 x, = x
77 gy, = gy
78 grad = (gy[:, None, None] * self.detx[:, None, None] *
79 inv._inv_gpu(x.transpose((0, 2, 1))))
80 return utils.force_array(grad),
81
82
83 def batch_det(a):
84 """Computes the determinant of a batch of square matrices.
85
86 Args:
87 a (Variable): Input array to compute the determinant for.
88 The first dimension should iterate over each matrix and be
89 of the batchsize.
90
91 Returns:
92 ~chainer.Variable: vector of determinants for every matrix
93 in the batch.
94
95 """
96 return BatchDet()(a)
97
98
99 def det(a):
100 """Computes the determinant of a single square matrix.
101
102 Args:
103 a (Variable): Input array to compute the determinant for.
104
105 Returns:
106 ~chainer.Variable: Scalar determinant of the matrix a.
107
108 """
109 shape = (1, len(a.data), a.data.shape[1])
110 batched_a = reshape.Reshape(shape)(a)
111 batched_det = BatchDet()(batched_a)
112 return reshape.Reshape((1, ))(batched_det)
113
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/chainer/functions/math/det.py b/chainer/functions/math/det.py
--- a/chainer/functions/math/det.py
+++ b/chainer/functions/math/det.py
@@ -109,4 +109,4 @@
shape = (1, len(a.data), a.data.shape[1])
batched_a = reshape.Reshape(shape)(a)
batched_det = BatchDet()(batched_a)
- return reshape.Reshape((1, ))(batched_det)
+ return reshape.Reshape(())(batched_det)
| {"golden_diff": "diff --git a/chainer/functions/math/det.py b/chainer/functions/math/det.py\n--- a/chainer/functions/math/det.py\n+++ b/chainer/functions/math/det.py\n@@ -109,4 +109,4 @@\n shape = (1, len(a.data), a.data.shape[1])\n batched_a = reshape.Reshape(shape)(a)\n batched_det = BatchDet()(batched_a)\n- return reshape.Reshape((1, ))(batched_det)\n+ return reshape.Reshape(())(batched_det)\n", "issue": "Fix the shape of return value of F.det\nCurrently, return value of `det` is `xp.array` whose shape is `(1, )`, not a scalar.\n\n```\nIn [16]: a = chainer.Variable(numpy.random.uniform(-1, 1, (3, 3)).astype(numpy.float32))\nIn [17]: chainer.functions.det(a).data\nOut[17]: array([-0.80874199], dtype=float32)\n```\n\nBut the document says the return value should be `chainer.Variable` whose data have the shape `()`.\n\n", "before_files": [{"content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.functions.array import reshape\nfrom chainer.functions.math import inv\nfrom chainer.functions.math import matmul\nfrom chainer import utils\nfrom chainer.utils import type_check\n\n\ndef _det_gpu(b):\n # We do a batched LU decomposition on the GPU to compute\n # and compute the determinant by multiplying the diagonal.\n # Change the shape of the array to be size=1 minibatch if necessary.\n # Also copy the matrix as the elments will be modified in-place.\n a = matmul._as_batch_mat(b).copy()\n n = a.shape[1]\n n_matrices = len(a)\n # Pivot array\n p = cuda.cupy.zeros((n_matrices, n), dtype='int32')\n # Output array\n # These arrays hold information on the execution success\n # or if the matrix was singular.\n info1 = cuda.cupy.zeros(n_matrices, dtype=numpy.intp)\n ap = matmul._mat_ptrs(a)\n _, lda = matmul._get_ld(a)\n cuda.cublas.sgetrfBatched(cuda.Device().cublas_handle, n, ap.data.ptr, lda,\n p.data.ptr, info1.data.ptr, n_matrices)\n det = cuda.cupy.prod(a.diagonal(axis1=1, axis2=2), axis=1)\n # The determinant is equal to the product of the diagonal entries\n # of `a` where the sign of `a` is flipped depending on whether\n # the pivot array is equal to its index.\n rng = cuda.cupy.arange(1, n + 1, dtype='int32')\n parity = cuda.cupy.sum(p != rng, axis=1) % 2\n sign = 1. - 2. 
* parity.astype('float32')\n success = cuda.cupy.all(info1 == 0)\n return det * sign, success\n\n\nclass BatchDet(function.Function):\n\n @property\n def label(self):\n return 'det'\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n a_type, = in_types\n a_type = matmul._convert_type(a_type)\n type_check.expect(a_type.dtype.kind == 'f')\n # Only a minibatch of 2D array shapes allowed.\n type_check.expect(a_type.ndim == 3)\n # Matrix inversion only allowed for square matrices\n # so assert the last two dimensions are equal.\n type_check.expect(a_type.shape[-1] == a_type.shape[-2])\n\n def forward_cpu(self, x):\n self.detx = utils.force_array(numpy.linalg.det(x[0]))\n return self.detx,\n\n def forward_gpu(self, x):\n self.detx, success = _det_gpu(x[0])\n if not success:\n raise ValueError('Singular Matrix')\n return self.detx,\n\n def backward_cpu(self, x, gy):\n x, = x\n gy, = gy\n grad = (gy[:, None, None] * self.detx[:, None, None] *\n numpy.linalg.inv(x.transpose((0, 2, 1))))\n return utils.force_array(grad),\n\n def backward_gpu(self, x, gy):\n x, = x\n gy, = gy\n grad = (gy[:, None, None] * self.detx[:, None, None] *\n inv._inv_gpu(x.transpose((0, 2, 1))))\n return utils.force_array(grad),\n\n\ndef batch_det(a):\n \"\"\"Computes the determinant of a batch of square matrices.\n\n Args:\n a (Variable): Input array to compute the determinant for.\n The first dimension should iterate over each matrix and be\n of the batchsize.\n\n Returns:\n ~chainer.Variable: vector of determinants for every matrix\n in the batch.\n\n \"\"\"\n return BatchDet()(a)\n\n\ndef det(a):\n \"\"\"Computes the determinant of a single square matrix.\n\n Args:\n a (Variable): Input array to compute the determinant for.\n\n Returns:\n ~chainer.Variable: Scalar determinant of the matrix a.\n\n \"\"\"\n shape = (1, len(a.data), a.data.shape[1])\n batched_a = reshape.Reshape(shape)(a)\n batched_det = BatchDet()(batched_a)\n return reshape.Reshape((1, ))(batched_det)\n", "path": "chainer/functions/math/det.py"}], "after_files": [{"content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.functions.array import reshape\nfrom chainer.functions.math import inv\nfrom chainer.functions.math import matmul\nfrom chainer import utils\nfrom chainer.utils import type_check\n\n\ndef _det_gpu(b):\n # We do a batched LU decomposition on the GPU to compute\n # and compute the determinant by multiplying the diagonal.\n # Change the shape of the array to be size=1 minibatch if necessary.\n # Also copy the matrix as the elments will be modified in-place.\n a = matmul._as_batch_mat(b).copy()\n n = a.shape[1]\n n_matrices = len(a)\n # Pivot array\n p = cuda.cupy.zeros((n_matrices, n), dtype='int32')\n # Output array\n # These arrays hold information on the execution success\n # or if the matrix was singular.\n info1 = cuda.cupy.zeros(n_matrices, dtype=numpy.intp)\n ap = matmul._mat_ptrs(a)\n _, lda = matmul._get_ld(a)\n cuda.cublas.sgetrfBatched(cuda.Device().cublas_handle, n, ap.data.ptr, lda,\n p.data.ptr, info1.data.ptr, n_matrices)\n det = cuda.cupy.prod(a.diagonal(axis1=1, axis2=2), axis=1)\n # The determinant is equal to the product of the diagonal entries\n # of `a` where the sign of `a` is flipped depending on whether\n # the pivot array is equal to its index.\n rng = cuda.cupy.arange(1, n + 1, dtype='int32')\n parity = cuda.cupy.sum(p != rng, axis=1) % 2\n sign = 1. - 2. 
* parity.astype('float32')\n success = cuda.cupy.all(info1 == 0)\n return det * sign, success\n\n\nclass BatchDet(function.Function):\n\n @property\n def label(self):\n return 'det'\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n a_type, = in_types\n a_type = matmul._convert_type(a_type)\n type_check.expect(a_type.dtype.kind == 'f')\n # Only a minibatch of 2D array shapes allowed.\n type_check.expect(a_type.ndim == 3)\n # Matrix inversion only allowed for square matrices\n # so assert the last two dimensions are equal.\n type_check.expect(a_type.shape[-1] == a_type.shape[-2])\n\n def forward_cpu(self, x):\n self.detx = utils.force_array(numpy.linalg.det(x[0]))\n return self.detx,\n\n def forward_gpu(self, x):\n self.detx, success = _det_gpu(x[0])\n if not success:\n raise ValueError('Singular Matrix')\n return self.detx,\n\n def backward_cpu(self, x, gy):\n x, = x\n gy, = gy\n grad = (gy[:, None, None] * self.detx[:, None, None] *\n numpy.linalg.inv(x.transpose((0, 2, 1))))\n return utils.force_array(grad),\n\n def backward_gpu(self, x, gy):\n x, = x\n gy, = gy\n grad = (gy[:, None, None] * self.detx[:, None, None] *\n inv._inv_gpu(x.transpose((0, 2, 1))))\n return utils.force_array(grad),\n\n\ndef batch_det(a):\n \"\"\"Computes the determinant of a batch of square matrices.\n\n Args:\n a (Variable): Input array to compute the determinant for.\n The first dimension should iterate over each matrix and be\n of the batchsize.\n\n Returns:\n ~chainer.Variable: vector of determinants for every matrix\n in the batch.\n\n \"\"\"\n return BatchDet()(a)\n\n\ndef det(a):\n \"\"\"Computes the determinant of a single square matrix.\n\n Args:\n a (Variable): Input array to compute the determinant for.\n\n Returns:\n ~chainer.Variable: Scalar determinant of the matrix a.\n\n \"\"\"\n shape = (1, len(a.data), a.data.shape[1])\n batched_a = reshape.Reshape(shape)(a)\n batched_det = BatchDet()(batched_a)\n return reshape.Reshape(())(batched_det)\n", "path": "chainer/functions/math/det.py"}]} | 1,585 | 122 |
gh_patches_debug_1484 | rasdani/github-patches | git_diff | PyGithub__PyGithub-1891 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
allow PyJWT 2+
Other libraries are moving to PyJWT 2+ as a requirement. Is it possible to update PyGithub as well? Currently we can't use, for example, PyGithub together with django-social-core.
--- END ISSUE ---
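For context, the compatibility break that pins like `pyjwt<2.0` guard against is mainly the return type of `jwt.encode()`, which was `bytes` in PyJWT 1.x and is `str` in 2.x (and `jwt.decode()` now requires an explicit `algorithms` list). The sketch below is illustrative only and is not taken from the PyGithub code base; it shows how calling code can stay compatible with both major versions:

```python
# Minimal sketch, assuming only the public PyJWT API: jwt.encode() returned
# bytes in 1.x and returns str in 2.x, so callers can normalize the result.
import jwt

def make_token(payload: dict, secret: str) -> str:
    token = jwt.encode(payload, secret, algorithm="HS256")
    if isinstance(token, bytes):  # PyJWT 1.x
        token = token.decode("utf-8")
    return token  # already str on PyJWT 2.x

print(make_token({"iss": "example"}, "not-a-real-secret"))
```

With calling code normalized this way, relaxing the pin in `setup.py` is mostly a packaging change.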
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 ############################ Copyrights and license ############################
4 # #
5 # Copyright 2012 Vincent Jacques <[email protected]> #
6 # Copyright 2012 Zearin <[email protected]> #
7 # Copyright 2013 Vincent Jacques <[email protected]> #
8 # Copyright 2014 Tomas Radej <[email protected]> #
9 # Copyright 2014 Vincent Jacques <[email protected]> #
10 # Copyright 2015 Jimmy Zelinskie <[email protected]> #
11 # Copyright 2016 Felix Yan <[email protected]> #
12 # Copyright 2016 Jakub Wilk <[email protected]> #
13 # Copyright 2016 Jannis Gebauer <[email protected]> #
14 # Copyright 2016 Peter Buckley <[email protected]> #
15 # Copyright 2017 Hugo <[email protected]> #
16 # Copyright 2017 Jannis Gebauer <[email protected]> #
17 # Copyright 2017 Jannis Gebauer <[email protected]> #
18 # Copyright 2017 Nhomar Hernandez <[email protected]> #
19 # Copyright 2017 Paul Ortman <[email protected]> #
20 # Copyright 2018 Jason White <[email protected]> #
21 # Copyright 2018 Mike Miller <[email protected]> #
22 # Copyright 2018 Wan Liuyang <[email protected]> #
23 # Copyright 2018 sfdye <[email protected]> #
24 # #
25 # This file is part of PyGithub. #
26 # http://pygithub.readthedocs.io/ #
27 # #
28 # PyGithub is free software: you can redistribute it and/or modify it under #
29 # the terms of the GNU Lesser General Public License as published by the Free #
30 # Software Foundation, either version 3 of the License, or (at your option) #
31 # any later version. #
32 # #
33 # PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
34 # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
35 # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
36 # details. #
37 # #
38 # You should have received a copy of the GNU Lesser General Public License #
39 # along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
40 # #
41 ################################################################################
42
43 import textwrap
44
45 import setuptools
46
47 version = "1.54.1"
48
49
50 if __name__ == "__main__":
51 setuptools.setup(
52 name="PyGithub",
53 version=version,
54 description="Use the full Github API v3",
55 author="Vincent Jacques",
56 author_email="[email protected]",
57 url="https://github.com/pygithub/pygithub",
58 project_urls={
59 "Documentation": "http://pygithub.readthedocs.io/en/latest/",
60 "Source": "https://github.com/pygithub/pygithub",
61 "Tracker": "https://github.com/pygithub/pygithub/issues",
62 },
63 long_description=textwrap.dedent(
64 """\
65 (Very short) Tutorial
66 =====================
67
68 First create a Github instance::
69
70 from github import Github
71
72 # using username and password
73 g = Github("user", "password")
74
75 # or using an access token
76 g = Github("access_token")
77
78 Then play with your Github objects::
79
80 for repo in g.get_user().get_repos():
81 print(repo.name)
82 repo.edit(has_wiki=False)
83
84 Reference documentation
85 =======================
86
87 See http://pygithub.readthedocs.io/en/latest/"""
88 ),
89 packages=["github"],
90 package_data={"github": ["py.typed", "*.pyi"]},
91 classifiers=[
92 "Development Status :: 5 - Production/Stable",
93 "Environment :: Web Environment",
94 "Intended Audience :: Developers",
95 "License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)",
96 "Operating System :: OS Independent",
97 "Programming Language :: Python",
98 "Programming Language :: Python :: 3",
99 "Programming Language :: Python :: 3.6",
100 "Programming Language :: Python :: 3.7",
101 "Programming Language :: Python :: 3.8",
102 "Programming Language :: Python :: 3.9",
103 "Topic :: Software Development",
104 ],
105 python_requires=">=3.6",
106 install_requires=[
107 "deprecated",
108 "pyjwt<2.0",
109 "pynacl>=1.4.0",
110 "requests>=2.14.0",
111 ],
112 extras_require={"integrations": ["cryptography"]},
113 tests_require=["cryptography", "httpretty>=1.0.3"],
114 )
115
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -105,7 +105,7 @@
python_requires=">=3.6",
install_requires=[
"deprecated",
- "pyjwt<2.0",
+ "pyjwt>=2.0",
"pynacl>=1.4.0",
"requests>=2.14.0",
],
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -105,7 +105,7 @@\n python_requires=\">=3.6\",\n install_requires=[\n \"deprecated\",\n- \"pyjwt<2.0\",\n+ \"pyjwt>=2.0\",\n \"pynacl>=1.4.0\",\n \"requests>=2.14.0\",\n ],\n", "issue": "allow PyJWT 2+\nother libraries are moving to PyJWT2+ as requirement, is it possible to update pygithub as well? currently we can't use for example pygithub together with django-social-core\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n############################ Copyrights and license ############################\n# #\n# Copyright 2012 Vincent Jacques <[email protected]> #\n# Copyright 2012 Zearin <[email protected]> #\n# Copyright 2013 Vincent Jacques <[email protected]> #\n# Copyright 2014 Tomas Radej <[email protected]> #\n# Copyright 2014 Vincent Jacques <[email protected]> #\n# Copyright 2015 Jimmy Zelinskie <[email protected]> #\n# Copyright 2016 Felix Yan <[email protected]> #\n# Copyright 2016 Jakub Wilk <[email protected]> #\n# Copyright 2016 Jannis Gebauer <[email protected]> #\n# Copyright 2016 Peter Buckley <[email protected]> #\n# Copyright 2017 Hugo <[email protected]> #\n# Copyright 2017 Jannis Gebauer <[email protected]> #\n# Copyright 2017 Jannis Gebauer <[email protected]> #\n# Copyright 2017 Nhomar Hernandez <[email protected]> #\n# Copyright 2017 Paul Ortman <[email protected]> #\n# Copyright 2018 Jason White <[email protected]> #\n# Copyright 2018 Mike Miller <[email protected]> #\n# Copyright 2018 Wan Liuyang <[email protected]> #\n# Copyright 2018 sfdye <[email protected]> #\n# #\n# This file is part of PyGithub. #\n# http://pygithub.readthedocs.io/ #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. 
#\n# #\n################################################################################\n\nimport textwrap\n\nimport setuptools\n\nversion = \"1.54.1\"\n\n\nif __name__ == \"__main__\":\n setuptools.setup(\n name=\"PyGithub\",\n version=version,\n description=\"Use the full Github API v3\",\n author=\"Vincent Jacques\",\n author_email=\"[email protected]\",\n url=\"https://github.com/pygithub/pygithub\",\n project_urls={\n \"Documentation\": \"http://pygithub.readthedocs.io/en/latest/\",\n \"Source\": \"https://github.com/pygithub/pygithub\",\n \"Tracker\": \"https://github.com/pygithub/pygithub/issues\",\n },\n long_description=textwrap.dedent(\n \"\"\"\\\n (Very short) Tutorial\n =====================\n\n First create a Github instance::\n\n from github import Github\n\n # using username and password\n g = Github(\"user\", \"password\")\n\n # or using an access token\n g = Github(\"access_token\")\n\n Then play with your Github objects::\n\n for repo in g.get_user().get_repos():\n print(repo.name)\n repo.edit(has_wiki=False)\n\n Reference documentation\n =======================\n\n See http://pygithub.readthedocs.io/en/latest/\"\"\"\n ),\n packages=[\"github\"],\n package_data={\"github\": [\"py.typed\", \"*.pyi\"]},\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Software Development\",\n ],\n python_requires=\">=3.6\",\n install_requires=[\n \"deprecated\",\n \"pyjwt<2.0\",\n \"pynacl>=1.4.0\",\n \"requests>=2.14.0\",\n ],\n extras_require={\"integrations\": [\"cryptography\"]},\n tests_require=[\"cryptography\", \"httpretty>=1.0.3\"],\n )\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n############################ Copyrights and license ############################\n# #\n# Copyright 2012 Vincent Jacques <[email protected]> #\n# Copyright 2012 Zearin <[email protected]> #\n# Copyright 2013 Vincent Jacques <[email protected]> #\n# Copyright 2014 Tomas Radej <[email protected]> #\n# Copyright 2014 Vincent Jacques <[email protected]> #\n# Copyright 2015 Jimmy Zelinskie <[email protected]> #\n# Copyright 2016 Felix Yan <[email protected]> #\n# Copyright 2016 Jakub Wilk <[email protected]> #\n# Copyright 2016 Jannis Gebauer <[email protected]> #\n# Copyright 2016 Peter Buckley <[email protected]> #\n# Copyright 2017 Hugo <[email protected]> #\n# Copyright 2017 Jannis Gebauer <[email protected]> #\n# Copyright 2017 Jannis Gebauer <[email protected]> #\n# Copyright 2017 Nhomar Hernandez <[email protected]> #\n# Copyright 2017 Paul Ortman <[email protected]> #\n# Copyright 2018 Jason White <[email protected]> #\n# Copyright 2018 Mike Miller <[email protected]> #\n# Copyright 2018 Wan Liuyang <[email protected]> #\n# Copyright 2018 sfdye <[email protected]> #\n# #\n# This file is part of PyGithub. 
#\n# http://pygithub.readthedocs.io/ #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #\n# #\n################################################################################\n\nimport textwrap\n\nimport setuptools\n\nversion = \"1.54.1\"\n\n\nif __name__ == \"__main__\":\n setuptools.setup(\n name=\"PyGithub\",\n version=version,\n description=\"Use the full Github API v3\",\n author=\"Vincent Jacques\",\n author_email=\"[email protected]\",\n url=\"https://github.com/pygithub/pygithub\",\n project_urls={\n \"Documentation\": \"http://pygithub.readthedocs.io/en/latest/\",\n \"Source\": \"https://github.com/pygithub/pygithub\",\n \"Tracker\": \"https://github.com/pygithub/pygithub/issues\",\n },\n long_description=textwrap.dedent(\n \"\"\"\\\n (Very short) Tutorial\n =====================\n\n First create a Github instance::\n\n from github import Github\n\n # using username and password\n g = Github(\"user\", \"password\")\n\n # or using an access token\n g = Github(\"access_token\")\n\n Then play with your Github objects::\n\n for repo in g.get_user().get_repos():\n print(repo.name)\n repo.edit(has_wiki=False)\n\n Reference documentation\n =======================\n\n See http://pygithub.readthedocs.io/en/latest/\"\"\"\n ),\n packages=[\"github\"],\n package_data={\"github\": [\"py.typed\", \"*.pyi\"]},\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Software Development\",\n ],\n python_requires=\">=3.6\",\n install_requires=[\n \"deprecated\",\n \"pyjwt>=2.0\",\n \"pynacl>=1.4.0\",\n \"requests>=2.14.0\",\n ],\n extras_require={\"integrations\": [\"cryptography\"]},\n tests_require=[\"cryptography\", \"httpretty>=1.0.3\"],\n )\n", "path": "setup.py"}]} | 1,663 | 96 |
gh_patches_debug_14714 | rasdani/github-patches | git_diff | bokeh__bokeh-8466 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
"CustomJS for Selections" Example in Docs Broken
In the latest version of the docs, it appears [this example]( https://bokeh.pydata.org/en/latest/docs/user_guide/interaction/callbacks.html#customjs-for-selections ) is broken. This is also true of the example in the Bokeh 1.0.0 docs. Selecting points in the plot on the left does not result in points being shown in the right plot. Compare this to [the same plot using Bokeh 0.13.0]( https://bokeh.pydata.org/en/0.13.0/docs/user_guide/interaction/callbacks.html#customjs-for-selections ), which seems to work without issues.
--- END ISSUE ---
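The regression is consistent with the removal of the old `ColumnDataSource.callback` hook around Bokeh 1.0; since then, selection callbacks are attached to the selection object itself. The following is a minimal, self-contained sketch of that pattern (assuming Bokeh >= 1.0), not the documentation example itself:

```python
# Minimal sketch (assuming Bokeh >= 1.0): listen to the selection's
# "indices" property instead of assigning the removed source.callback.
from bokeh.models import ColumnDataSource, CustomJS
from bokeh.plotting import figure, show

source = ColumnDataSource(data=dict(x=[1, 2, 3], y=[4, 5, 6]))
p = figure(tools="lasso_select", title="Select points")
p.circle('x', 'y', source=source, size=10)

source.selected.js_on_change('indices', CustomJS(args=dict(s=source), code="""
    console.log('selected indices:', cb_obj.indices);
"""))

show(p)
```

The fix to the documentation example further down follows the same pattern.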
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sphinx/source/docs/user_guide/examples/interaction_callbacks_for_selections.py`
Content:
```
1 from random import random
2
3 from bokeh.layouts import row
4 from bokeh.models import CustomJS, ColumnDataSource
5 from bokeh.plotting import figure, output_file, show
6
7 output_file("callback.html")
8
9 x = [random() for x in range(500)]
10 y = [random() for y in range(500)]
11
12 s1 = ColumnDataSource(data=dict(x=x, y=y))
13 p1 = figure(plot_width=400, plot_height=400, tools="lasso_select", title="Select Here")
14 p1.circle('x', 'y', source=s1, alpha=0.6)
15
16 s2 = ColumnDataSource(data=dict(x=[], y=[]))
17 p2 = figure(plot_width=400, plot_height=400, x_range=(0, 1), y_range=(0, 1),
18 tools="", title="Watch Here")
19 p2.circle('x', 'y', source=s2, alpha=0.6)
20
21 s1.callback = CustomJS(args=dict(s2=s2), code="""
22 var inds = cb_obj.selected.indices;
23 var d1 = cb_obj.data;
24 var d2 = s2.data;
25 d2['x'] = []
26 d2['y'] = []
27 for (var i = 0; i < inds.length; i++) {
28 d2['x'].push(d1['x'][inds[i]])
29 d2['y'].push(d1['y'][inds[i]])
30 }
31 s2.change.emit();
32 """)
33
34 layout = row(p1, p2)
35
36 show(layout)
37
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sphinx/source/docs/user_guide/examples/interaction_callbacks_for_selections.py b/sphinx/source/docs/user_guide/examples/interaction_callbacks_for_selections.py
--- a/sphinx/source/docs/user_guide/examples/interaction_callbacks_for_selections.py
+++ b/sphinx/source/docs/user_guide/examples/interaction_callbacks_for_selections.py
@@ -18,9 +18,9 @@
tools="", title="Watch Here")
p2.circle('x', 'y', source=s2, alpha=0.6)
-s1.callback = CustomJS(args=dict(s2=s2), code="""
- var inds = cb_obj.selected.indices;
- var d1 = cb_obj.data;
+s1.selected.js_on_change('indices', CustomJS(args=dict(s1=s1, s2=s2), code="""
+ var inds = cb_obj.indices;
+ var d1 = s1.data;
var d2 = s2.data;
d2['x'] = []
d2['y'] = []
@@ -30,6 +30,7 @@
}
s2.change.emit();
""")
+)
layout = row(p1, p2)
| {"golden_diff": "diff --git a/sphinx/source/docs/user_guide/examples/interaction_callbacks_for_selections.py b/sphinx/source/docs/user_guide/examples/interaction_callbacks_for_selections.py\n--- a/sphinx/source/docs/user_guide/examples/interaction_callbacks_for_selections.py\n+++ b/sphinx/source/docs/user_guide/examples/interaction_callbacks_for_selections.py\n@@ -18,9 +18,9 @@\n tools=\"\", title=\"Watch Here\")\n p2.circle('x', 'y', source=s2, alpha=0.6)\n \n-s1.callback = CustomJS(args=dict(s2=s2), code=\"\"\"\n- var inds = cb_obj.selected.indices;\n- var d1 = cb_obj.data;\n+s1.selected.js_on_change('indices', CustomJS(args=dict(s1=s1, s2=s2), code=\"\"\"\n+ var inds = cb_obj.indices;\n+ var d1 = s1.data;\n var d2 = s2.data;\n d2['x'] = []\n d2['y'] = []\n@@ -30,6 +30,7 @@\n }\n s2.change.emit();\n \"\"\")\n+)\n \n layout = row(p1, p2)\n", "issue": "\"CustomJS for Selections\" Example in Docs Broken\nIn the latest version of the docs, it appears [this example]( https://bokeh.pydata.org/en/latest/docs/user_guide/interaction/callbacks.html#customjs-for-selections ) is broken. This is also true of the example in the Bokeh 1.0.0 docs. Selecting points in the plot on the left does not result in points being shown in the right plot. Compare this to [the same plot using Bokeh 0.13.0]( https://bokeh.pydata.org/en/0.13.0/docs/user_guide/interaction/callbacks.html#customjs-for-selections ), which seems to work without issues.\n", "before_files": [{"content": "from random import random\n\nfrom bokeh.layouts import row\nfrom bokeh.models import CustomJS, ColumnDataSource\nfrom bokeh.plotting import figure, output_file, show\n\noutput_file(\"callback.html\")\n\nx = [random() for x in range(500)]\ny = [random() for y in range(500)]\n\ns1 = ColumnDataSource(data=dict(x=x, y=y))\np1 = figure(plot_width=400, plot_height=400, tools=\"lasso_select\", title=\"Select Here\")\np1.circle('x', 'y', source=s1, alpha=0.6)\n\ns2 = ColumnDataSource(data=dict(x=[], y=[]))\np2 = figure(plot_width=400, plot_height=400, x_range=(0, 1), y_range=(0, 1),\n tools=\"\", title=\"Watch Here\")\np2.circle('x', 'y', source=s2, alpha=0.6)\n\ns1.callback = CustomJS(args=dict(s2=s2), code=\"\"\"\n var inds = cb_obj.selected.indices;\n var d1 = cb_obj.data;\n var d2 = s2.data;\n d2['x'] = []\n d2['y'] = []\n for (var i = 0; i < inds.length; i++) {\n d2['x'].push(d1['x'][inds[i]])\n d2['y'].push(d1['y'][inds[i]])\n }\n s2.change.emit();\n \"\"\")\n\nlayout = row(p1, p2)\n\nshow(layout)\n", "path": "sphinx/source/docs/user_guide/examples/interaction_callbacks_for_selections.py"}], "after_files": [{"content": "from random import random\n\nfrom bokeh.layouts import row\nfrom bokeh.models import CustomJS, ColumnDataSource\nfrom bokeh.plotting import figure, output_file, show\n\noutput_file(\"callback.html\")\n\nx = [random() for x in range(500)]\ny = [random() for y in range(500)]\n\ns1 = ColumnDataSource(data=dict(x=x, y=y))\np1 = figure(plot_width=400, plot_height=400, tools=\"lasso_select\", title=\"Select Here\")\np1.circle('x', 'y', source=s1, alpha=0.6)\n\ns2 = ColumnDataSource(data=dict(x=[], y=[]))\np2 = figure(plot_width=400, plot_height=400, x_range=(0, 1), y_range=(0, 1),\n tools=\"\", title=\"Watch Here\")\np2.circle('x', 'y', source=s2, alpha=0.6)\n\ns1.selected.js_on_change('indices', CustomJS(args=dict(s1=s1, s2=s2), code=\"\"\"\n var inds = cb_obj.indices;\n var d1 = s1.data;\n var d2 = s2.data;\n d2['x'] = []\n d2['y'] = []\n for (var i = 0; i < inds.length; i++) {\n d2['x'].push(d1['x'][inds[i]])\n 
d2['y'].push(d1['y'][inds[i]])\n }\n s2.change.emit();\n \"\"\")\n)\n\nlayout = row(p1, p2)\n\nshow(layout)\n", "path": "sphinx/source/docs/user_guide/examples/interaction_callbacks_for_selections.py"}]} | 822 | 249 |
gh_patches_debug_27286 | rasdani/github-patches | git_diff | psychopy__psychopy-739 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
gui import from psychopy not working
Hi all,
I'm trying to run a PsychoPy script from the terminal but I get this error:
Traceback (most recent call last):
File "nf_test_lastrun.py", line 11, in <module>
from psychopy import visual, core, data, event, logging, sound, gui
File "/Library/Python/2.7/site-packages/PsychoPy-1.81.00-py2.7.egg/psychopy/gui.py", line 11, in <module>
from psychopy.app import localization
File "/Library/Python/2.7/site-packages/PsychoPy-1.81.00-py2.7.egg/psychopy/app/localization/**init**.py", line 89, in <module>
languageID, lang = getID()
File "/Library/Python/2.7/site-packages/PsychoPy-1.81.00-py2.7.egg/psychopy/app/localization/**init**.py", line 78, in getID
val = codeFromWxId[wx.LANGUAGE_DEFAULT]
KeyError: 0
When I open Python and try the imports interactively, everything works except gui.
Any suggestions?
Thanks,
Clemens
--- END ISSUE ---
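The traceback points at `codeFromWxId[wx.LANGUAGE_DEFAULT]`: the `KeyError: 0` shows that `wx.LANGUAGE_DEFAULT` evaluates to 0 here, and on some systems `wx.Locale.GetLanguageInfo(0)` returns None, so key 0 is never inserted into the dictionary and the lookup fails. The snippet below is a framework-free sketch of the defensive-default idea (it does not import wx and is not the PsychoPy module itself):

```python
# Framework-free sketch of the defensive default (not the PsychoPy module):
# pre-seed the lookup tables so the default language ID (0 here, as the
# KeyError in the traceback shows) always resolves, even when the locale
# info source knows nothing about that ID.
LANGUAGE_DEFAULT = 0  # stand-in for wx.LANGUAGE_DEFAULT

def build_tables(get_info):
    code_from_id = {LANGUAGE_DEFAULT: 'en_US'}   # seeded up front
    id_from_code = {'en_US': LANGUAGE_DEFAULT}
    for i in range(230):
        info = get_info(i)
        if info:
            id_from_code[info] = i
            code_from_id[i] = info
    return code_from_id, id_from_code

# A fake info source that, like some systems, returns nothing for ID 0:
fake = {5: 'fr_FR'}
code_from_id, _ = build_tables(fake.get)
print(code_from_id[LANGUAGE_DEFAULT])   # 'en_US' instead of KeyError: 0
```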
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `psychopy/app/localization/__init__.py`
Content:
```
1 #!/usr/bin/env python2
2 # -*- coding: utf-8 -*-
3
4 """Language localization for PsychoPy.
5
6 Sets the locale value as a wx languageID (int) and initializes gettext translation _translate():
7 from psychopy.app import localization
8 """
9
10 # Part of the PsychoPy library
11 # Copyright (C) 2014 Jonathan Peirce
12 # Distributed under the terms of the GNU General Public License (GPL).
13
14 # Author: Jeremy Gray, July 2014
15
16
17 import gettext
18 import os, sys, glob, codecs
19 from psychopy import logging, prefs
20
21 import wx
22
23 # need a wx App for wx.Locale:
24 try:
25 wx.Dialog(None, -1)
26 except wx._core.PyNoAppError:
27 if wx.version() < '2.9':
28 tmpApp = wx.PySimpleApp()
29 else:
30 tmpApp = wx.App(False)
31
32 # Get a dict of locale aliases from wx.Locale() -- same cross-platform (Win 7, Mac 10.9)
33 locale = wx.Locale()
34 aliases = {}
35 wxIdFromCode = {} # int: 0, 2-229
36 codeFromWxId = {} # used in directory names e.g. ja_JP; never JPN ala Windows
37 winmap = {} # get windows 3-letter code (=val) from canonical form (=key); use only for setting locale (non-wx)
38 locname = {} # descriptive name, if available; 5-letter code if not
39 reverseMap = {}
40
41 for i in range(230):
42 info = locale.GetLanguageInfo(i)
43 if info:
44 aliases[info.Description] = info.CanonicalName # mix of forms: ja or ja_JP
45 wxIdFromCode[info.CanonicalName] = i
46 codeFromWxId[i] = info.CanonicalName
47
48 mappings = os.path.join(os.path.dirname(__file__), 'mappings.txt')
49 for line in codecs.open(mappings, 'rU', 'utf8').readlines():
50 try:
51 can, win, name = line.strip().split(' ', 2) # canonical, windows, name-with-spaces
52 except ValueError:
53 can, win = line.strip().split(' ', 1)
54 name = can
55 winmap[can] = win
56 locname[can] = name
57 reverseMap[name] = can
58
59 # what are the available translations? available languages on the OS?
60 expr = os.path.join(os.path.dirname(__file__), '..', 'locale', '*')
61 available = sorted(map(os.path.basename, glob.glob(expr)))
62 sysAvail = [str(l) for l in codeFromWxId.values() # installed language packs
63 if l and locale.IsAvailable(wxIdFromCode[l])]
64
65 def getID(lang=None):
66 """Get wx ID of language to use for translations: `lang`, pref, or system default.
67
68 `lang` is a 5 char `language_REGION`, eg ja_JP
69 """
70 if lang:
71 val = lang
72 else:
73 try:
74 val = prefs.app['locale']
75 except KeyError:
76 val = locale.GetLocale() # wx.Locale, no encoding
77 if not val:
78 val = codeFromWxId[wx.LANGUAGE_DEFAULT]
79 try:
80 # out-dated: [can't set wx.Locale here because no app yet] now there is an app
81 # here just determine the value to be used when it can be set
82 language = wxIdFromCode[val]
83 except KeyError:
84 logging.error('locale %s not known to wx.Locale, using default' % val)
85 language = wx.LANGUAGE_DEFAULT
86
87 return language, val
88
89 languageID, lang = getID()
90 #use lang like this:
91 #import locale -- the non-wx version of locale
92 #
93 #if sys.platform.startswith('win'):
94 # v = winmap[val]
95 #else: v=val
96 #locale.setlocale(locale.LC_ALL, (v, 'UTF-8'))
97
98 # set locale before splash screen:
99 if locale.IsAvailable(languageID):
100 wxlocale = wx.Locale(languageID)
101 else:
102 wxlocale = wx.Locale(wx.LANGUAGE_DEFAULT)
103
104 # ideally rewrite the following using wxlocale only:
105 path = os.path.join(os.path.dirname(__file__), '..', 'locale', lang, 'LC_MESSAGE') + os.sep
106 mofile = os.path.join(path, 'messages.mo')
107 try:
108 logging.debug("Opening message catalog %s for locale %s" % (mofile, lang))
109 trans = gettext.GNUTranslations(open(mofile, "rb"))
110 except IOError:
111 logging.debug("Locale for '%s' not found. Using default." % lang)
112 trans = gettext.NullTranslations()
113 trans.install(unicode=True)
114
115 # to avoid a crash, PsychoPy app uses a nonstandard name _translate instead of _
116 # seems like a var in a dependency is named _, clobbering _ as global translation:
117 __builtins__['_translate'] = _
118 del(__builtins__['_']) # idea: force psychopy code to use _translate
119
120
121 #__builtins__['_'] = wx.GetTranslation
122 # this seems to have no effect, needs more investigation:
123 #path = os.path.join(os.path.dirname(__file__), '..', 'locale', lang, 'LC_MESSAGE') + os.sep
124 #wxlocale.AddCatalogLookupPathPrefix(path)
125
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/psychopy/app/localization/__init__.py b/psychopy/app/localization/__init__.py
--- a/psychopy/app/localization/__init__.py
+++ b/psychopy/app/localization/__init__.py
@@ -31,13 +31,10 @@
# Get a dict of locale aliases from wx.Locale() -- same cross-platform (Win 7, Mac 10.9)
locale = wx.Locale()
-aliases = {}
-wxIdFromCode = {} # int: 0, 2-229
-codeFromWxId = {} # used in directory names e.g. ja_JP; never JPN ala Windows
-winmap = {} # get windows 3-letter code (=val) from canonical form (=key); use only for setting locale (non-wx)
-locname = {} # descriptive name, if available; 5-letter code if not
-reverseMap = {}
-
+aliases = {u'English (U.S.)': 'en_US'}
+# set defaults because locale.GetLanguageInfo(0) can return None on some systems:
+wxIdFromCode = {'en_US': wx.LANGUAGE_DEFAULT} # int: 0 default, 2-229
+codeFromWxId = {wx.LANGUAGE_DEFAULT: 'en_US'} # used in directory names e.g. ja_JP; never JPN ala Windows
for i in range(230):
info = locale.GetLanguageInfo(i)
if info:
@@ -45,6 +42,10 @@
wxIdFromCode[info.CanonicalName] = i
codeFromWxId[i] = info.CanonicalName
+# read all known mappings cross-platform from a file:
+winmap = {'en_US': 'ENU'} # get windows 3-letter code (=val) from canonical form (=key); use only for setting locale (non-wx)
+locname = {'en_US': u'English (U.S.)'} # descriptive name, if available; 5-letter code if not
+reverseMap = {u'English (U.S.)': 'en_US'}
mappings = os.path.join(os.path.dirname(__file__), 'mappings.txt')
for line in codecs.open(mappings, 'rU', 'utf8').readlines():
try:
| {"golden_diff": "diff --git a/psychopy/app/localization/__init__.py b/psychopy/app/localization/__init__.py\n--- a/psychopy/app/localization/__init__.py\n+++ b/psychopy/app/localization/__init__.py\n@@ -31,13 +31,10 @@\n \n # Get a dict of locale aliases from wx.Locale() -- same cross-platform (Win 7, Mac 10.9)\n locale = wx.Locale()\n-aliases = {}\n-wxIdFromCode = {} # int: 0, 2-229\n-codeFromWxId = {} # used in directory names e.g. ja_JP; never JPN ala Windows\n-winmap = {} # get windows 3-letter code (=val) from canonical form (=key); use only for setting locale (non-wx)\n-locname = {} # descriptive name, if available; 5-letter code if not\n-reverseMap = {}\n-\n+aliases = {u'English (U.S.)': 'en_US'}\n+# set defaults because locale.GetLanguageInfo(0) can return None on some systems:\n+wxIdFromCode = {'en_US': wx.LANGUAGE_DEFAULT} # int: 0 default, 2-229\n+codeFromWxId = {wx.LANGUAGE_DEFAULT: 'en_US'} # used in directory names e.g. ja_JP; never JPN ala Windows\n for i in range(230):\n info = locale.GetLanguageInfo(i)\n if info:\n@@ -45,6 +42,10 @@\n wxIdFromCode[info.CanonicalName] = i\n codeFromWxId[i] = info.CanonicalName\n \n+# read all known mappings cross-platform from a file:\n+winmap = {'en_US': 'ENU'} # get windows 3-letter code (=val) from canonical form (=key); use only for setting locale (non-wx)\n+locname = {'en_US': u'English (U.S.)'} # descriptive name, if available; 5-letter code if not\n+reverseMap = {u'English (U.S.)': 'en_US'}\n mappings = os.path.join(os.path.dirname(__file__), 'mappings.txt')\n for line in codecs.open(mappings, 'rU', 'utf8').readlines():\n try:\n", "issue": "gui import from psychopy not woriking\nHi all,\n\nim trying to run a psychopy script from terminal but I get this error:\n\nTraceback (most recent call last):\n File \"nf_test_lastrun.py\", line 11, in <module>\n from psychopy import visual, core, data, event, logging, sound, gui\n File \"/Library/Python/2.7/site-packages/PsychoPy-1.81.00-py2.7.egg/psychopy/gui.py\", line 11, in <module>\n from psychopy.app import localization\n File \"/Library/Python/2.7/site-packages/PsychoPy-1.81.00-py2.7.egg/psychopy/app/localization/**init**.py\", line 89, in <module>\n languageID, lang = getID()\n File \"/Library/Python/2.7/site-packages/PsychoPy-1.81.00-py2.7.egg/psychopy/app/localization/**init**.py\", line 78, in getID\n val = codeFromWxId[wx.LANGUAGE_DEFAULT]\nKeyError: 0\n\nwhen I open python and try to import from python, all work but gui.\n\nany suggestions\nthanks\nclemens\n\n", "before_files": [{"content": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\n\"\"\"Language localization for PsychoPy.\n\nSets the locale value as a wx languageID (int) and initializes gettext translation _translate():\n from psychopy.app import localization\n\"\"\"\n\n# Part of the PsychoPy library\n# Copyright (C) 2014 Jonathan Peirce\n# Distributed under the terms of the GNU General Public License (GPL).\n\n# Author: Jeremy Gray, July 2014\n\n\nimport gettext\nimport os, sys, glob, codecs\nfrom psychopy import logging, prefs\n\nimport wx\n\n# need a wx App for wx.Locale:\ntry:\n wx.Dialog(None, -1)\nexcept wx._core.PyNoAppError:\n if wx.version() < '2.9':\n tmpApp = wx.PySimpleApp()\n else:\n tmpApp = wx.App(False)\n\n# Get a dict of locale aliases from wx.Locale() -- same cross-platform (Win 7, Mac 10.9)\nlocale = wx.Locale()\naliases = {}\nwxIdFromCode = {} # int: 0, 2-229\ncodeFromWxId = {} # used in directory names e.g. 
ja_JP; never JPN ala Windows\nwinmap = {} # get windows 3-letter code (=val) from canonical form (=key); use only for setting locale (non-wx)\nlocname = {} # descriptive name, if available; 5-letter code if not\nreverseMap = {}\n\nfor i in range(230):\n info = locale.GetLanguageInfo(i)\n if info:\n aliases[info.Description] = info.CanonicalName # mix of forms: ja or ja_JP\n wxIdFromCode[info.CanonicalName] = i\n codeFromWxId[i] = info.CanonicalName\n\nmappings = os.path.join(os.path.dirname(__file__), 'mappings.txt')\nfor line in codecs.open(mappings, 'rU', 'utf8').readlines():\n try:\n can, win, name = line.strip().split(' ', 2) # canonical, windows, name-with-spaces\n except ValueError:\n can, win = line.strip().split(' ', 1)\n name = can\n winmap[can] = win\n locname[can] = name\n reverseMap[name] = can\n\n# what are the available translations? available languages on the OS?\nexpr = os.path.join(os.path.dirname(__file__), '..', 'locale', '*')\navailable = sorted(map(os.path.basename, glob.glob(expr)))\nsysAvail = [str(l) for l in codeFromWxId.values() # installed language packs\n if l and locale.IsAvailable(wxIdFromCode[l])]\n\ndef getID(lang=None):\n \"\"\"Get wx ID of language to use for translations: `lang`, pref, or system default.\n\n `lang` is a 5 char `language_REGION`, eg ja_JP\n \"\"\"\n if lang:\n val = lang\n else:\n try:\n val = prefs.app['locale']\n except KeyError:\n val = locale.GetLocale() # wx.Locale, no encoding\n if not val:\n val = codeFromWxId[wx.LANGUAGE_DEFAULT]\n try:\n # out-dated: [can't set wx.Locale here because no app yet] now there is an app\n # here just determine the value to be used when it can be set\n language = wxIdFromCode[val]\n except KeyError:\n logging.error('locale %s not known to wx.Locale, using default' % val)\n language = wx.LANGUAGE_DEFAULT\n\n return language, val\n\nlanguageID, lang = getID()\n#use lang like this:\n#import locale -- the non-wx version of locale\n#\n#if sys.platform.startswith('win'):\n# v = winmap[val]\n#else: v=val\n#locale.setlocale(locale.LC_ALL, (v, 'UTF-8'))\n\n# set locale before splash screen:\nif locale.IsAvailable(languageID):\n wxlocale = wx.Locale(languageID)\nelse:\n wxlocale = wx.Locale(wx.LANGUAGE_DEFAULT)\n\n# ideally rewrite the following using wxlocale only:\npath = os.path.join(os.path.dirname(__file__), '..', 'locale', lang, 'LC_MESSAGE') + os.sep\nmofile = os.path.join(path, 'messages.mo')\ntry:\n logging.debug(\"Opening message catalog %s for locale %s\" % (mofile, lang))\n trans = gettext.GNUTranslations(open(mofile, \"rb\"))\nexcept IOError:\n logging.debug(\"Locale for '%s' not found. 
Using default.\" % lang)\n trans = gettext.NullTranslations()\ntrans.install(unicode=True)\n\n# to avoid a crash, PsychoPy app uses a nonstandard name _translate instead of _\n# seems like a var in a dependency is named _, clobbering _ as global translation:\n__builtins__['_translate'] = _\ndel(__builtins__['_']) # idea: force psychopy code to use _translate\n\n\n#__builtins__['_'] = wx.GetTranslation\n# this seems to have no effect, needs more investigation:\n#path = os.path.join(os.path.dirname(__file__), '..', 'locale', lang, 'LC_MESSAGE') + os.sep\n#wxlocale.AddCatalogLookupPathPrefix(path)\n", "path": "psychopy/app/localization/__init__.py"}], "after_files": [{"content": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\n\"\"\"Language localization for PsychoPy.\n\nSets the locale value as a wx languageID (int) and initializes gettext translation _translate():\n from psychopy.app import localization\n\"\"\"\n\n# Part of the PsychoPy library\n# Copyright (C) 2014 Jonathan Peirce\n# Distributed under the terms of the GNU General Public License (GPL).\n\n# Author: Jeremy Gray, July 2014\n\n\nimport gettext\nimport os, sys, glob, codecs\nfrom psychopy import logging, prefs\n\nimport wx\n\n# need a wx App for wx.Locale:\ntry:\n wx.Dialog(None, -1)\nexcept wx._core.PyNoAppError:\n if wx.version() < '2.9':\n tmpApp = wx.PySimpleApp()\n else:\n tmpApp = wx.App(False)\n\n# Get a dict of locale aliases from wx.Locale() -- same cross-platform (Win 7, Mac 10.9)\nlocale = wx.Locale()\naliases = {u'English (U.S.)': 'en_US'}\n# set defaults because locale.GetLanguageInfo(0) can return None on some systems:\nwxIdFromCode = {'en_US': wx.LANGUAGE_DEFAULT} # int: 0 default, 2-229\ncodeFromWxId = {wx.LANGUAGE_DEFAULT: 'en_US'} # used in directory names e.g. ja_JP; never JPN ala Windows\nfor i in range(230):\n info = locale.GetLanguageInfo(i)\n if info:\n aliases[info.Description] = info.CanonicalName # mix of forms: ja or ja_JP\n wxIdFromCode[info.CanonicalName] = i\n codeFromWxId[i] = info.CanonicalName\n\n# read all known mappings cross-platform from a file:\nwinmap = {'en_US': 'ENU'} # get windows 3-letter code (=val) from canonical form (=key); use only for setting locale (non-wx)\nlocname = {'en_US': u'English (U.S.)'} # descriptive name, if available; 5-letter code if not\nreverseMap = {u'English (U.S.)': 'en_US'}\nmappings = os.path.join(os.path.dirname(__file__), 'mappings.txt')\nfor line in codecs.open(mappings, 'rU', 'utf8').readlines():\n try:\n can, win, name = line.strip().split(' ', 2) # canonical, windows, name-with-spaces\n except ValueError:\n can, win = line.strip().split(' ', 1)\n name = can\n winmap[can] = win\n locname[can] = name\n reverseMap[name] = can\n\n# what are the available translations? 
available languages on the OS?\nexpr = os.path.join(os.path.dirname(__file__), '..', 'locale', '*')\navailable = sorted(map(os.path.basename, glob.glob(expr)))\nsysAvail = [str(l) for l in codeFromWxId.values() # installed language packs\n if l and locale.IsAvailable(wxIdFromCode[l])]\n\ndef getID(lang=None):\n \"\"\"Get wx ID of language to use for translations: `lang`, pref, or system default.\n\n `lang` is a 5 char `language_REGION`, eg ja_JP\n \"\"\"\n if lang:\n val = lang\n else:\n try:\n val = prefs.app['locale']\n except KeyError:\n val = locale.GetLocale() # wx.Locale, no encoding\n if not val:\n val = codeFromWxId[wx.LANGUAGE_DEFAULT]\n try:\n # out-dated: [can't set wx.Locale here because no app yet] now there is an app\n # here just determine the value to be used when it can be set\n language = wxIdFromCode[val]\n except KeyError:\n logging.error('locale %s not known to wx.Locale, using default' % val)\n language = wx.LANGUAGE_DEFAULT\n\n return language, val\n\nlanguageID, lang = getID()\n#use lang like this:\n#import locale -- the non-wx version of locale\n#\n#if sys.platform.startswith('win'):\n# v = winmap[val]\n#else: v=val\n#locale.setlocale(locale.LC_ALL, (v, 'UTF-8'))\n\n# set locale before splash screen:\nif locale.IsAvailable(languageID):\n wxlocale = wx.Locale(languageID)\nelse:\n wxlocale = wx.Locale(wx.LANGUAGE_DEFAULT)\n\n# ideally rewrite the following using wxlocale only:\npath = os.path.join(os.path.dirname(__file__), '..', 'locale', lang, 'LC_MESSAGE') + os.sep\nmofile = os.path.join(path, 'messages.mo')\ntry:\n logging.debug(\"Opening message catalog %s for locale %s\" % (mofile, lang))\n trans = gettext.GNUTranslations(open(mofile, \"rb\"))\nexcept IOError:\n logging.debug(\"Locale for '%s' not found. Using default.\" % lang)\n trans = gettext.NullTranslations()\ntrans.install(unicode=True)\n\n# to avoid a crash, PsychoPy app uses a nonstandard name _translate instead of _\n# seems like a var in a dependency is named _, clobbering _ as global translation:\n__builtins__['_translate'] = _\ndel(__builtins__['_']) # idea: force psychopy code to use _translate\n\n\n#__builtins__['_'] = wx.GetTranslation\n# this seems to have no effect, needs more investigation:\n#path = os.path.join(os.path.dirname(__file__), '..', 'locale', lang, 'LC_MESSAGE') + os.sep\n#wxlocale.AddCatalogLookupPathPrefix(path)\n", "path": "psychopy/app/localization/__init__.py"}]} | 1,968 | 507 |
gh_patches_debug_41073 | rasdani/github-patches | git_diff | PaddlePaddle__PaddleSeg-1747 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
paddleseg/models/hrnet_contrast.py does not call init_weight
__init__() in paddleseg/models/hrnet_contrast.py never calls init_weight, so hrnet_w48_contrast cannot load the full pretrained model.
--- END ISSUE ---
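In short, the constructor builds the layers but never calls `self.init_weight()`, so a `pretrained` checkpoint passed to `HRNetW48Contrast` is silently ignored. Below is a minimal, framework-free sketch of the missing call (it is not the actual PaddleSeg class and uses a stand-in for the weight loader):

```python
# Framework-free sketch of the pattern the fix needs: the constructor should
# call init_weight() itself, otherwise a `pretrained` path passed in is ignored.
class Model:
    def __init__(self, pretrained=None):
        self.layers_built = True
        self.pretrained = pretrained
        self.loaded = False
        self.init_weight()          # the call the issue says is missing

    def init_weight(self):
        if self.pretrained is not None:
            # stand-in for utils.load_entire_model(self, self.pretrained)
            self.loaded = True

print(Model(pretrained="weights.pdparams").loaded)  # True once the call is made
```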
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `paddleseg/models/hrnet_contrast.py`
Content:
```
1 # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import paddle
16 import paddle.nn as nn
17 import paddle.nn.functional as F
18
19 from paddleseg.cvlibs import manager
20 from paddleseg.models import layers
21 from paddleseg.utils import utils
22
23
24 @manager.MODELS.add_component
25 class HRNetW48Contrast(nn.Layer):
26 """
27 The HRNetW48Contrast implementation based on PaddlePaddle.
28
29 The original article refers to
30 Wenguan Wang, Tianfei Zhou, et al. "Exploring Cross-Image Pixel Contrast for Semantic Segmentation"
31 (https://arxiv.org/abs/2101.11939).
32
33 Args:
34 in_channels (int): The output dimensions of backbone.
35 num_classes (int): The unique number of target classes.
36 backbone (Paddle.nn.Layer): Backbone network, currently support HRNet_W48.
37 drop_prob (float): The probability of dropout.
38 proj_dim (int): The projection dimensions.
39 align_corners (bool, optional): An argument of F.interpolate. It should be set to False when the feature size is even,
40 e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.
41 pretrained (str, optional): The path or url of pretrained model. Default: None.
42 """
43 def __init__(self,
44 in_channels,
45 num_classes,
46 backbone,
47 drop_prob,
48 proj_dim,
49 align_corners=False,
50 pretrained=None):
51 super().__init__()
52 self.in_channels = in_channels
53 self.backbone = backbone
54 self.num_classes = num_classes
55 self.proj_dim = proj_dim
56 self.align_corners = align_corners
57 self.pretrained = pretrained
58
59 self.cls_head = nn.Sequential(
60 layers.ConvBNReLU(in_channels,
61 in_channels,
62 kernel_size=3,
63 stride=1,
64 padding=1),
65 nn.Dropout2D(drop_prob),
66 nn.Conv2D(in_channels,
67 num_classes,
68 kernel_size=1,
69 stride=1,
70 bias_attr=False),
71 )
72 self.proj_head = ProjectionHead(dim_in=in_channels,
73 proj_dim=self.proj_dim)
74
75 def init_weight(self):
76 if self.pretrained is not None:
77 utils.load_entire_model(self, self.pretrained)
78
79 def forward(self, x):
80 feats = self.backbone(x)[0]
81 out = self.cls_head(feats)
82 logit_list = []
83 if self.training:
84 emb = self.proj_head(feats)
85 logit_list.append(
86 F.interpolate(out,
87 paddle.shape(x)[2:],
88 mode='bilinear',
89 align_corners=self.align_corners))
90 logit_list.append({'seg': out, 'embed': emb})
91 else:
92 logit_list.append(
93 F.interpolate(out,
94 paddle.shape(x)[2:],
95 mode='bilinear',
96 align_corners=self.align_corners))
97 return logit_list
98
99
100 class ProjectionHead(nn.Layer):
101 """
102 The projection head used by contrast learning.
103 Args:
104 dim_in (int): The dimensions of input features.
105 proj_dim (int, optional): The output dimensions of projection head. Default: 256.
106 proj (str, optional): The type of projection head, only support 'linear' and 'convmlp'. Default: 'convmlp'.
107 """
108 def __init__(self, dim_in, proj_dim=256, proj='convmlp'):
109 super(ProjectionHead, self).__init__()
110 if proj == 'linear':
111 self.proj = nn.Conv2D(dim_in, proj_dim, kernel_size=1)
112 elif proj == 'convmlp':
113 self.proj = nn.Sequential(
114 layers.ConvBNReLU(dim_in, dim_in, kernel_size=1),
115 nn.Conv2D(dim_in, proj_dim, kernel_size=1),
116 )
117 else:
118 raise ValueError(
119 "The type of project head only support 'linear' and 'convmlp', but got {}."
120 .format(proj))
121
122 def forward(self, x):
123 return F.normalize(self.proj(x), p=2, axis=1)
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/paddleseg/models/hrnet_contrast.py b/paddleseg/models/hrnet_contrast.py
--- a/paddleseg/models/hrnet_contrast.py
+++ b/paddleseg/models/hrnet_contrast.py
@@ -40,6 +40,7 @@
e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.
pretrained (str, optional): The path or url of pretrained model. Default: None.
"""
+
def __init__(self,
in_channels,
num_classes,
@@ -54,23 +55,23 @@
self.num_classes = num_classes
self.proj_dim = proj_dim
self.align_corners = align_corners
- self.pretrained = pretrained
self.cls_head = nn.Sequential(
- layers.ConvBNReLU(in_channels,
- in_channels,
- kernel_size=3,
- stride=1,
- padding=1),
+ layers.ConvBNReLU(
+ in_channels, in_channels, kernel_size=3, stride=1, padding=1),
nn.Dropout2D(drop_prob),
- nn.Conv2D(in_channels,
- num_classes,
- kernel_size=1,
- stride=1,
- bias_attr=False),
+ nn.Conv2D(
+ in_channels,
+ num_classes,
+ kernel_size=1,
+ stride=1,
+ bias_attr=False),
)
- self.proj_head = ProjectionHead(dim_in=in_channels,
- proj_dim=self.proj_dim)
+ self.proj_head = ProjectionHead(
+ dim_in=in_channels, proj_dim=self.proj_dim)
+
+ self.pretrained = pretrained
+ self.init_weight()
def init_weight(self):
if self.pretrained is not None:
@@ -83,17 +84,19 @@
if self.training:
emb = self.proj_head(feats)
logit_list.append(
- F.interpolate(out,
- paddle.shape(x)[2:],
- mode='bilinear',
- align_corners=self.align_corners))
+ F.interpolate(
+ out,
+ paddle.shape(x)[2:],
+ mode='bilinear',
+ align_corners=self.align_corners))
logit_list.append({'seg': out, 'embed': emb})
else:
logit_list.append(
- F.interpolate(out,
- paddle.shape(x)[2:],
- mode='bilinear',
- align_corners=self.align_corners))
+ F.interpolate(
+ out,
+ paddle.shape(x)[2:],
+ mode='bilinear',
+ align_corners=self.align_corners))
return logit_list
@@ -105,6 +108,7 @@
proj_dim (int, optional): The output dimensions of projection head. Default: 256.
proj (str, optional): The type of projection head, only support 'linear' and 'convmlp'. Default: 'convmlp'.
"""
+
def __init__(self, dim_in, proj_dim=256, proj='convmlp'):
super(ProjectionHead, self).__init__()
if proj == 'linear':
| {"golden_diff": "diff --git a/paddleseg/models/hrnet_contrast.py b/paddleseg/models/hrnet_contrast.py\n--- a/paddleseg/models/hrnet_contrast.py\n+++ b/paddleseg/models/hrnet_contrast.py\n@@ -40,6 +40,7 @@\n e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.\n pretrained (str, optional): The path or url of pretrained model. Default: None.\n \"\"\"\n+\n def __init__(self,\n in_channels,\n num_classes,\n@@ -54,23 +55,23 @@\n self.num_classes = num_classes\n self.proj_dim = proj_dim\n self.align_corners = align_corners\n- self.pretrained = pretrained\n \n self.cls_head = nn.Sequential(\n- layers.ConvBNReLU(in_channels,\n- in_channels,\n- kernel_size=3,\n- stride=1,\n- padding=1),\n+ layers.ConvBNReLU(\n+ in_channels, in_channels, kernel_size=3, stride=1, padding=1),\n nn.Dropout2D(drop_prob),\n- nn.Conv2D(in_channels,\n- num_classes,\n- kernel_size=1,\n- stride=1,\n- bias_attr=False),\n+ nn.Conv2D(\n+ in_channels,\n+ num_classes,\n+ kernel_size=1,\n+ stride=1,\n+ bias_attr=False),\n )\n- self.proj_head = ProjectionHead(dim_in=in_channels,\n- proj_dim=self.proj_dim)\n+ self.proj_head = ProjectionHead(\n+ dim_in=in_channels, proj_dim=self.proj_dim)\n+\n+ self.pretrained = pretrained\n+ self.init_weight()\n \n def init_weight(self):\n if self.pretrained is not None:\n@@ -83,17 +84,19 @@\n if self.training:\n emb = self.proj_head(feats)\n logit_list.append(\n- F.interpolate(out,\n- paddle.shape(x)[2:],\n- mode='bilinear',\n- align_corners=self.align_corners))\n+ F.interpolate(\n+ out,\n+ paddle.shape(x)[2:],\n+ mode='bilinear',\n+ align_corners=self.align_corners))\n logit_list.append({'seg': out, 'embed': emb})\n else:\n logit_list.append(\n- F.interpolate(out,\n- paddle.shape(x)[2:],\n- mode='bilinear',\n- align_corners=self.align_corners))\n+ F.interpolate(\n+ out,\n+ paddle.shape(x)[2:],\n+ mode='bilinear',\n+ align_corners=self.align_corners))\n return logit_list\n \n \n@@ -105,6 +108,7 @@\n proj_dim (int, optional): The output dimensions of projection head. Default: 256.\n proj (str, optional): The type of projection head, only support 'linear' and 'convmlp'. Default: 'convmlp'.\n \"\"\"\n+\n def __init__(self, dim_in, proj_dim=256, proj='convmlp'):\n super(ProjectionHead, self).__init__()\n if proj == 'linear':\n", "issue": "paddleseg/models/hrnet_contrast.py \u4e2d\u6ca1\u6709\u6267\u884c init_weight\npaddleseg/models/hrnet_contrast.py \u4e2d__init__()\u6ca1\u6709\u6267\u884c init_weight\uff0c\u5bfc\u81f4hrnet_w48_contrast \u6ca1\u6cd5\u52a0\u8f7d\u5b8c\u6574\u7684\u6a21\u578b\n", "before_files": [{"content": "# Copyright (c) 2021 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\n\nfrom paddleseg.cvlibs import manager\nfrom paddleseg.models import layers\nfrom paddleseg.utils import utils\n\n\[email protected]_component\nclass HRNetW48Contrast(nn.Layer):\n \"\"\"\n The HRNetW48Contrast implementation based on PaddlePaddle.\n\n The original article refers to\n Wenguan Wang, Tianfei Zhou, et al. \"Exploring Cross-Image Pixel Contrast for Semantic Segmentation\"\n (https://arxiv.org/abs/2101.11939).\n\n Args:\n in_channels (int): The output dimensions of backbone.\n num_classes (int): The unique number of target classes.\n backbone (Paddle.nn.Layer): Backbone network, currently support HRNet_W48.\n drop_prob (float): The probability of dropout.\n proj_dim (int): The projection dimensions.\n align_corners (bool, optional): An argument of F.interpolate. It should be set to False when the feature size is even,\n e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.\n pretrained (str, optional): The path or url of pretrained model. Default: None.\n \"\"\"\n def __init__(self,\n in_channels,\n num_classes,\n backbone,\n drop_prob,\n proj_dim,\n align_corners=False,\n pretrained=None):\n super().__init__()\n self.in_channels = in_channels\n self.backbone = backbone\n self.num_classes = num_classes\n self.proj_dim = proj_dim\n self.align_corners = align_corners\n self.pretrained = pretrained\n\n self.cls_head = nn.Sequential(\n layers.ConvBNReLU(in_channels,\n in_channels,\n kernel_size=3,\n stride=1,\n padding=1),\n nn.Dropout2D(drop_prob),\n nn.Conv2D(in_channels,\n num_classes,\n kernel_size=1,\n stride=1,\n bias_attr=False),\n )\n self.proj_head = ProjectionHead(dim_in=in_channels,\n proj_dim=self.proj_dim)\n\n def init_weight(self):\n if self.pretrained is not None:\n utils.load_entire_model(self, self.pretrained)\n\n def forward(self, x):\n feats = self.backbone(x)[0]\n out = self.cls_head(feats)\n logit_list = []\n if self.training:\n emb = self.proj_head(feats)\n logit_list.append(\n F.interpolate(out,\n paddle.shape(x)[2:],\n mode='bilinear',\n align_corners=self.align_corners))\n logit_list.append({'seg': out, 'embed': emb})\n else:\n logit_list.append(\n F.interpolate(out,\n paddle.shape(x)[2:],\n mode='bilinear',\n align_corners=self.align_corners))\n return logit_list\n\n\nclass ProjectionHead(nn.Layer):\n \"\"\"\n The projection head used by contrast learning.\n Args:\n dim_in (int): The dimensions of input features.\n proj_dim (int, optional): The output dimensions of projection head. Default: 256.\n proj (str, optional): The type of projection head, only support 'linear' and 'convmlp'. 
Default: 'convmlp'.\n \"\"\"\n def __init__(self, dim_in, proj_dim=256, proj='convmlp'):\n super(ProjectionHead, self).__init__()\n if proj == 'linear':\n self.proj = nn.Conv2D(dim_in, proj_dim, kernel_size=1)\n elif proj == 'convmlp':\n self.proj = nn.Sequential(\n layers.ConvBNReLU(dim_in, dim_in, kernel_size=1),\n nn.Conv2D(dim_in, proj_dim, kernel_size=1),\n )\n else:\n raise ValueError(\n \"The type of project head only support 'linear' and 'convmlp', but got {}.\"\n .format(proj))\n\n def forward(self, x):\n return F.normalize(self.proj(x), p=2, axis=1)\n", "path": "paddleseg/models/hrnet_contrast.py"}], "after_files": [{"content": "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\n\nfrom paddleseg.cvlibs import manager\nfrom paddleseg.models import layers\nfrom paddleseg.utils import utils\n\n\[email protected]_component\nclass HRNetW48Contrast(nn.Layer):\n \"\"\"\n The HRNetW48Contrast implementation based on PaddlePaddle.\n\n The original article refers to\n Wenguan Wang, Tianfei Zhou, et al. \"Exploring Cross-Image Pixel Contrast for Semantic Segmentation\"\n (https://arxiv.org/abs/2101.11939).\n\n Args:\n in_channels (int): The output dimensions of backbone.\n num_classes (int): The unique number of target classes.\n backbone (Paddle.nn.Layer): Backbone network, currently support HRNet_W48.\n drop_prob (float): The probability of dropout.\n proj_dim (int): The projection dimensions.\n align_corners (bool, optional): An argument of F.interpolate. It should be set to False when the feature size is even,\n e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.\n pretrained (str, optional): The path or url of pretrained model. 
Default: None.\n \"\"\"\n\n def __init__(self,\n in_channels,\n num_classes,\n backbone,\n drop_prob,\n proj_dim,\n align_corners=False,\n pretrained=None):\n super().__init__()\n self.in_channels = in_channels\n self.backbone = backbone\n self.num_classes = num_classes\n self.proj_dim = proj_dim\n self.align_corners = align_corners\n\n self.cls_head = nn.Sequential(\n layers.ConvBNReLU(\n in_channels, in_channels, kernel_size=3, stride=1, padding=1),\n nn.Dropout2D(drop_prob),\n nn.Conv2D(\n in_channels,\n num_classes,\n kernel_size=1,\n stride=1,\n bias_attr=False),\n )\n self.proj_head = ProjectionHead(\n dim_in=in_channels, proj_dim=self.proj_dim)\n\n self.pretrained = pretrained\n self.init_weight()\n\n def init_weight(self):\n if self.pretrained is not None:\n utils.load_entire_model(self, self.pretrained)\n\n def forward(self, x):\n feats = self.backbone(x)[0]\n out = self.cls_head(feats)\n logit_list = []\n if self.training:\n emb = self.proj_head(feats)\n logit_list.append(\n F.interpolate(\n out,\n paddle.shape(x)[2:],\n mode='bilinear',\n align_corners=self.align_corners))\n logit_list.append({'seg': out, 'embed': emb})\n else:\n logit_list.append(\n F.interpolate(\n out,\n paddle.shape(x)[2:],\n mode='bilinear',\n align_corners=self.align_corners))\n return logit_list\n\n\nclass ProjectionHead(nn.Layer):\n \"\"\"\n The projection head used by contrast learning.\n Args:\n dim_in (int): The dimensions of input features.\n proj_dim (int, optional): The output dimensions of projection head. Default: 256.\n proj (str, optional): The type of projection head, only support 'linear' and 'convmlp'. Default: 'convmlp'.\n \"\"\"\n\n def __init__(self, dim_in, proj_dim=256, proj='convmlp'):\n super(ProjectionHead, self).__init__()\n if proj == 'linear':\n self.proj = nn.Conv2D(dim_in, proj_dim, kernel_size=1)\n elif proj == 'convmlp':\n self.proj = nn.Sequential(\n layers.ConvBNReLU(dim_in, dim_in, kernel_size=1),\n nn.Conv2D(dim_in, proj_dim, kernel_size=1),\n )\n else:\n raise ValueError(\n \"The type of project head only support 'linear' and 'convmlp', but got {}.\"\n .format(proj))\n\n def forward(self, x):\n return F.normalize(self.proj(x), p=2, axis=1)\n", "path": "paddleseg/models/hrnet_contrast.py"}]} | 1,602 | 701 |
gh_patches_debug_33490 | rasdani/github-patches | git_diff | apache__airflow-22536 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PostgresToGoogleCloudStorageOperator - Custom schema mapping
Version: 1.10.12
I used PostgresToGoogleCloudStorageOperator to export the data and the schema file as well. A column in Postgres was `TIMESTAMP without time zone`, but the table auto-created in BigQuery (via `GoogleCloudStorageToBigQueryOperator`, which used the exported JSON schema file) ended up with the data type `TIMESTAMP`.
For data without a timezone, **`DATETIME`** would be the right choice. So can we manually map the data types during the schema file export?
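To illustrate what I'm asking for, something along these lines is what I have in mind. This is a rough, untested sketch: the subclass name is made up, and the import path assumes the current providers layout where `type_map` is a plain class attribute on the operator.

```python
from airflow.providers.google.cloud.transfers.postgres_to_gcs import PostgresToGCSOperator


class DatetimeAwarePostgresToGCSOperator(PostgresToGCSOperator):
    # Override the Postgres OID -> BigQuery type mapping so that
    # TIMESTAMP WITHOUT TIME ZONE (OID 1114) is written to the exported
    # schema file as DATETIME instead of TIMESTAMP.
    type_map = {**PostgresToGCSOperator.type_map, 1114: "DATETIME"}
```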
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `airflow/providers/google/cloud/transfers/postgres_to_gcs.py`
Content:
```
1 #
2 # Licensed to the Apache Software Foundation (ASF) under one
3 # or more contributor license agreements. See the NOTICE file
4 # distributed with this work for additional information
5 # regarding copyright ownership. The ASF licenses this file
6 # to you under the Apache License, Version 2.0 (the
7 # "License"); you may not use this file except in compliance
8 # with the License. You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing,
13 # software distributed under the License is distributed on an
14 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 # KIND, either express or implied. See the License for the
16 # specific language governing permissions and limitations
17 # under the License.
18 """PostgreSQL to GCS operator."""
19
20 import datetime
21 import json
22 import time
23 import uuid
24 from decimal import Decimal
25 from typing import Dict
26
27 import pendulum
28
29 from airflow.providers.google.cloud.transfers.sql_to_gcs import BaseSQLToGCSOperator
30 from airflow.providers.postgres.hooks.postgres import PostgresHook
31
32
33 class _PostgresServerSideCursorDecorator:
34 """
35 Inspired by `_PrestoToGCSPrestoCursorAdapter` to keep this consistent.
36
37 Decorator for allowing description to be available for postgres cursor in case server side
38 cursor is used. It doesn't provide other methods except those needed in BaseSQLToGCSOperator,
39 which is more of a safety feature.
40 """
41
42 def __init__(self, cursor):
43 self.cursor = cursor
44 self.rows = []
45 self.initialized = False
46
47 def __iter__(self):
48 return self
49
50 def __next__(self):
51 if self.rows:
52 return self.rows.pop()
53 else:
54 self.initialized = True
55 return next(self.cursor)
56
57 @property
58 def description(self):
59 """Fetch first row to initialize cursor description when using server side cursor."""
60 if not self.initialized:
61 element = self.cursor.fetchone()
62 if element is not None:
63 self.rows.append(element)
64 self.initialized = True
65 return self.cursor.description
66
67
68 class PostgresToGCSOperator(BaseSQLToGCSOperator):
69 """
70 Copy data from Postgres to Google Cloud Storage in JSON or CSV format.
71
72 :param postgres_conn_id: Reference to a specific Postgres hook.
73 :param use_server_side_cursor: If server-side cursor should be used for querying postgres.
74 For detailed info, check https://www.psycopg.org/docs/usage.html#server-side-cursors
75 :param cursor_itersize: How many records are fetched at a time in case of server-side cursor.
76 """
77
78 ui_color = '#a0e08c'
79
80 type_map = {
81 1114: 'TIMESTAMP',
82 1184: 'TIMESTAMP',
83 1082: 'TIMESTAMP',
84 1083: 'TIMESTAMP',
85 1005: 'INTEGER',
86 1007: 'INTEGER',
87 1016: 'INTEGER',
88 20: 'INTEGER',
89 21: 'INTEGER',
90 23: 'INTEGER',
91 16: 'BOOLEAN',
92 700: 'FLOAT',
93 701: 'FLOAT',
94 1700: 'FLOAT',
95 }
96
97 def __init__(
98 self,
99 *,
100 postgres_conn_id='postgres_default',
101 use_server_side_cursor=False,
102 cursor_itersize=2000,
103 **kwargs,
104 ):
105 super().__init__(**kwargs)
106 self.postgres_conn_id = postgres_conn_id
107 self.use_server_side_cursor = use_server_side_cursor
108 self.cursor_itersize = cursor_itersize
109
110 def _unique_name(self):
111 return f"{self.dag_id}__{self.task_id}__{uuid.uuid4()}" if self.use_server_side_cursor else None
112
113 def query(self):
114 """Queries Postgres and returns a cursor to the results."""
115 hook = PostgresHook(postgres_conn_id=self.postgres_conn_id)
116 conn = hook.get_conn()
117 cursor = conn.cursor(name=self._unique_name())
118 cursor.execute(self.sql, self.parameters)
119 if self.use_server_side_cursor:
120 cursor.itersize = self.cursor_itersize
121 return _PostgresServerSideCursorDecorator(cursor)
122 return cursor
123
124 def field_to_bigquery(self, field) -> Dict[str, str]:
125 return {
126 'name': field[0],
127 'type': self.type_map.get(field[1], "STRING"),
128 'mode': 'REPEATED' if field[1] in (1009, 1005, 1007, 1016) else 'NULLABLE',
129 }
130
131 def convert_type(self, value, schema_type):
132 """
133 Takes a value from Postgres, and converts it to a value that's safe for
134 JSON/Google Cloud Storage/BigQuery. Dates are converted to UTC seconds.
135 Decimals are converted to floats. Times are converted to seconds.
136 """
137 if isinstance(value, (datetime.datetime, datetime.date)):
138 return pendulum.parse(value.isoformat()).float_timestamp
139 if isinstance(value, datetime.time):
140 formatted_time = time.strptime(str(value), "%H:%M:%S")
141 return int(
142 datetime.timedelta(
143 hours=formatted_time.tm_hour, minutes=formatted_time.tm_min, seconds=formatted_time.tm_sec
144 ).total_seconds()
145 )
146 if isinstance(value, dict):
147 return json.dumps(value)
148 if isinstance(value, Decimal):
149 return float(value)
150 return value
151
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/airflow/providers/google/cloud/transfers/postgres_to_gcs.py b/airflow/providers/google/cloud/transfers/postgres_to_gcs.py
--- a/airflow/providers/google/cloud/transfers/postgres_to_gcs.py
+++ b/airflow/providers/google/cloud/transfers/postgres_to_gcs.py
@@ -78,10 +78,10 @@
ui_color = '#a0e08c'
type_map = {
- 1114: 'TIMESTAMP',
+ 1114: 'DATETIME',
1184: 'TIMESTAMP',
- 1082: 'TIMESTAMP',
- 1083: 'TIMESTAMP',
+ 1082: 'DATE',
+ 1083: 'TIME',
1005: 'INTEGER',
1007: 'INTEGER',
1016: 'INTEGER',
@@ -131,18 +131,24 @@
def convert_type(self, value, schema_type):
"""
Takes a value from Postgres, and converts it to a value that's safe for
- JSON/Google Cloud Storage/BigQuery. Dates are converted to UTC seconds.
- Decimals are converted to floats. Times are converted to seconds.
+ JSON/Google Cloud Storage/BigQuery.
+ Timezone aware Datetime are converted to UTC seconds.
+ Unaware Datetime, Date and Time are converted to ISO formatted strings.
+ Decimals are converted to floats.
"""
- if isinstance(value, (datetime.datetime, datetime.date)):
- return pendulum.parse(value.isoformat()).float_timestamp
+ if isinstance(value, datetime.datetime):
+ iso_format_value = value.isoformat()
+ if value.tzinfo is None:
+ return iso_format_value
+ return pendulum.parse(iso_format_value).float_timestamp
+ if isinstance(value, datetime.date):
+ return value.isoformat()
if isinstance(value, datetime.time):
formatted_time = time.strptime(str(value), "%H:%M:%S")
- return int(
- datetime.timedelta(
- hours=formatted_time.tm_hour, minutes=formatted_time.tm_min, seconds=formatted_time.tm_sec
- ).total_seconds()
+ time_delta = datetime.timedelta(
+ hours=formatted_time.tm_hour, minutes=formatted_time.tm_min, seconds=formatted_time.tm_sec
)
+ return str(time_delta)
if isinstance(value, dict):
return json.dumps(value)
if isinstance(value, Decimal):
| {"golden_diff": "diff --git a/airflow/providers/google/cloud/transfers/postgres_to_gcs.py b/airflow/providers/google/cloud/transfers/postgres_to_gcs.py\n--- a/airflow/providers/google/cloud/transfers/postgres_to_gcs.py\n+++ b/airflow/providers/google/cloud/transfers/postgres_to_gcs.py\n@@ -78,10 +78,10 @@\n ui_color = '#a0e08c'\n \n type_map = {\n- 1114: 'TIMESTAMP',\n+ 1114: 'DATETIME',\n 1184: 'TIMESTAMP',\n- 1082: 'TIMESTAMP',\n- 1083: 'TIMESTAMP',\n+ 1082: 'DATE',\n+ 1083: 'TIME',\n 1005: 'INTEGER',\n 1007: 'INTEGER',\n 1016: 'INTEGER',\n@@ -131,18 +131,24 @@\n def convert_type(self, value, schema_type):\n \"\"\"\n Takes a value from Postgres, and converts it to a value that's safe for\n- JSON/Google Cloud Storage/BigQuery. Dates are converted to UTC seconds.\n- Decimals are converted to floats. Times are converted to seconds.\n+ JSON/Google Cloud Storage/BigQuery.\n+ Timezone aware Datetime are converted to UTC seconds.\n+ Unaware Datetime, Date and Time are converted to ISO formatted strings.\n+ Decimals are converted to floats.\n \"\"\"\n- if isinstance(value, (datetime.datetime, datetime.date)):\n- return pendulum.parse(value.isoformat()).float_timestamp\n+ if isinstance(value, datetime.datetime):\n+ iso_format_value = value.isoformat()\n+ if value.tzinfo is None:\n+ return iso_format_value\n+ return pendulum.parse(iso_format_value).float_timestamp\n+ if isinstance(value, datetime.date):\n+ return value.isoformat()\n if isinstance(value, datetime.time):\n formatted_time = time.strptime(str(value), \"%H:%M:%S\")\n- return int(\n- datetime.timedelta(\n- hours=formatted_time.tm_hour, minutes=formatted_time.tm_min, seconds=formatted_time.tm_sec\n- ).total_seconds()\n+ time_delta = datetime.timedelta(\n+ hours=formatted_time.tm_hour, minutes=formatted_time.tm_min, seconds=formatted_time.tm_sec\n )\n+ return str(time_delta)\n if isinstance(value, dict):\n return json.dumps(value)\n if isinstance(value, Decimal):\n", "issue": "PostgresToGoogleCloudStorageOperator - Custom schema mapping\nVersion : 1.10.12\r\n\r\nI used PostgresToGoogleCloudStorageOperator to export the data and the schema file as well. But I saw a column on Postgres was `TIMESTAMP without time zone` but in BigQuery the auto-create table (via `GoogleCloudStorageToBigQueryOperator`) used the JSON schema file and created the table. When I checked the BQ table the data type was `TIMESTAMP`.\r\n\r\nFor without timezone data, **`DATETIME`** would be the right choice. So can we manually MAP the data types during the schema file export? \n", "before_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"PostgreSQL to GCS operator.\"\"\"\n\nimport datetime\nimport json\nimport time\nimport uuid\nfrom decimal import Decimal\nfrom typing import Dict\n\nimport pendulum\n\nfrom airflow.providers.google.cloud.transfers.sql_to_gcs import BaseSQLToGCSOperator\nfrom airflow.providers.postgres.hooks.postgres import PostgresHook\n\n\nclass _PostgresServerSideCursorDecorator:\n \"\"\"\n Inspired by `_PrestoToGCSPrestoCursorAdapter` to keep this consistent.\n\n Decorator for allowing description to be available for postgres cursor in case server side\n cursor is used. It doesn't provide other methods except those needed in BaseSQLToGCSOperator,\n which is more of a safety feature.\n \"\"\"\n\n def __init__(self, cursor):\n self.cursor = cursor\n self.rows = []\n self.initialized = False\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.rows:\n return self.rows.pop()\n else:\n self.initialized = True\n return next(self.cursor)\n\n @property\n def description(self):\n \"\"\"Fetch first row to initialize cursor description when using server side cursor.\"\"\"\n if not self.initialized:\n element = self.cursor.fetchone()\n if element is not None:\n self.rows.append(element)\n self.initialized = True\n return self.cursor.description\n\n\nclass PostgresToGCSOperator(BaseSQLToGCSOperator):\n \"\"\"\n Copy data from Postgres to Google Cloud Storage in JSON or CSV format.\n\n :param postgres_conn_id: Reference to a specific Postgres hook.\n :param use_server_side_cursor: If server-side cursor should be used for querying postgres.\n For detailed info, check https://www.psycopg.org/docs/usage.html#server-side-cursors\n :param cursor_itersize: How many records are fetched at a time in case of server-side cursor.\n \"\"\"\n\n ui_color = '#a0e08c'\n\n type_map = {\n 1114: 'TIMESTAMP',\n 1184: 'TIMESTAMP',\n 1082: 'TIMESTAMP',\n 1083: 'TIMESTAMP',\n 1005: 'INTEGER',\n 1007: 'INTEGER',\n 1016: 'INTEGER',\n 20: 'INTEGER',\n 21: 'INTEGER',\n 23: 'INTEGER',\n 16: 'BOOLEAN',\n 700: 'FLOAT',\n 701: 'FLOAT',\n 1700: 'FLOAT',\n }\n\n def __init__(\n self,\n *,\n postgres_conn_id='postgres_default',\n use_server_side_cursor=False,\n cursor_itersize=2000,\n **kwargs,\n ):\n super().__init__(**kwargs)\n self.postgres_conn_id = postgres_conn_id\n self.use_server_side_cursor = use_server_side_cursor\n self.cursor_itersize = cursor_itersize\n\n def _unique_name(self):\n return f\"{self.dag_id}__{self.task_id}__{uuid.uuid4()}\" if self.use_server_side_cursor else None\n\n def query(self):\n \"\"\"Queries Postgres and returns a cursor to the results.\"\"\"\n hook = PostgresHook(postgres_conn_id=self.postgres_conn_id)\n conn = hook.get_conn()\n cursor = conn.cursor(name=self._unique_name())\n cursor.execute(self.sql, self.parameters)\n if self.use_server_side_cursor:\n cursor.itersize = self.cursor_itersize\n return _PostgresServerSideCursorDecorator(cursor)\n return cursor\n\n def field_to_bigquery(self, field) -> Dict[str, str]:\n return {\n 'name': field[0],\n 'type': self.type_map.get(field[1], \"STRING\"),\n 'mode': 'REPEATED' if field[1] in (1009, 1005, 1007, 1016) else 'NULLABLE',\n }\n\n def convert_type(self, value, schema_type):\n \"\"\"\n Takes a value from Postgres, and converts it to a value that's safe for\n JSON/Google Cloud Storage/BigQuery. Dates are converted to UTC seconds.\n Decimals are converted to floats. 
Times are converted to seconds.\n \"\"\"\n if isinstance(value, (datetime.datetime, datetime.date)):\n return pendulum.parse(value.isoformat()).float_timestamp\n if isinstance(value, datetime.time):\n formatted_time = time.strptime(str(value), \"%H:%M:%S\")\n return int(\n datetime.timedelta(\n hours=formatted_time.tm_hour, minutes=formatted_time.tm_min, seconds=formatted_time.tm_sec\n ).total_seconds()\n )\n if isinstance(value, dict):\n return json.dumps(value)\n if isinstance(value, Decimal):\n return float(value)\n return value\n", "path": "airflow/providers/google/cloud/transfers/postgres_to_gcs.py"}], "after_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"PostgreSQL to GCS operator.\"\"\"\n\nimport datetime\nimport json\nimport time\nimport uuid\nfrom decimal import Decimal\nfrom typing import Dict\n\nimport pendulum\n\nfrom airflow.providers.google.cloud.transfers.sql_to_gcs import BaseSQLToGCSOperator\nfrom airflow.providers.postgres.hooks.postgres import PostgresHook\n\n\nclass _PostgresServerSideCursorDecorator:\n \"\"\"\n Inspired by `_PrestoToGCSPrestoCursorAdapter` to keep this consistent.\n\n Decorator for allowing description to be available for postgres cursor in case server side\n cursor is used. 
It doesn't provide other methods except those needed in BaseSQLToGCSOperator,\n which is more of a safety feature.\n \"\"\"\n\n def __init__(self, cursor):\n self.cursor = cursor\n self.rows = []\n self.initialized = False\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.rows:\n return self.rows.pop()\n else:\n self.initialized = True\n return next(self.cursor)\n\n @property\n def description(self):\n \"\"\"Fetch first row to initialize cursor description when using server side cursor.\"\"\"\n if not self.initialized:\n element = self.cursor.fetchone()\n if element is not None:\n self.rows.append(element)\n self.initialized = True\n return self.cursor.description\n\n\nclass PostgresToGCSOperator(BaseSQLToGCSOperator):\n \"\"\"\n Copy data from Postgres to Google Cloud Storage in JSON or CSV format.\n\n :param postgres_conn_id: Reference to a specific Postgres hook.\n :param use_server_side_cursor: If server-side cursor should be used for querying postgres.\n For detailed info, check https://www.psycopg.org/docs/usage.html#server-side-cursors\n :param cursor_itersize: How many records are fetched at a time in case of server-side cursor.\n \"\"\"\n\n ui_color = '#a0e08c'\n\n type_map = {\n 1114: 'DATETIME',\n 1184: 'TIMESTAMP',\n 1082: 'DATE',\n 1083: 'TIME',\n 1005: 'INTEGER',\n 1007: 'INTEGER',\n 1016: 'INTEGER',\n 20: 'INTEGER',\n 21: 'INTEGER',\n 23: 'INTEGER',\n 16: 'BOOLEAN',\n 700: 'FLOAT',\n 701: 'FLOAT',\n 1700: 'FLOAT',\n }\n\n def __init__(\n self,\n *,\n postgres_conn_id='postgres_default',\n use_server_side_cursor=False,\n cursor_itersize=2000,\n **kwargs,\n ):\n super().__init__(**kwargs)\n self.postgres_conn_id = postgres_conn_id\n self.use_server_side_cursor = use_server_side_cursor\n self.cursor_itersize = cursor_itersize\n\n def _unique_name(self):\n return f\"{self.dag_id}__{self.task_id}__{uuid.uuid4()}\" if self.use_server_side_cursor else None\n\n def query(self):\n \"\"\"Queries Postgres and returns a cursor to the results.\"\"\"\n hook = PostgresHook(postgres_conn_id=self.postgres_conn_id)\n conn = hook.get_conn()\n cursor = conn.cursor(name=self._unique_name())\n cursor.execute(self.sql, self.parameters)\n if self.use_server_side_cursor:\n cursor.itersize = self.cursor_itersize\n return _PostgresServerSideCursorDecorator(cursor)\n return cursor\n\n def field_to_bigquery(self, field) -> Dict[str, str]:\n return {\n 'name': field[0],\n 'type': self.type_map.get(field[1], \"STRING\"),\n 'mode': 'REPEATED' if field[1] in (1009, 1005, 1007, 1016) else 'NULLABLE',\n }\n\n def convert_type(self, value, schema_type):\n \"\"\"\n Takes a value from Postgres, and converts it to a value that's safe for\n JSON/Google Cloud Storage/BigQuery.\n Timezone aware Datetime are converted to UTC seconds.\n Unaware Datetime, Date and Time are converted to ISO formatted strings.\n Decimals are converted to floats.\n \"\"\"\n if isinstance(value, datetime.datetime):\n iso_format_value = value.isoformat()\n if value.tzinfo is None:\n return iso_format_value\n return pendulum.parse(iso_format_value).float_timestamp\n if isinstance(value, datetime.date):\n return value.isoformat()\n if isinstance(value, datetime.time):\n formatted_time = time.strptime(str(value), \"%H:%M:%S\")\n time_delta = datetime.timedelta(\n hours=formatted_time.tm_hour, minutes=formatted_time.tm_min, seconds=formatted_time.tm_sec\n )\n return str(time_delta)\n if isinstance(value, dict):\n return json.dumps(value)\n if isinstance(value, Decimal):\n return float(value)\n return value\n", "path": 
"airflow/providers/google/cloud/transfers/postgres_to_gcs.py"}]} | 1,974 | 559 |
gh_patches_debug_6236 | rasdani/github-patches | git_diff | engnadeau__pybotics-18 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update examples
Examples are now out of sync with the current codebase. Potential use for IPython?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/example_robot.py`
Content:
```
1 import copy
2
3 import pybotics as py
4 import numpy as np
5
6 # set numpy print options
7 np.set_printoptions(precision=3)
8 np.set_printoptions(suppress=True)
9
10 # create robot
11 model = np.loadtxt('ur10-mdh.csv', delimiter=',')
12 robot = py.Robot(model)
13
14 print('Robot Model:\n{}\n'.format(robot.robot_model))
15
16 # demonstrate forward kinematics
17 joints = [0] * robot.num_dof()
18 pose = robot.fk(joints)
19
20 print('Pose:\n{}\n'.format(pose))
21
22 # demonstrate inverse kinematics
23 new_joints = robot.ik(pose)
24 print('Solved Joints:\n{}\n'.format(new_joints))
25
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/example_robot.py b/examples/example_robot.py
deleted file mode 100644
--- a/examples/example_robot.py
+++ /dev/null
@@ -1,24 +0,0 @@
-import copy
-
-import pybotics as py
-import numpy as np
-
-# set numpy print options
-np.set_printoptions(precision=3)
-np.set_printoptions(suppress=True)
-
-# create robot
-model = np.loadtxt('ur10-mdh.csv', delimiter=',')
-robot = py.Robot(model)
-
-print('Robot Model:\n{}\n'.format(robot.robot_model))
-
-# demonstrate forward kinematics
-joints = [0] * robot.num_dof()
-pose = robot.fk(joints)
-
-print('Pose:\n{}\n'.format(pose))
-
-# demonstrate inverse kinematics
-new_joints = robot.ik(pose)
-print('Solved Joints:\n{}\n'.format(new_joints))
| {"golden_diff": "diff --git a/examples/example_robot.py b/examples/example_robot.py\ndeleted file mode 100644\n--- a/examples/example_robot.py\n+++ /dev/null\n@@ -1,24 +0,0 @@\n-import copy\n-\n-import pybotics as py\n-import numpy as np\n-\n-# set numpy print options\n-np.set_printoptions(precision=3)\n-np.set_printoptions(suppress=True)\n-\n-# create robot\n-model = np.loadtxt('ur10-mdh.csv', delimiter=',')\n-robot = py.Robot(model)\n-\n-print('Robot Model:\\n{}\\n'.format(robot.robot_model))\n-\n-# demonstrate forward kinematics\n-joints = [0] * robot.num_dof()\n-pose = robot.fk(joints)\n-\n-print('Pose:\\n{}\\n'.format(pose))\n-\n-# demonstrate inverse kinematics\n-new_joints = robot.ik(pose)\n-print('Solved Joints:\\n{}\\n'.format(new_joints))\n", "issue": "Update examples\nExamples are now out of sync with current codebase. Potential use for iPython?\n", "before_files": [{"content": "import copy\n\nimport pybotics as py\nimport numpy as np\n\n# set numpy print options\nnp.set_printoptions(precision=3)\nnp.set_printoptions(suppress=True)\n\n# create robot\nmodel = np.loadtxt('ur10-mdh.csv', delimiter=',')\nrobot = py.Robot(model)\n\nprint('Robot Model:\\n{}\\n'.format(robot.robot_model))\n\n# demonstrate forward kinematics\njoints = [0] * robot.num_dof()\npose = robot.fk(joints)\n\nprint('Pose:\\n{}\\n'.format(pose))\n\n# demonstrate inverse kinematics\nnew_joints = robot.ik(pose)\nprint('Solved Joints:\\n{}\\n'.format(new_joints))\n", "path": "examples/example_robot.py"}], "after_files": [{"content": null, "path": "examples/example_robot.py"}]} | 474 | 212 |
gh_patches_debug_31739 | rasdani/github-patches | git_diff | streamlink__streamlink-1863 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove 9anime.to
As discussed over here: https://github.com/streamlink/streamlink/issues/1110#issuecomment-400687075, 9anime.to isn't worth supporting at this point and is broken, so I'm proposing we remove it.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/nineanime.py`
Content:
```
1 import re
2 from streamlink.plugin import Plugin
3 from streamlink.plugin.api import http
4 from streamlink.plugin.api import useragents
5 from streamlink.plugin.api import validate
6 from streamlink.stream import HTTPStream
7 from streamlink.compat import urlparse
8
9
10 class NineAnime(Plugin):
11 _episode_info_url = "//9anime.to/ajax/episode/info"
12
13 _info_schema = validate.Schema({
14 "grabber": validate.url(),
15 "params": {
16 "id": validate.text,
17 "token": validate.text,
18 "options": validate.text,
19 }
20 })
21
22 _streams_schema = validate.Schema({
23 "token": validate.text,
24 "error": None,
25 "data": [{
26 "label": validate.text,
27 "file": validate.url(),
28 "type": "mp4"
29 }]
30 })
31
32 _url_re = re.compile(r"https?://9anime.to/watch/(?:[^.]+?\.)(\w+)/(\w+)")
33
34 @classmethod
35 def can_handle_url(cls, url):
36 return cls._url_re.match(url) is not None
37
38 def add_scheme(self, url):
39 # update the scheme for the grabber url if required
40 if url.startswith("//"):
41 url = "{0}:{1}".format(urlparse(self.url).scheme, url)
42 return url
43
44 @Plugin.broken(1110)
45 def _get_streams(self):
46 match = self._url_re.match(self.url)
47 film_id, episode_id = match.groups()
48
49 headers = {
50 "Referer": self.url,
51 "User-Agent": useragents.FIREFOX
52 }
53
54 # Get the info about the Episode, including the Grabber API URL
55 info_res = http.get(self.add_scheme(self._episode_info_url),
56 params=dict(update=0, film=film_id, id=episode_id),
57 headers=headers)
58 info = http.json(info_res, schema=self._info_schema)
59
60 # Get the data about the streams from the Grabber API
61 grabber_url = self.add_scheme(info["grabber"])
62 stream_list_res = http.get(grabber_url, params=info["params"], headers=headers)
63 stream_data = http.json(stream_list_res, schema=self._streams_schema)
64
65 for stream in stream_data["data"]:
66 yield stream["label"], HTTPStream(self.session, stream["file"])
67
68
69 __plugin__ = NineAnime
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/streamlink/plugins/nineanime.py b/src/streamlink/plugins/nineanime.py
--- a/src/streamlink/plugins/nineanime.py
+++ b/src/streamlink/plugins/nineanime.py
@@ -1,69 +1 @@
-import re
-from streamlink.plugin import Plugin
-from streamlink.plugin.api import http
-from streamlink.plugin.api import useragents
-from streamlink.plugin.api import validate
-from streamlink.stream import HTTPStream
-from streamlink.compat import urlparse
-
-
-class NineAnime(Plugin):
- _episode_info_url = "//9anime.to/ajax/episode/info"
-
- _info_schema = validate.Schema({
- "grabber": validate.url(),
- "params": {
- "id": validate.text,
- "token": validate.text,
- "options": validate.text,
- }
- })
-
- _streams_schema = validate.Schema({
- "token": validate.text,
- "error": None,
- "data": [{
- "label": validate.text,
- "file": validate.url(),
- "type": "mp4"
- }]
- })
-
- _url_re = re.compile(r"https?://9anime.to/watch/(?:[^.]+?\.)(\w+)/(\w+)")
-
- @classmethod
- def can_handle_url(cls, url):
- return cls._url_re.match(url) is not None
-
- def add_scheme(self, url):
- # update the scheme for the grabber url if required
- if url.startswith("//"):
- url = "{0}:{1}".format(urlparse(self.url).scheme, url)
- return url
-
- @Plugin.broken(1110)
- def _get_streams(self):
- match = self._url_re.match(self.url)
- film_id, episode_id = match.groups()
-
- headers = {
- "Referer": self.url,
- "User-Agent": useragents.FIREFOX
- }
-
- # Get the info about the Episode, including the Grabber API URL
- info_res = http.get(self.add_scheme(self._episode_info_url),
- params=dict(update=0, film=film_id, id=episode_id),
- headers=headers)
- info = http.json(info_res, schema=self._info_schema)
-
- # Get the data about the streams from the Grabber API
- grabber_url = self.add_scheme(info["grabber"])
- stream_list_res = http.get(grabber_url, params=info["params"], headers=headers)
- stream_data = http.json(stream_list_res, schema=self._streams_schema)
-
- for stream in stream_data["data"]:
- yield stream["label"], HTTPStream(self.session, stream["file"])
-
-
-__plugin__ = NineAnime
+# Plugin removed - https://github.com/streamlink/streamlink/issues/1862
| {"golden_diff": "diff --git a/src/streamlink/plugins/nineanime.py b/src/streamlink/plugins/nineanime.py\n--- a/src/streamlink/plugins/nineanime.py\n+++ b/src/streamlink/plugins/nineanime.py\n@@ -1,69 +1 @@\n-import re\n-from streamlink.plugin import Plugin\n-from streamlink.plugin.api import http\n-from streamlink.plugin.api import useragents\n-from streamlink.plugin.api import validate\n-from streamlink.stream import HTTPStream\n-from streamlink.compat import urlparse\n-\n-\n-class NineAnime(Plugin):\n- _episode_info_url = \"//9anime.to/ajax/episode/info\"\n-\n- _info_schema = validate.Schema({\n- \"grabber\": validate.url(),\n- \"params\": {\n- \"id\": validate.text,\n- \"token\": validate.text,\n- \"options\": validate.text,\n- }\n- })\n-\n- _streams_schema = validate.Schema({\n- \"token\": validate.text,\n- \"error\": None,\n- \"data\": [{\n- \"label\": validate.text,\n- \"file\": validate.url(),\n- \"type\": \"mp4\"\n- }]\n- })\n-\n- _url_re = re.compile(r\"https?://9anime.to/watch/(?:[^.]+?\\.)(\\w+)/(\\w+)\")\n-\n- @classmethod\n- def can_handle_url(cls, url):\n- return cls._url_re.match(url) is not None\n-\n- def add_scheme(self, url):\n- # update the scheme for the grabber url if required\n- if url.startswith(\"//\"):\n- url = \"{0}:{1}\".format(urlparse(self.url).scheme, url)\n- return url\n-\n- @Plugin.broken(1110)\n- def _get_streams(self):\n- match = self._url_re.match(self.url)\n- film_id, episode_id = match.groups()\n-\n- headers = {\n- \"Referer\": self.url,\n- \"User-Agent\": useragents.FIREFOX\n- }\n-\n- # Get the info about the Episode, including the Grabber API URL\n- info_res = http.get(self.add_scheme(self._episode_info_url),\n- params=dict(update=0, film=film_id, id=episode_id),\n- headers=headers)\n- info = http.json(info_res, schema=self._info_schema)\n-\n- # Get the data about the streams from the Grabber API\n- grabber_url = self.add_scheme(info[\"grabber\"])\n- stream_list_res = http.get(grabber_url, params=info[\"params\"], headers=headers)\n- stream_data = http.json(stream_list_res, schema=self._streams_schema)\n-\n- for stream in stream_data[\"data\"]:\n- yield stream[\"label\"], HTTPStream(self.session, stream[\"file\"])\n-\n-\n-__plugin__ = NineAnime\n+# Plugin removed - https://github.com/streamlink/streamlink/issues/1862\n", "issue": "Remove 9anime.to\nAs discussed over here: https://github.com/streamlink/streamlink/issues/1110#issuecomment-400687075 9anime.to isn't worth supporting at this point and is broken so I'm proposing we remove it.\r\n\n", "before_files": [{"content": "import re\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http\nfrom streamlink.plugin.api import useragents\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import HTTPStream\nfrom streamlink.compat import urlparse\n\n\nclass NineAnime(Plugin):\n _episode_info_url = \"//9anime.to/ajax/episode/info\"\n\n _info_schema = validate.Schema({\n \"grabber\": validate.url(),\n \"params\": {\n \"id\": validate.text,\n \"token\": validate.text,\n \"options\": validate.text,\n }\n })\n\n _streams_schema = validate.Schema({\n \"token\": validate.text,\n \"error\": None,\n \"data\": [{\n \"label\": validate.text,\n \"file\": validate.url(),\n \"type\": \"mp4\"\n }]\n })\n\n _url_re = re.compile(r\"https?://9anime.to/watch/(?:[^.]+?\\.)(\\w+)/(\\w+)\")\n\n @classmethod\n def can_handle_url(cls, url):\n return cls._url_re.match(url) is not None\n\n def add_scheme(self, url):\n # update the scheme for the grabber url if required\n if 
url.startswith(\"//\"):\n url = \"{0}:{1}\".format(urlparse(self.url).scheme, url)\n return url\n\n @Plugin.broken(1110)\n def _get_streams(self):\n match = self._url_re.match(self.url)\n film_id, episode_id = match.groups()\n\n headers = {\n \"Referer\": self.url,\n \"User-Agent\": useragents.FIREFOX\n }\n\n # Get the info about the Episode, including the Grabber API URL\n info_res = http.get(self.add_scheme(self._episode_info_url),\n params=dict(update=0, film=film_id, id=episode_id),\n headers=headers)\n info = http.json(info_res, schema=self._info_schema)\n\n # Get the data about the streams from the Grabber API\n grabber_url = self.add_scheme(info[\"grabber\"])\n stream_list_res = http.get(grabber_url, params=info[\"params\"], headers=headers)\n stream_data = http.json(stream_list_res, schema=self._streams_schema)\n\n for stream in stream_data[\"data\"]:\n yield stream[\"label\"], HTTPStream(self.session, stream[\"file\"])\n\n\n__plugin__ = NineAnime\n", "path": "src/streamlink/plugins/nineanime.py"}], "after_files": [{"content": "# Plugin removed - https://github.com/streamlink/streamlink/issues/1862\n", "path": "src/streamlink/plugins/nineanime.py"}]} | 968 | 632 |
gh_patches_debug_5172 | rasdani/github-patches | git_diff | yt-project__yt-4776 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BUG: modifications through matplotlib engine cannot be properly displayed
<!--To help us understand and resolve your issue, please fill out the form to
the best of your ability.-->
<!--You can feel free to delete the sections that do not apply.-->
### Bug report
**Bug summary**
Modifications made through the Matplotlib engine cannot be properly displayed.
Taking the following code as an example, the expected modifications only show up when saving through the underlying matplotlib figure object, e.g. `fig.savefig("sloshing.png")`.
**Code for reproduction**
adapted from [docs](https://yt-project.org/docs/dev/cookbook/simple_plots.html#accessing-and-modifying-plots-directly) (also broken there)
```python
import numpy as np
import yt
# Load the dataset.
ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
# Create a slice object
slc = yt.SlicePlot(ds, "x", ("gas", "density"), width=(800.0, "kpc"))
# Get a reference to the matplotlib axes object for the plot
ax = slc.plots[("gas", "density")].axes
# Let's adjust the x axis tick labels
for label in ax.xaxis.get_ticklabels():
label.set_color("red")
label.set_fontsize(16)
# Get a reference to the matplotlib figure object for the plot
fig = slc.plots[("gas", "density")].figure
# And create a mini-panel of a gaussian histogram inside the plot
rect = (0.2, 0.2, 0.2, 0.2)
new_ax = fig.add_axes(rect)
n, bins, patches = new_ax.hist(
np.random.randn(1000) + 20, 50, facecolor="black", edgecolor="black"
)
# Make sure its visible
new_ax.tick_params(colors="white")
# And label it
la = new_ax.set_xlabel("Dinosaurs per furlong")
la.set_color("white")
slc.save()
```
**Actual outcome**

**Expected outcome**
The changes of the x-axis tick labels

**Version Information**
* Operating System: MacOS 14.1.1 and Red Hat Enterprise Linux Server release 7.8 (Maipo)
* Python Version: 3.9
* yt version: 4.2.1 and 4.3.0
<!--Please tell us how you installed yt and python e.g., from source,
pip, conda. If you installed from conda, please specify which channel you used
if not the default-->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `doc/source/cookbook/simple_slice_matplotlib_example.py`
Content:
```
1 import numpy as np
2
3 import yt
4
5 # Load the dataset.
6 ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
7
8 # Create a slice object
9 slc = yt.SlicePlot(ds, "x", ("gas", "density"), width=(800.0, "kpc"))
10
11 # Get a reference to the matplotlib axes object for the plot
12 ax = slc.plots[("gas", "density")].axes
13
14 # Let's adjust the x axis tick labels
15 for label in ax.xaxis.get_ticklabels():
16 label.set_color("red")
17 label.set_fontsize(16)
18
19 # Get a reference to the matplotlib figure object for the plot
20 fig = slc.plots[("gas", "density")].figure
21
22 # And create a mini-panel of a gaussian histogram inside the plot
23 rect = (0.2, 0.2, 0.2, 0.2)
24 new_ax = fig.add_axes(rect)
25
26 n, bins, patches = new_ax.hist(
27 np.random.randn(1000) + 20, 50, facecolor="black", edgecolor="black"
28 )
29
30 # Make sure its visible
31 new_ax.tick_params(colors="white")
32
33 # And label it
34 la = new_ax.set_xlabel("Dinosaurs per furlong")
35 la.set_color("white")
36
37 slc.save()
38
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/doc/source/cookbook/simple_slice_matplotlib_example.py b/doc/source/cookbook/simple_slice_matplotlib_example.py
--- a/doc/source/cookbook/simple_slice_matplotlib_example.py
+++ b/doc/source/cookbook/simple_slice_matplotlib_example.py
@@ -8,6 +8,10 @@
# Create a slice object
slc = yt.SlicePlot(ds, "x", ("gas", "density"), width=(800.0, "kpc"))
+# Rendering should be performed explicitly *before* any modification is
+# performed directly with matplotlib.
+slc.render()
+
# Get a reference to the matplotlib axes object for the plot
ax = slc.plots[("gas", "density")].axes
| {"golden_diff": "diff --git a/doc/source/cookbook/simple_slice_matplotlib_example.py b/doc/source/cookbook/simple_slice_matplotlib_example.py\n--- a/doc/source/cookbook/simple_slice_matplotlib_example.py\n+++ b/doc/source/cookbook/simple_slice_matplotlib_example.py\n@@ -8,6 +8,10 @@\n # Create a slice object\n slc = yt.SlicePlot(ds, \"x\", (\"gas\", \"density\"), width=(800.0, \"kpc\"))\n \n+# Rendering should be performed explicitly *before* any modification is\n+# performed directly with matplotlib.\n+slc.render()\n+\n # Get a reference to the matplotlib axes object for the plot\n ax = slc.plots[(\"gas\", \"density\")].axes\n", "issue": "BUG: modifications through matplotlib engine cannot be properly displayed\n<!--To help us understand and resolve your issue, please fill out the form to\r\nthe best of your ability.-->\r\n<!--You can feel free to delete the sections that do not apply.-->\r\n\r\n### Bug report\r\n\r\n**Bug summary**\r\n\r\nThe Modifications through Matplotlib engine cannot be properly displayed. \r\n\r\nTaking the following code for example, the expected modifications can only be shown by the containing matplotlib figure object like `fig.savefig(\"sloshing.png\")`. \r\n\r\n**Code for reproduction**\r\n\r\nadapted from [docs](https://yt-project.org/docs/dev/cookbook/simple_plots.html#accessing-and-modifying-plots-directly) (also broken there)\r\n\r\n```python\r\nimport numpy as np\r\n\r\nimport yt\r\n\r\n# Load the dataset.\r\nds = yt.load(\"GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150\")\r\n\r\n# Create a slice object\r\nslc = yt.SlicePlot(ds, \"x\", (\"gas\", \"density\"), width=(800.0, \"kpc\"))\r\n\r\n# Get a reference to the matplotlib axes object for the plot\r\nax = slc.plots[(\"gas\", \"density\")].axes\r\n\r\n# Let's adjust the x axis tick labels\r\nfor label in ax.xaxis.get_ticklabels():\r\n label.set_color(\"red\")\r\n label.set_fontsize(16)\r\n\r\n# Get a reference to the matplotlib figure object for the plot\r\nfig = slc.plots[(\"gas\", \"density\")].figure\r\n\r\n# And create a mini-panel of a gaussian histogram inside the plot\r\nrect = (0.2, 0.2, 0.2, 0.2)\r\nnew_ax = fig.add_axes(rect)\r\n\r\nn, bins, patches = new_ax.hist(\r\n np.random.randn(1000) + 20, 50, facecolor=\"black\", edgecolor=\"black\"\r\n)\r\n\r\n# Make sure its visible\r\nnew_ax.tick_params(colors=\"white\")\r\n\r\n# And label it\r\nla = new_ax.set_xlabel(\"Dinosaurs per furlong\")\r\nla.set_color(\"white\")\r\n\r\nslc.save()\r\n```\r\n\r\n**Actual outcome**\r\n\r\n\r\n\r\n**Expected outcome**\r\n\r\nThe changes of the x-axis tick labels\r\n\r\n\r\n**Version Information**\r\n * Operating System: MacOS 14.1.1 and Red Hat Enterprise Linux Server release 7.8 (Maipo)\r\n * Python Version: 3.9\r\n * yt version: 4.2.1 and 4.3.0\r\n\r\n<!--Please tell us how you installed yt and python e.g., from source,\r\npip, conda. 
If you installed from conda, please specify which channel you used\r\nif not the default-->\r\n\n", "before_files": [{"content": "import numpy as np\n\nimport yt\n\n# Load the dataset.\nds = yt.load(\"GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150\")\n\n# Create a slice object\nslc = yt.SlicePlot(ds, \"x\", (\"gas\", \"density\"), width=(800.0, \"kpc\"))\n\n# Get a reference to the matplotlib axes object for the plot\nax = slc.plots[(\"gas\", \"density\")].axes\n\n# Let's adjust the x axis tick labels\nfor label in ax.xaxis.get_ticklabels():\n label.set_color(\"red\")\n label.set_fontsize(16)\n\n# Get a reference to the matplotlib figure object for the plot\nfig = slc.plots[(\"gas\", \"density\")].figure\n\n# And create a mini-panel of a gaussian histogram inside the plot\nrect = (0.2, 0.2, 0.2, 0.2)\nnew_ax = fig.add_axes(rect)\n\nn, bins, patches = new_ax.hist(\n np.random.randn(1000) + 20, 50, facecolor=\"black\", edgecolor=\"black\"\n)\n\n# Make sure its visible\nnew_ax.tick_params(colors=\"white\")\n\n# And label it\nla = new_ax.set_xlabel(\"Dinosaurs per furlong\")\nla.set_color(\"white\")\n\nslc.save()\n", "path": "doc/source/cookbook/simple_slice_matplotlib_example.py"}], "after_files": [{"content": "import numpy as np\n\nimport yt\n\n# Load the dataset.\nds = yt.load(\"GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150\")\n\n# Create a slice object\nslc = yt.SlicePlot(ds, \"x\", (\"gas\", \"density\"), width=(800.0, \"kpc\"))\n\n# Rendering should be performed explicitly *before* any modification is\n# performed directly with matplotlib.\nslc.render()\n\n# Get a reference to the matplotlib axes object for the plot\nax = slc.plots[(\"gas\", \"density\")].axes\n\n# Let's adjust the x axis tick labels\nfor label in ax.xaxis.get_ticklabels():\n label.set_color(\"red\")\n label.set_fontsize(16)\n\n# Get a reference to the matplotlib figure object for the plot\nfig = slc.plots[(\"gas\", \"density\")].figure\n\n# And create a mini-panel of a gaussian histogram inside the plot\nrect = (0.2, 0.2, 0.2, 0.2)\nnew_ax = fig.add_axes(rect)\n\nn, bins, patches = new_ax.hist(\n np.random.randn(1000) + 20, 50, facecolor=\"black\", edgecolor=\"black\"\n)\n\n# Make sure its visible\nnew_ax.tick_params(colors=\"white\")\n\n# And label it\nla = new_ax.set_xlabel(\"Dinosaurs per furlong\")\nla.set_color(\"white\")\n\nslc.save()\n", "path": "doc/source/cookbook/simple_slice_matplotlib_example.py"}]} | 1,360 | 157 |
gh_patches_debug_12157 | rasdani/github-patches | git_diff | pyro-ppl__pyro-198 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pytorch broadcasting
In various places in the codebase we have tensor ops like `expand_as()`. Given the extended broadcasting functionality in the latest version of PyTorch, some of these may be unnecessary and/or clunky. More generally, we should investigate and deal with any PyTorch warnings that popped up once we switched PyTorch versions. For example:
_UserWarning: other is not broadcastable to self, but they have the same number of elements. Falling back to deprecated pointwise behavior._
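E.g. something like this (illustrative only; the shapes and names here are made up, not taken from the codebase):

```python
import torch
from torch.autograd import Variable

mu = Variable(torch.zeros(3, 4))
x = Variable(torch.ones(4))

# current style: explicitly expand before the elementwise op
old = x.expand_as(mu) * mu

# with the new broadcasting semantics this should be equivalent
new = x * mu
```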
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/categorical_bmm.py`
Content:
```
1 import argparse
2
3 import numpy as np
4 import torch
5 import torchvision.datasets as dset
6 import torchvision.transforms as transforms
7 import visdom
8 from torch.autograd import Variable
9 from torch.nn import Softmax
10
11 import pyro
12 from pyro.distributions import Bernoulli, Categorical
13 from pyro.infer.kl_qp import KL_QP
14
15 mnist = dset.MNIST(
16 root='./data',
17 train=True,
18 transform=None,
19 target_transform=None,
20 download=True)
21 print('dataset loaded')
22
23 softmax = Softmax()
24
25 train_loader = torch.utils.data.DataLoader(
26 dset.MNIST('../data', train=True, download=True,
27 transform=transforms.Compose([
28 transforms.ToTensor(),
29 transforms.Normalize((0.1307,), (0.3081,))
30 ])),
31 batch_size=128, shuffle=True)
32 test_loader = torch.utils.data.DataLoader(
33 dset.MNIST('../data', train=False, transform=transforms.Compose([
34 transforms.ToTensor(),
35 transforms.Normalize((0.1307,), (0.3081,))
36 ])),
37 batch_size=128, shuffle=True)
38
39
40 def local_model(i, datum):
41 beta = Variable(torch.ones(1, 10)) * 0.1
42 cll = pyro.sample("class_of_datum_" + str(i), Categorical(beta))
43 mean_param = Variable(torch.zeros(1, 784), requires_grad=True)
44 # do MLE for class means
45 mu = pyro.param("mean_of_class_" + str(cll[0]), mean_param)
46 mu_param = softmax(mu)
47 pyro.observe("obs_" + str(i), Bernoulli(mu_param), datum)
48 return cll
49
50
51 def local_guide(i, datum):
52 alpha = torch.ones(1, 10) * 0.1
53 beta_q = Variable(alpha, requires_grad=True)
54 beta_param = pyro.param("class_posterior_", beta_q)
55 guide_params = softmax(beta_param)
56 cll = pyro.sample("class_of_datum_" + str(i), Categorical(guide_params))
57 return cll
58
59
60 def inspect_posterior_samples(i):
61 cll = local_guide(i, None)
62 mean_param = Variable(torch.zeros(1, 784), requires_grad=True)
63 # do MLE for class means
64 mu = pyro.param("mean_of_class_" + str(cll[0]), mean_param)
65 dat = pyro.sample("obs_" + str(i), Bernoulli(mu))
66 return dat
67
68
69 optim_fct = pyro.optim(torch.optim.Adam, {'lr': .0001})
70
71 inference = KL_QP(local_model, local_guide, optim_fct)
72
73 vis = visdom.Visdom()
74
75 nr_epochs = 50
76 # apply it to minibatches of data by hand:
77
78 mnist_data = Variable(train_loader.dataset.train_data.float() / 255.)
79 mnist_labels = Variable(train_loader.dataset.train_labels)
80 mnist_size = mnist_data.size(0)
81 batch_size = 1 # 64
82
83 all_batches = np.arange(0, mnist_size, batch_size)
84
85 if all_batches[-1] != mnist_size:
86 all_batches = list(all_batches) + [mnist_size]
87
88
89 def main():
90 parser = argparse.ArgumentParser(description="parse args")
91 parser.add_argument('-n', '--num-epochs', nargs='?', default=1000, type=int)
92 args = parser.parse_args()
93 for i in range(args.num_epochs):
94 epoch_loss = 0.
95 for ix, batch_start in enumerate(all_batches[:-1]):
96 batch_end = all_batches[ix + 1]
97 batch_data = mnist_data[batch_start:batch_end]
98 bs_size = batch_data.size(0)
99 batch_class_raw = mnist_labels[batch_start:batch_end]
100 batch_class = torch.zeros(bs_size, 10) # maybe it needs a FloatTensor
101 batch_class.scatter_(1, batch_class_raw.data.view(-1, 1), 1)
102 batch_class = Variable(batch_class)
103 epoch_loss += inference.step(ix, batch_data)
104
105 # optional visualization!
106 # vis.image(batch_data[0].view(28, 28).data.numpy())
107 # vis.image(sample[0].view(28, 28).data.numpy())
108 # vis.image(sample_mu[0].view(28, 28).data.numpy())
109 print("epoch avg loss {}".format(epoch_loss / float(mnist_size)))
110
111
112 if __name__ == '__main__':
113 main()
114
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/categorical_bmm.py b/examples/categorical_bmm.py
--- a/examples/categorical_bmm.py
+++ b/examples/categorical_bmm.py
@@ -12,6 +12,7 @@
from pyro.distributions import Bernoulli, Categorical
from pyro.infer.kl_qp import KL_QP
+
mnist = dset.MNIST(
root='./data',
train=True,
@@ -44,7 +45,7 @@
# do MLE for class means
mu = pyro.param("mean_of_class_" + str(cll[0]), mean_param)
mu_param = softmax(mu)
- pyro.observe("obs_" + str(i), Bernoulli(mu_param), datum)
+ pyro.observe("obs_" + str(i), Bernoulli(mu_param), datum.view(1, -1))
return cll
| {"golden_diff": "diff --git a/examples/categorical_bmm.py b/examples/categorical_bmm.py\n--- a/examples/categorical_bmm.py\n+++ b/examples/categorical_bmm.py\n@@ -12,6 +12,7 @@\n from pyro.distributions import Bernoulli, Categorical\n from pyro.infer.kl_qp import KL_QP\n \n+\n mnist = dset.MNIST(\n root='./data',\n train=True,\n@@ -44,7 +45,7 @@\n # do MLE for class means\n mu = pyro.param(\"mean_of_class_\" + str(cll[0]), mean_param)\n mu_param = softmax(mu)\n- pyro.observe(\"obs_\" + str(i), Bernoulli(mu_param), datum)\n+ pyro.observe(\"obs_\" + str(i), Bernoulli(mu_param), datum.view(1, -1))\n return cll\n", "issue": "pytorch broadcasting\nin various places in the codebase we have tensor ops like `expand_as()`. given the extended broadcasting functionality in the latest version of pytorch, some of these may be unnecessary and/or clunky. more generally, we should investigate and deal with any pytorch warnings that popped once once we switched pytorch versions. for example: \r\n\r\n_UserWarning: other is not broadcastable to self, but they have the same number of elements. Falling back to deprecated pointwise behavior._\n", "before_files": [{"content": "import argparse\n\nimport numpy as np\nimport torch\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nimport visdom\nfrom torch.autograd import Variable\nfrom torch.nn import Softmax\n\nimport pyro\nfrom pyro.distributions import Bernoulli, Categorical\nfrom pyro.infer.kl_qp import KL_QP\n\nmnist = dset.MNIST(\n root='./data',\n train=True,\n transform=None,\n target_transform=None,\n download=True)\nprint('dataset loaded')\n\nsoftmax = Softmax()\n\ntrain_loader = torch.utils.data.DataLoader(\n dset.MNIST('../data', train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=128, shuffle=True)\ntest_loader = torch.utils.data.DataLoader(\n dset.MNIST('../data', train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=128, shuffle=True)\n\n\ndef local_model(i, datum):\n beta = Variable(torch.ones(1, 10)) * 0.1\n cll = pyro.sample(\"class_of_datum_\" + str(i), Categorical(beta))\n mean_param = Variable(torch.zeros(1, 784), requires_grad=True)\n # do MLE for class means\n mu = pyro.param(\"mean_of_class_\" + str(cll[0]), mean_param)\n mu_param = softmax(mu)\n pyro.observe(\"obs_\" + str(i), Bernoulli(mu_param), datum)\n return cll\n\n\ndef local_guide(i, datum):\n alpha = torch.ones(1, 10) * 0.1\n beta_q = Variable(alpha, requires_grad=True)\n beta_param = pyro.param(\"class_posterior_\", beta_q)\n guide_params = softmax(beta_param)\n cll = pyro.sample(\"class_of_datum_\" + str(i), Categorical(guide_params))\n return cll\n\n\ndef inspect_posterior_samples(i):\n cll = local_guide(i, None)\n mean_param = Variable(torch.zeros(1, 784), requires_grad=True)\n # do MLE for class means\n mu = pyro.param(\"mean_of_class_\" + str(cll[0]), mean_param)\n dat = pyro.sample(\"obs_\" + str(i), Bernoulli(mu))\n return dat\n\n\noptim_fct = pyro.optim(torch.optim.Adam, {'lr': .0001})\n\ninference = KL_QP(local_model, local_guide, optim_fct)\n\nvis = visdom.Visdom()\n\nnr_epochs = 50\n# apply it to minibatches of data by hand:\n\nmnist_data = Variable(train_loader.dataset.train_data.float() / 255.)\nmnist_labels = Variable(train_loader.dataset.train_labels)\nmnist_size = mnist_data.size(0)\nbatch_size = 1 # 64\n\nall_batches = np.arange(0, mnist_size, 
batch_size)\n\nif all_batches[-1] != mnist_size:\n all_batches = list(all_batches) + [mnist_size]\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"parse args\")\n parser.add_argument('-n', '--num-epochs', nargs='?', default=1000, type=int)\n args = parser.parse_args()\n for i in range(args.num_epochs):\n epoch_loss = 0.\n for ix, batch_start in enumerate(all_batches[:-1]):\n batch_end = all_batches[ix + 1]\n batch_data = mnist_data[batch_start:batch_end]\n bs_size = batch_data.size(0)\n batch_class_raw = mnist_labels[batch_start:batch_end]\n batch_class = torch.zeros(bs_size, 10) # maybe it needs a FloatTensor\n batch_class.scatter_(1, batch_class_raw.data.view(-1, 1), 1)\n batch_class = Variable(batch_class)\n epoch_loss += inference.step(ix, batch_data)\n\n # optional visualization!\n # vis.image(batch_data[0].view(28, 28).data.numpy())\n # vis.image(sample[0].view(28, 28).data.numpy())\n # vis.image(sample_mu[0].view(28, 28).data.numpy())\n print(\"epoch avg loss {}\".format(epoch_loss / float(mnist_size)))\n\n\nif __name__ == '__main__':\n main()\n", "path": "examples/categorical_bmm.py"}], "after_files": [{"content": "import argparse\n\nimport numpy as np\nimport torch\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nimport visdom\nfrom torch.autograd import Variable\nfrom torch.nn import Softmax\n\nimport pyro\nfrom pyro.distributions import Bernoulli, Categorical\nfrom pyro.infer.kl_qp import KL_QP\n\n\nmnist = dset.MNIST(\n root='./data',\n train=True,\n transform=None,\n target_transform=None,\n download=True)\nprint('dataset loaded')\n\nsoftmax = Softmax()\n\ntrain_loader = torch.utils.data.DataLoader(\n dset.MNIST('../data', train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=128, shuffle=True)\ntest_loader = torch.utils.data.DataLoader(\n dset.MNIST('../data', train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=128, shuffle=True)\n\n\ndef local_model(i, datum):\n beta = Variable(torch.ones(1, 10)) * 0.1\n cll = pyro.sample(\"class_of_datum_\" + str(i), Categorical(beta))\n mean_param = Variable(torch.zeros(1, 784), requires_grad=True)\n # do MLE for class means\n mu = pyro.param(\"mean_of_class_\" + str(cll[0]), mean_param)\n mu_param = softmax(mu)\n pyro.observe(\"obs_\" + str(i), Bernoulli(mu_param), datum.view(1, -1))\n return cll\n\n\ndef local_guide(i, datum):\n alpha = torch.ones(1, 10) * 0.1\n beta_q = Variable(alpha, requires_grad=True)\n beta_param = pyro.param(\"class_posterior_\", beta_q)\n guide_params = softmax(beta_param)\n cll = pyro.sample(\"class_of_datum_\" + str(i), Categorical(guide_params))\n return cll\n\n\ndef inspect_posterior_samples(i):\n cll = local_guide(i, None)\n mean_param = Variable(torch.zeros(1, 784), requires_grad=True)\n # do MLE for class means\n mu = pyro.param(\"mean_of_class_\" + str(cll[0]), mean_param)\n dat = pyro.sample(\"obs_\" + str(i), Bernoulli(mu))\n return dat\n\n\noptim_fct = pyro.optim(torch.optim.Adam, {'lr': .0001})\n\ninference = KL_QP(local_model, local_guide, optim_fct)\n\nvis = visdom.Visdom()\n\nnr_epochs = 50\n# apply it to minibatches of data by hand:\n\nmnist_data = Variable(train_loader.dataset.train_data.float() / 255.)\nmnist_labels = Variable(train_loader.dataset.train_labels)\nmnist_size = mnist_data.size(0)\nbatch_size = 1 # 64\n\nall_batches = np.arange(0, mnist_size, 
batch_size)\n\nif all_batches[-1] != mnist_size:\n all_batches = list(all_batches) + [mnist_size]\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"parse args\")\n parser.add_argument('-n', '--num-epochs', nargs='?', default=1000, type=int)\n args = parser.parse_args()\n for i in range(args.num_epochs):\n epoch_loss = 0.\n for ix, batch_start in enumerate(all_batches[:-1]):\n batch_end = all_batches[ix + 1]\n batch_data = mnist_data[batch_start:batch_end]\n bs_size = batch_data.size(0)\n batch_class_raw = mnist_labels[batch_start:batch_end]\n batch_class = torch.zeros(bs_size, 10) # maybe it needs a FloatTensor\n batch_class.scatter_(1, batch_class_raw.data.view(-1, 1), 1)\n batch_class = Variable(batch_class)\n epoch_loss += inference.step(ix, batch_data)\n\n # optional visualization!\n # vis.image(batch_data[0].view(28, 28).data.numpy())\n # vis.image(sample[0].view(28, 28).data.numpy())\n # vis.image(sample_mu[0].view(28, 28).data.numpy())\n print(\"epoch avg loss {}\".format(epoch_loss / float(mnist_size)))\n\n\nif __name__ == '__main__':\n main()\n", "path": "examples/categorical_bmm.py"}]} | 1,593 | 192 |
gh_patches_debug_22576 | rasdani/github-patches | git_diff | google__mobly-799 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Mobly Release 1.11.1
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 2016 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import platform
16 import setuptools
17 from setuptools.command import test
18 import sys
19
20 install_requires = [
21 'portpicker', 'pyserial', 'pyyaml', 'timeout_decorator', 'typing_extensions'
22 ]
23
24 if platform.system() == 'Windows':
25 install_requires.append('pywin32')
26
27
28 class PyTest(test.test):
29 """Class used to execute unit tests using PyTest. This allows us to execute
30 unit tests without having to install the package.
31 """
32
33 def finalize_options(self):
34 test.test.finalize_options(self)
35 self.test_args = ['-x', "tests/mobly"]
36 self.test_suite = True
37
38 def run_tests(self):
39 import pytest
40 errno = pytest.main(self.test_args)
41 sys.exit(errno)
42
43
44 def main():
45 setuptools.setup(
46 name='mobly',
47 version='1.11',
48 maintainer='Ang Li',
49 maintainer_email='[email protected]',
50 description='Automation framework for special end-to-end test cases',
51 license='Apache2.0',
52 url='https://github.com/google/mobly',
53 download_url='https://github.com/google/mobly/tarball/1.11',
54 packages=setuptools.find_packages(exclude=['tests']),
55 include_package_data=False,
56 scripts=['tools/sl4a_shell.py', 'tools/snippet_shell.py'],
57 tests_require=[
58 'mock',
59 'pytest',
60 'pytz',
61 ],
62 install_requires=install_requires,
63 cmdclass={'test': PyTest},
64 )
65
66
67 if __name__ == '__main__':
68 main()
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -18,7 +18,7 @@
import sys
install_requires = [
- 'portpicker', 'pyserial', 'pyyaml', 'timeout_decorator', 'typing_extensions'
+ 'portpicker', 'pyserial', 'pyyaml', 'timeout_decorator', 'typing_extensions>=4.1.1'
]
if platform.system() == 'Windows':
@@ -44,13 +44,13 @@
def main():
setuptools.setup(
name='mobly',
- version='1.11',
+ version='1.11.1',
maintainer='Ang Li',
maintainer_email='[email protected]',
description='Automation framework for special end-to-end test cases',
license='Apache2.0',
url='https://github.com/google/mobly',
- download_url='https://github.com/google/mobly/tarball/1.11',
+ download_url='https://github.com/google/mobly/tarball/1.11.1',
packages=setuptools.find_packages(exclude=['tests']),
include_package_data=False,
scripts=['tools/sl4a_shell.py', 'tools/snippet_shell.py'],
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -18,7 +18,7 @@\n import sys\n \n install_requires = [\n- 'portpicker', 'pyserial', 'pyyaml', 'timeout_decorator', 'typing_extensions'\n+ 'portpicker', 'pyserial', 'pyyaml', 'timeout_decorator', 'typing_extensions>=4.1.1'\n ]\n \n if platform.system() == 'Windows':\n@@ -44,13 +44,13 @@\n def main():\n setuptools.setup(\n name='mobly',\n- version='1.11',\n+ version='1.11.1',\n maintainer='Ang Li',\n maintainer_email='[email protected]',\n description='Automation framework for special end-to-end test cases',\n license='Apache2.0',\n url='https://github.com/google/mobly',\n- download_url='https://github.com/google/mobly/tarball/1.11',\n+ download_url='https://github.com/google/mobly/tarball/1.11.1',\n packages=setuptools.find_packages(exclude=['tests']),\n include_package_data=False,\n scripts=['tools/sl4a_shell.py', 'tools/snippet_shell.py'],\n", "issue": "Mobly Release 1.11.1\n\n", "before_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport platform\nimport setuptools\nfrom setuptools.command import test\nimport sys\n\ninstall_requires = [\n 'portpicker', 'pyserial', 'pyyaml', 'timeout_decorator', 'typing_extensions'\n]\n\nif platform.system() == 'Windows':\n install_requires.append('pywin32')\n\n\nclass PyTest(test.test):\n \"\"\"Class used to execute unit tests using PyTest. 
This allows us to execute\n unit tests without having to install the package.\n \"\"\"\n\n def finalize_options(self):\n test.test.finalize_options(self)\n self.test_args = ['-x', \"tests/mobly\"]\n self.test_suite = True\n\n def run_tests(self):\n import pytest\n errno = pytest.main(self.test_args)\n sys.exit(errno)\n\n\ndef main():\n setuptools.setup(\n name='mobly',\n version='1.11',\n maintainer='Ang Li',\n maintainer_email='[email protected]',\n description='Automation framework for special end-to-end test cases',\n license='Apache2.0',\n url='https://github.com/google/mobly',\n download_url='https://github.com/google/mobly/tarball/1.11',\n packages=setuptools.find_packages(exclude=['tests']),\n include_package_data=False,\n scripts=['tools/sl4a_shell.py', 'tools/snippet_shell.py'],\n tests_require=[\n 'mock',\n 'pytest',\n 'pytz',\n ],\n install_requires=install_requires,\n cmdclass={'test': PyTest},\n )\n\n\nif __name__ == '__main__':\n main()\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport platform\nimport setuptools\nfrom setuptools.command import test\nimport sys\n\ninstall_requires = [\n 'portpicker', 'pyserial', 'pyyaml', 'timeout_decorator', 'typing_extensions>=4.1.1'\n]\n\nif platform.system() == 'Windows':\n install_requires.append('pywin32')\n\n\nclass PyTest(test.test):\n \"\"\"Class used to execute unit tests using PyTest. This allows us to execute\n unit tests without having to install the package.\n \"\"\"\n\n def finalize_options(self):\n test.test.finalize_options(self)\n self.test_args = ['-x', \"tests/mobly\"]\n self.test_suite = True\n\n def run_tests(self):\n import pytest\n errno = pytest.main(self.test_args)\n sys.exit(errno)\n\n\ndef main():\n setuptools.setup(\n name='mobly',\n version='1.11.1',\n maintainer='Ang Li',\n maintainer_email='[email protected]',\n description='Automation framework for special end-to-end test cases',\n license='Apache2.0',\n url='https://github.com/google/mobly',\n download_url='https://github.com/google/mobly/tarball/1.11.1',\n packages=setuptools.find_packages(exclude=['tests']),\n include_package_data=False,\n scripts=['tools/sl4a_shell.py', 'tools/snippet_shell.py'],\n tests_require=[\n 'mock',\n 'pytest',\n 'pytz',\n ],\n install_requires=install_requires,\n cmdclass={'test': PyTest},\n )\n\n\nif __name__ == '__main__':\n main()\n", "path": "setup.py"}]} | 870 | 281 |
gh_patches_debug_22271 | rasdani/github-patches | git_diff | pydantic__pydantic-299 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Creating a child model causes a RecursionError exception
<!-- Questions, Feature Requests, and Bug Reports are all welcome -->
<!-- delete as applicable: -->
# Bug
* OS: **Ubuntu 14.04**
* Python version `import sys; print(sys.version)`: **3.6.7**
* Pydantic version `import pydantic; print(pydantic.VERSION)`: **0.14**
I'm expecting that I can use classic inheritance for dataclass models:
```py
import pydantic.dataclasses
@pydantic.dataclasses.dataclass
class A:
a: str = None
@pydantic.dataclasses.dataclass
class B(A):
b: str = None
B(a='a', b='b')
```
But as a result I'm receiving this:
```
Traceback (most recent call last):
File "/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2881, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-9-10a7116ca691>", line 12, in <module>
B(a='a', b='b')
File "<string>", line 4, in __init__
File "/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/dataclasses.py", line 13, in post_init
self.__post_init_original__()
File "/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/dataclasses.py", line 13, in post_init
self.__post_init_original__()
File "/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/dataclasses.py", line 13, in post_init
self.__post_init_original__()
[Previous line repeated 952 more times]
File "/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/dataclasses.py", line 9, in post_init
d = validate_model(self.__pydantic_model__, self.__dict__)
File "/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/main.py", line 484, in validate_model
v_, errors_ = field.validate(value, values, loc=field.alias, cls=model.__class__)
File "/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/fields.py", line 303, in validate
v, errors = self._validate_singleton(v, values, loc, cls)
File "/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/fields.py", line 406, in _validate_singleton
return self._apply_validators(v, values, loc, cls, self.validators)
File "/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/fields.py", line 412, in _apply_validators
v = validator(v)
File "/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/validators.py", line 23, in str_validator
if isinstance(v, (str, NoneType)):
RecursionError: maximum recursion depth exceeded in __instancecheck__
```
This line below causes this problem:
https://github.com/samuelcolvin/pydantic/blob/master/pydantic/dataclasses.py#L13
UPD: defining `__post_init__` in the child model fixes the problem, but this is a workaround.
```py
import pydantic.dataclasses
@pydantic.dataclasses.dataclass
class A:
a: str = None
@pydantic.dataclasses.dataclass
class B(A):
b: str = None
def __post_init__():
pass
B(a='a', b='b')
```
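The recursion can be reproduced without pydantic at all. The sketch below only mirrors the pattern from `pydantic/dataclasses.py` (the names match that file, but this is an illustration, not pydantic's actual code): because `B` defines no `__post_init__` of its own, `getattr(B, '__post_init__', None)` returns the wrapper already installed on `A`, so `B.__post_init_original__` ends up being the wrapper itself.
```py
def post_init(self):
    # pydantic would run validation here
    if self.__post_init_original__:
        self.__post_init_original__()  # for B this is post_init itself


def process(cls):
    cls.__post_init_original__ = getattr(cls, '__post_init__', None)  # B inherits A's wrapper
    cls.__post_init__ = post_init
    return cls


@process
class A:
    pass


@process
class B(A):
    pass


try:
    B().__post_init__()
except RecursionError:
    print("B.__post_init_original__ is post_init itself -> infinite recursion")
```
Defining `__post_init__` on `B`, as in the workaround above, breaks the cycle because the captured original is then the child's own method rather than the wrapper.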
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pydantic/dataclasses.py`
Content:
```
1 import dataclasses
2
3 from pydantic import ValidationError
4
5 from .main import create_model, validate_model
6
7
8 def post_init(self):
9 d = validate_model(self.__pydantic_model__, self.__dict__)
10 object.__setattr__(self, '__dict__', d)
11 object.__setattr__(self, '__initialised__', True)
12 if self.__post_init_original__:
13 self.__post_init_original__()
14
15
16 def setattr_validate_assignment(self, name, value):
17 if self.__initialised__:
18 d = dict(self.__dict__)
19 d.pop(name)
20 value, error_ = self.__pydantic_model__.__fields__[name].validate(value, d, loc=name)
21 if error_:
22 raise ValidationError([error_])
23
24 object.__setattr__(self, name, value)
25
26
27 def _process_class(_cls, init, repr, eq, order, unsafe_hash, frozen, validate_assignment):
28 post_init_original = getattr(_cls, '__post_init__', None)
29 _cls.__post_init__ = post_init
30 cls = dataclasses._process_class(_cls, init, repr, eq, order, unsafe_hash, frozen)
31
32 fields = {name: (field.type, field.default) for name, field in cls.__dataclass_fields__.items()}
33 cls.__post_init_original__ = post_init_original
34 cls.__pydantic_model__ = create_model(cls.__name__, **fields)
35 cls.__initialised__ = False
36
37 if validate_assignment and not frozen:
38 cls.__setattr__ = setattr_validate_assignment
39 return cls
40
41
42 def dataclass(
43 _cls=None, *, init=True, repr=True, eq=True, order=False, unsafe_hash=False, frozen=False, validate_assignment=False
44 ):
45 """
46 Like the python standard lib dataclasses but with type validation.
47
48 Arguments are the same as for standard dataclasses, except for validate_assignment which has the same meaning
49 as Config.validate_assignment.
50 """
51
52 def wrap(cls):
53 return _process_class(cls, init, repr, eq, order, unsafe_hash, frozen, validate_assignment)
54
55 if _cls is None:
56 return wrap
57
58 return wrap(_cls)
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pydantic/dataclasses.py b/pydantic/dataclasses.py
--- a/pydantic/dataclasses.py
+++ b/pydantic/dataclasses.py
@@ -5,7 +5,7 @@
from .main import create_model, validate_model
-def post_init(self):
+def _pydantic_post_init(self):
d = validate_model(self.__pydantic_model__, self.__dict__)
object.__setattr__(self, '__dict__', d)
object.__setattr__(self, '__initialised__', True)
@@ -26,7 +26,9 @@
def _process_class(_cls, init, repr, eq, order, unsafe_hash, frozen, validate_assignment):
post_init_original = getattr(_cls, '__post_init__', None)
- _cls.__post_init__ = post_init
+ if post_init_original and post_init_original.__name__ == '_pydantic_post_init':
+ post_init_original = None
+ _cls.__post_init__ = _pydantic_post_init
cls = dataclasses._process_class(_cls, init, repr, eq, order, unsafe_hash, frozen)
fields = {name: (field.type, field.default) for name, field in cls.__dataclass_fields__.items()}
| {"golden_diff": "diff --git a/pydantic/dataclasses.py b/pydantic/dataclasses.py\n--- a/pydantic/dataclasses.py\n+++ b/pydantic/dataclasses.py\n@@ -5,7 +5,7 @@\n from .main import create_model, validate_model\n \n \n-def post_init(self):\n+def _pydantic_post_init(self):\n d = validate_model(self.__pydantic_model__, self.__dict__)\n object.__setattr__(self, '__dict__', d)\n object.__setattr__(self, '__initialised__', True)\n@@ -26,7 +26,9 @@\n \n def _process_class(_cls, init, repr, eq, order, unsafe_hash, frozen, validate_assignment):\n post_init_original = getattr(_cls, '__post_init__', None)\n- _cls.__post_init__ = post_init\n+ if post_init_original and post_init_original.__name__ == '_pydantic_post_init':\n+ post_init_original = None\n+ _cls.__post_init__ = _pydantic_post_init\n cls = dataclasses._process_class(_cls, init, repr, eq, order, unsafe_hash, frozen)\n \n fields = {name: (field.type, field.default) for name, field in cls.__dataclass_fields__.items()}\n", "issue": "\u0421reating a child model causes a RecursionError exception\n<!-- Questions, Feature Requests, and Bug Reports are all welcome -->\r\n<!-- delete as applicable: -->\r\n# Bug\r\n\r\n* OS: **Ubuntu 14.04**\r\n* Python version `import sys; print(sys.version)`: **3.6.7**\r\n* Pydantic version `import pydantic; print(pydantic.VERSION)`: **0.14**\r\n\r\nI'm expecting, that I can use a classic inheritance for dataclass models:\r\n```py\r\nimport pydantic.dataclasses\r\n\r\[email protected]\r\nclass A:\r\n a: str = None\r\n\r\[email protected]\r\nclass B(A):\r\n b: str = None\r\n\r\nB(a='a', b='b')\r\n```\r\n\r\nBut as a result I'm receiving this:\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2881, in run_code\r\n exec(code_obj, self.user_global_ns, self.user_ns)\r\n File \"<ipython-input-9-10a7116ca691>\", line 12, in <module>\r\n B(a='a', b='b')\r\n File \"<string>\", line 4, in __init__\r\n File \"/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/dataclasses.py\", line 13, in post_init\r\n self.__post_init_original__()\r\n File \"/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/dataclasses.py\", line 13, in post_init\r\n self.__post_init_original__()\r\n File \"/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/dataclasses.py\", line 13, in post_init\r\n self.__post_init_original__()\r\n [Previous line repeated 952 more times]\r\n File \"/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/dataclasses.py\", line 9, in post_init\r\n d = validate_model(self.__pydantic_model__, self.__dict__)\r\n File \"/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/main.py\", line 484, in validate_model\r\n v_, errors_ = field.validate(value, values, loc=field.alias, cls=model.__class__)\r\n File \"/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/fields.py\", line 303, in validate\r\n v, errors = self._validate_singleton(v, values, loc, cls)\r\n File \"/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/fields.py\", line 406, in _validate_singleton\r\n return self._apply_validators(v, values, loc, cls, self.validators)\r\n File \"/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/fields.py\", line 412, in _apply_validators\r\n v = validator(v)\r\n File \"/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/validators.py\", line 23, in str_validator\r\n if 
isinstance(v, (str, NoneType)):\r\nRecursionError: maximum recursion depth exceeded in __instancecheck__\r\n```\r\nThis line below causes this problem:\r\nhttps://github.com/samuelcolvin/pydantic/blob/master/pydantic/dataclasses.py#L13\r\n\r\nUPD: defining `__post_init__` in child model fixes the problem, but this is workaround.\r\n```py\r\nimport pydantic.dataclasses\r\n\r\[email protected]\r\nclass A:\r\n a: str = None\r\n\r\[email protected]\r\nclass B(A):\r\n b: str = None\r\n\r\n def __post_init__():\r\n pass\r\n\r\nB(a='a', b='b')\r\n```\n", "before_files": [{"content": "import dataclasses\n\nfrom pydantic import ValidationError\n\nfrom .main import create_model, validate_model\n\n\ndef post_init(self):\n d = validate_model(self.__pydantic_model__, self.__dict__)\n object.__setattr__(self, '__dict__', d)\n object.__setattr__(self, '__initialised__', True)\n if self.__post_init_original__:\n self.__post_init_original__()\n\n\ndef setattr_validate_assignment(self, name, value):\n if self.__initialised__:\n d = dict(self.__dict__)\n d.pop(name)\n value, error_ = self.__pydantic_model__.__fields__[name].validate(value, d, loc=name)\n if error_:\n raise ValidationError([error_])\n\n object.__setattr__(self, name, value)\n\n\ndef _process_class(_cls, init, repr, eq, order, unsafe_hash, frozen, validate_assignment):\n post_init_original = getattr(_cls, '__post_init__', None)\n _cls.__post_init__ = post_init\n cls = dataclasses._process_class(_cls, init, repr, eq, order, unsafe_hash, frozen)\n\n fields = {name: (field.type, field.default) for name, field in cls.__dataclass_fields__.items()}\n cls.__post_init_original__ = post_init_original\n cls.__pydantic_model__ = create_model(cls.__name__, **fields)\n cls.__initialised__ = False\n\n if validate_assignment and not frozen:\n cls.__setattr__ = setattr_validate_assignment\n return cls\n\n\ndef dataclass(\n _cls=None, *, init=True, repr=True, eq=True, order=False, unsafe_hash=False, frozen=False, validate_assignment=False\n):\n \"\"\"\n Like the python standard lib dataclasses but with type validation.\n\n Arguments are the same as for standard dataclasses, except for validate_assignment which has the same meaning\n as Config.validate_assignment.\n \"\"\"\n\n def wrap(cls):\n return _process_class(cls, init, repr, eq, order, unsafe_hash, frozen, validate_assignment)\n\n if _cls is None:\n return wrap\n\n return wrap(_cls)\n", "path": "pydantic/dataclasses.py"}], "after_files": [{"content": "import dataclasses\n\nfrom pydantic import ValidationError\n\nfrom .main import create_model, validate_model\n\n\ndef _pydantic_post_init(self):\n d = validate_model(self.__pydantic_model__, self.__dict__)\n object.__setattr__(self, '__dict__', d)\n object.__setattr__(self, '__initialised__', True)\n if self.__post_init_original__:\n self.__post_init_original__()\n\n\ndef setattr_validate_assignment(self, name, value):\n if self.__initialised__:\n d = dict(self.__dict__)\n d.pop(name)\n value, error_ = self.__pydantic_model__.__fields__[name].validate(value, d, loc=name)\n if error_:\n raise ValidationError([error_])\n\n object.__setattr__(self, name, value)\n\n\ndef _process_class(_cls, init, repr, eq, order, unsafe_hash, frozen, validate_assignment):\n post_init_original = getattr(_cls, '__post_init__', None)\n if post_init_original and post_init_original.__name__ == '_pydantic_post_init':\n post_init_original = None\n _cls.__post_init__ = _pydantic_post_init\n cls = dataclasses._process_class(_cls, init, repr, eq, order, unsafe_hash, frozen)\n\n fields = 
{name: (field.type, field.default) for name, field in cls.__dataclass_fields__.items()}\n cls.__post_init_original__ = post_init_original\n cls.__pydantic_model__ = create_model(cls.__name__, **fields)\n cls.__initialised__ = False\n\n if validate_assignment and not frozen:\n cls.__setattr__ = setattr_validate_assignment\n return cls\n\n\ndef dataclass(\n _cls=None, *, init=True, repr=True, eq=True, order=False, unsafe_hash=False, frozen=False, validate_assignment=False\n):\n \"\"\"\n Like the python standard lib dataclasses but with type validation.\n\n Arguments are the same as for standard dataclasses, except for validate_assignment which has the same meaning\n as Config.validate_assignment.\n \"\"\"\n\n def wrap(cls):\n return _process_class(cls, init, repr, eq, order, unsafe_hash, frozen, validate_assignment)\n\n if _cls is None:\n return wrap\n\n return wrap(_cls)\n", "path": "pydantic/dataclasses.py"}]} | 1,739 | 272 |
gh_patches_debug_18778 | rasdani/github-patches | git_diff | vacanza__python-holidays-1782 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The calculation of the lunar start date throws a TypeError exception for Thailand and Cambodia
version: 0.47
stack trace:
"python39\lib\site-packages\holidays\calendars\thai.py", line 233, in _get_start_date
return _ThaiLunisolar.START_DATE + td(days=delta_days)
TypeError: unsupported type for timedelta days component: numpy.int32
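One assumed way the numpy type can reach `_get_start_date` (the report does not show the calling code) is building the holiday set with years produced by numpy, which the current `_normalize_arguments` passes through unconverted:
```python
import numpy as np
import holidays

# np.arange yields numpy integer years; they flow through _normalize_arguments
# unchanged and eventually end up in timedelta(days=<numpy.int32>)
th = holidays.Thailand(years=np.arange(2020, 2025))
```
Coercing each year to a built-in `int` before it reaches the calendar code avoids handing numpy scalars to `timedelta`.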
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `holidays/helpers.py`
Content:
```
1 # holidays
2 # --------
3 # A fast, efficient Python library for generating country, province and state
4 # specific sets of holidays on the fly. It aims to make determining whether a
5 # specific date is a holiday as fast and flexible as possible.
6 #
7 # Authors: Vacanza Team and individual contributors (see AUTHORS file)
8 # dr-prodigy <[email protected]> (c) 2017-2023
9 # ryanss <[email protected]> (c) 2014-2017
10 # Website: https://github.com/vacanza/python-holidays
11 # License: MIT (see LICENSE file)
12
13
14 def _normalize_arguments(cls, value):
15 """Normalize arguments.
16
17 :param cls:
18 A type of arguments to normalize.
19
20 :param value:
21 Either a single item or an iterable of `cls` type.
22
23 :return:
24 A set created from `value` argument.
25
26 """
27 if isinstance(value, cls):
28 return {value}
29
30 return set(value) if value is not None else set()
31
32
33 def _normalize_tuple(data):
34 """Normalize tuple.
35
36 :param data:
37 Either a tuple or a tuple of tuples.
38
39 :return:
40 An unchanged object for tuple of tuples, e.g., ((JAN, 10), (DEC, 31)).
41 An object put into a tuple otherwise, e.g., ((JAN, 10),).
42 """
43 return data if not data or isinstance(data[0], tuple) else (data,)
44
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/holidays/helpers.py b/holidays/helpers.py
--- a/holidays/helpers.py
+++ b/holidays/helpers.py
@@ -24,13 +24,19 @@
A set created from `value` argument.
"""
+ if value is None:
+ return set()
+
if isinstance(value, cls):
return {value}
- return set(value) if value is not None else set()
+ try:
+ return {v if isinstance(v, cls) else cls(v) for v in value}
+ except TypeError: # non-iterable
+ return {value if isinstance(value, cls) else cls(value)}
-def _normalize_tuple(data):
+def _normalize_tuple(value):
"""Normalize tuple.
:param data:
@@ -40,4 +46,4 @@
An unchanged object for tuple of tuples, e.g., ((JAN, 10), (DEC, 31)).
An object put into a tuple otherwise, e.g., ((JAN, 10),).
"""
- return data if not data or isinstance(data[0], tuple) else (data,)
+ return value if not value or isinstance(value[0], tuple) else (value,)
| {"golden_diff": "diff --git a/holidays/helpers.py b/holidays/helpers.py\n--- a/holidays/helpers.py\n+++ b/holidays/helpers.py\n@@ -24,13 +24,19 @@\n A set created from `value` argument.\n \n \"\"\"\n+ if value is None:\n+ return set()\n+\n if isinstance(value, cls):\n return {value}\n \n- return set(value) if value is not None else set()\n+ try:\n+ return {v if isinstance(v, cls) else cls(v) for v in value}\n+ except TypeError: # non-iterable\n+ return {value if isinstance(value, cls) else cls(value)}\n \n \n-def _normalize_tuple(data):\n+def _normalize_tuple(value):\n \"\"\"Normalize tuple.\n \n :param data:\n@@ -40,4 +46,4 @@\n An unchanged object for tuple of tuples, e.g., ((JAN, 10), (DEC, 31)).\n An object put into a tuple otherwise, e.g., ((JAN, 10),).\n \"\"\"\n- return data if not data or isinstance(data[0], tuple) else (data,)\n+ return value if not value or isinstance(value[0], tuple) else (value,)\n", "issue": "The calculation of the lunar start date throws a TypeError exception for Thailand and Cambodia\nversion: 0.47\r\nstack trace:\r\n\"python39\\lib\\site-packages\\holidays\\calendars\\thai.py\", line 233, in _get_start_date\r\n return _ThaiLunisolar.START_DATE + td(days=delta_days)\r\nTypeError: unsupported type for timedelta days component: numpy.int32\n", "before_files": [{"content": "# holidays\n# --------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Authors: Vacanza Team and individual contributors (see AUTHORS file)\n# dr-prodigy <[email protected]> (c) 2017-2023\n# ryanss <[email protected]> (c) 2014-2017\n# Website: https://github.com/vacanza/python-holidays\n# License: MIT (see LICENSE file)\n\n\ndef _normalize_arguments(cls, value):\n \"\"\"Normalize arguments.\n\n :param cls:\n A type of arguments to normalize.\n\n :param value:\n Either a single item or an iterable of `cls` type.\n\n :return:\n A set created from `value` argument.\n\n \"\"\"\n if isinstance(value, cls):\n return {value}\n\n return set(value) if value is not None else set()\n\n\ndef _normalize_tuple(data):\n \"\"\"Normalize tuple.\n\n :param data:\n Either a tuple or a tuple of tuples.\n\n :return:\n An unchanged object for tuple of tuples, e.g., ((JAN, 10), (DEC, 31)).\n An object put into a tuple otherwise, e.g., ((JAN, 10),).\n \"\"\"\n return data if not data or isinstance(data[0], tuple) else (data,)\n", "path": "holidays/helpers.py"}], "after_files": [{"content": "# holidays\n# --------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. 
It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Authors: Vacanza Team and individual contributors (see AUTHORS file)\n# dr-prodigy <[email protected]> (c) 2017-2023\n# ryanss <[email protected]> (c) 2014-2017\n# Website: https://github.com/vacanza/python-holidays\n# License: MIT (see LICENSE file)\n\n\ndef _normalize_arguments(cls, value):\n \"\"\"Normalize arguments.\n\n :param cls:\n A type of arguments to normalize.\n\n :param value:\n Either a single item or an iterable of `cls` type.\n\n :return:\n A set created from `value` argument.\n\n \"\"\"\n if value is None:\n return set()\n\n if isinstance(value, cls):\n return {value}\n\n try:\n return {v if isinstance(v, cls) else cls(v) for v in value}\n except TypeError: # non-iterable\n return {value if isinstance(value, cls) else cls(value)}\n\n\ndef _normalize_tuple(value):\n \"\"\"Normalize tuple.\n\n :param data:\n Either a tuple or a tuple of tuples.\n\n :return:\n An unchanged object for tuple of tuples, e.g., ((JAN, 10), (DEC, 31)).\n An object put into a tuple otherwise, e.g., ((JAN, 10),).\n \"\"\"\n return value if not value or isinstance(value[0], tuple) else (value,)\n", "path": "holidays/helpers.py"}]} | 776 | 271 |
gh_patches_debug_13296 | rasdani/github-patches | git_diff | qtile__qtile-1687 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Ampersands need to be escaped in WindowName widget
# Issue description
Ampersands in window names need to be changed in WindowName widget to "&amp;" before being passed to Pango/Cairo
# Qtile version
0.15.1
# Stack traces
Exception: parse_markup() failed for b'Seth Lakeman - King & Country'
2020-04-27 19:12:00,744 ERROR libqtile hook.py:fire():L373 Error in hook focus_change
Traceback (most recent call last):
File "/usr/lib/python3.8/site-packages/libqtile/hook.py", line 371, in fire
i(*args, **kwargs)
File "/usr/lib/python3.8/site-packages/libqtile/widget/windowname.py", line 67, in update
self.text = "%s%s" % (state, w.name if w and w.name else " ")
File "/usr/lib/python3.8/site-packages/libqtile/widget/base.py", line 323, in text
self.layout.text = self.formatted_text
File "/usr/lib/python3.8/site-packages/libqtile/drawer.py", line 70, in text
attrlist, value, accel_char = pangocffi.parse_markup(value)
File "/usr/lib/python3.8/site-packages/libqtile/pangocffi.py", line 173, in parse_markup
raise Exception("parse_markup() failed for %s" % value)
Exception: parse_markup() failed for b'Seth Lakeman - King & Country'
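Escaping the window name before it reaches the layout makes `parse_markup()` succeed; a minimal sketch, assuming the `markup_escape_text` helper available in `libqtile.pangocffi`:
```python
from libqtile import pangocffi

name = "Seth Lakeman - King & Country"
safe = pangocffi.markup_escape_text(name)
# safe == 'Seth Lakeman - King &amp; Country', which parse_markup() accepts
```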
# Configuration
N/A
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libqtile/widget/windowname.py`
Content:
```
1 # Copyright (c) 2008, 2010 Aldo Cortesi
2 # Copyright (c) 2010 matt
3 # Copyright (c) 2011 Mounier Florian
4 # Copyright (c) 2012 Tim Neumann
5 # Copyright (c) 2013 Craig Barnes
6 # Copyright (c) 2014 Sean Vig
7 # Copyright (c) 2014 Tycho Andersen
8 #
9 # Permission is hereby granted, free of charge, to any person obtaining a copy
10 # of this software and associated documentation files (the "Software"), to deal
11 # in the Software without restriction, including without limitation the rights
12 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
13 # copies of the Software, and to permit persons to whom the Software is
14 # furnished to do so, subject to the following conditions:
15 #
16 # The above copyright notice and this permission notice shall be included in
17 # all copies or substantial portions of the Software.
18 #
19 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
22 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
24 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
25 # SOFTWARE.
26
27 from libqtile import bar, hook
28 from libqtile.widget import base
29
30
31 class WindowName(base._TextBox):
32 """Displays the name of the window that currently has focus"""
33 orientations = base.ORIENTATION_HORIZONTAL
34 defaults = [
35 ('show_state', True, 'show window status before window name'),
36 ('for_current_screen', False, 'instead of this bars screen use currently active screen')
37 ]
38
39 def __init__(self, width=bar.STRETCH, **config):
40 base._TextBox.__init__(self, width=width, **config)
41 self.add_defaults(WindowName.defaults)
42
43 def _configure(self, qtile, bar):
44 base._TextBox._configure(self, qtile, bar)
45 hook.subscribe.client_name_updated(self.update)
46 hook.subscribe.focus_change(self.update)
47 hook.subscribe.float_change(self.update)
48
49 @hook.subscribe.current_screen_change
50 def on_screen_changed():
51 if self.for_current_screen:
52 self.update()
53
54 def update(self, *args):
55 if self.for_current_screen:
56 w = self.qtile.current_screen.group.current_window
57 else:
58 w = self.bar.screen.group.current_window
59 state = ''
60 if self.show_state and w is not None:
61 if w.maximized:
62 state = '[] '
63 elif w.minimized:
64 state = '_ '
65 elif w.floating:
66 state = 'V '
67 self.text = "%s%s" % (state, w.name if w and w.name else " ")
68 self.bar.draw()
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/libqtile/widget/windowname.py b/libqtile/widget/windowname.py
--- a/libqtile/widget/windowname.py
+++ b/libqtile/widget/windowname.py
@@ -24,7 +24,7 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
-from libqtile import bar, hook
+from libqtile import bar, hook, pangocffi
from libqtile.widget import base
@@ -64,5 +64,6 @@
state = '_ '
elif w.floating:
state = 'V '
- self.text = "%s%s" % (state, w.name if w and w.name else " ")
+ unescaped = "%s%s" % (state, w.name if w and w.name else " ")
+ self.text = pangocffi.markup_escape_text(unescaped)
self.bar.draw()
| {"golden_diff": "diff --git a/libqtile/widget/windowname.py b/libqtile/widget/windowname.py\n--- a/libqtile/widget/windowname.py\n+++ b/libqtile/widget/windowname.py\n@@ -24,7 +24,7 @@\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n # SOFTWARE.\n \n-from libqtile import bar, hook\n+from libqtile import bar, hook, pangocffi\n from libqtile.widget import base\n \n \n@@ -64,5 +64,6 @@\n state = '_ '\n elif w.floating:\n state = 'V '\n- self.text = \"%s%s\" % (state, w.name if w and w.name else \" \")\n+ unescaped = \"%s%s\" % (state, w.name if w and w.name else \" \")\n+ self.text = pangocffi.markup_escape_text(unescaped)\n self.bar.draw()\n", "issue": "Ampersands need to be escaped in WindowName widget\n# Issue description\r\nAmpersands in window names need to be changed in WindowName widget to \"&\" before being passed to Pango/Cairo\r\n\r\n# Qtile version\r\n0.15.1\r\n\r\n# Stack traces\r\nException: parse_markup() failed for b'Seth Lakeman - King & Country'\r\n2020-04-27 19:12:00,744 ERROR libqtile hook.py:fire():L373 Error in hook focus_change\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.8/site-packages/libqtile/hook.py\", line 371, in fire\r\n i(*args, **kwargs)\r\n File \"/usr/lib/python3.8/site-packages/libqtile/widget/windowname.py\", line 67, in update\r\n self.text = \"%s%s\" % (state, w.name if w and w.name else \" \")\r\n File \"/usr/lib/python3.8/site-packages/libqtile/widget/base.py\", line 323, in text\r\n self.layout.text = self.formatted_text\r\n File \"/usr/lib/python3.8/site-packages/libqtile/drawer.py\", line 70, in text\r\n attrlist, value, accel_char = pangocffi.parse_markup(value)\r\n File \"/usr/lib/python3.8/site-packages/libqtile/pangocffi.py\", line 173, in parse_markup\r\n raise Exception(\"parse_markup() failed for %s\" % value)\r\nException: parse_markup() failed for b'Seth Lakeman - King & Country'\r\n\r\n# Configuration\r\nN/A\n", "before_files": [{"content": "# Copyright (c) 2008, 2010 Aldo Cortesi\n# Copyright (c) 2010 matt\n# Copyright (c) 2011 Mounier Florian\n# Copyright (c) 2012 Tim Neumann\n# Copyright (c) 2013 Craig Barnes\n# Copyright (c) 2014 Sean Vig\n# Copyright (c) 2014 Tycho Andersen\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom libqtile import bar, hook\nfrom libqtile.widget import base\n\n\nclass WindowName(base._TextBox):\n \"\"\"Displays the name of the window that currently has focus\"\"\"\n orientations = base.ORIENTATION_HORIZONTAL\n defaults = [\n ('show_state', True, 'show window status before window name'),\n ('for_current_screen', False, 'instead of this bars screen use currently active screen')\n ]\n\n def __init__(self, width=bar.STRETCH, **config):\n base._TextBox.__init__(self, width=width, **config)\n self.add_defaults(WindowName.defaults)\n\n def _configure(self, qtile, bar):\n base._TextBox._configure(self, qtile, bar)\n hook.subscribe.client_name_updated(self.update)\n hook.subscribe.focus_change(self.update)\n hook.subscribe.float_change(self.update)\n\n @hook.subscribe.current_screen_change\n def on_screen_changed():\n if self.for_current_screen:\n self.update()\n\n def update(self, *args):\n if self.for_current_screen:\n w = self.qtile.current_screen.group.current_window\n else:\n w = self.bar.screen.group.current_window\n state = ''\n if self.show_state and w is not None:\n if w.maximized:\n state = '[] '\n elif w.minimized:\n state = '_ '\n elif w.floating:\n state = 'V '\n self.text = \"%s%s\" % (state, w.name if w and w.name else \" \")\n self.bar.draw()\n", "path": "libqtile/widget/windowname.py"}], "after_files": [{"content": "# Copyright (c) 2008, 2010 Aldo Cortesi\n# Copyright (c) 2010 matt\n# Copyright (c) 2011 Mounier Florian\n# Copyright (c) 2012 Tim Neumann\n# Copyright (c) 2013 Craig Barnes\n# Copyright (c) 2014 Sean Vig\n# Copyright (c) 2014 Tycho Andersen\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom libqtile import bar, hook, pangocffi\nfrom libqtile.widget import base\n\n\nclass WindowName(base._TextBox):\n \"\"\"Displays the name of the window that currently has focus\"\"\"\n orientations = base.ORIENTATION_HORIZONTAL\n defaults = [\n ('show_state', True, 'show window status before window name'),\n ('for_current_screen', False, 'instead of this bars screen use currently active screen')\n ]\n\n def __init__(self, width=bar.STRETCH, **config):\n base._TextBox.__init__(self, width=width, **config)\n self.add_defaults(WindowName.defaults)\n\n def _configure(self, qtile, bar):\n base._TextBox._configure(self, qtile, bar)\n hook.subscribe.client_name_updated(self.update)\n hook.subscribe.focus_change(self.update)\n hook.subscribe.float_change(self.update)\n\n @hook.subscribe.current_screen_change\n def on_screen_changed():\n if self.for_current_screen:\n self.update()\n\n def update(self, *args):\n if self.for_current_screen:\n w = self.qtile.current_screen.group.current_window\n else:\n w = self.bar.screen.group.current_window\n state = ''\n if self.show_state and w is not None:\n if w.maximized:\n state = '[] '\n elif w.minimized:\n state = '_ '\n elif w.floating:\n state = 'V '\n unescaped = \"%s%s\" % (state, w.name if w and w.name else \" \")\n self.text = pangocffi.markup_escape_text(unescaped)\n self.bar.draw()\n", "path": "libqtile/widget/windowname.py"}]} | 1,395 | 201 |
gh_patches_debug_10207 | rasdani/github-patches | git_diff | certbot__certbot-2248 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
VersionConflict: ConfigArgParse 0.9.3
Hello,
I tried renewing our certificates today, and I got an error from letsencrypt-auto. The error message was:
VersionConflict: (ConfigArgParse 0.9.3 (/home/bogdanb/.local/share/letsencrypt/lib/python2.7/site-packages), Requirement.parse('ConfigArgParse>=0.10.0'))
Please see the two attached files for the logs:
[execution.log.txt](https://github.com/letsencrypt/letsencrypt/files/97381/execution.log.txt)
[letencrypt.log.txt](https://github.com/letsencrypt/letsencrypt/files/97383/letencrypt.log.txt)
This is after I had deleted the ~/.local folder where letsencrypt stores its downloads. (I had the same exception before.) This is on an Ubuntu 15.04 machine:
$ uname -a
Linux qotilabs-dime 2.6.32-042stab112.15 #1 SMP Tue Oct 20 17:22:56 MSK 2015 x86_64 x86_64 x86_64 GNU/Linux
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import codecs
2 import os
3 import re
4 import sys
5
6 from setuptools import setup
7 from setuptools import find_packages
8
9 # Workaround for http://bugs.python.org/issue8876, see
10 # http://bugs.python.org/issue8876#msg208792
11 # This can be removed when using Python 2.7.9 or later:
12 # https://hg.python.org/cpython/raw-file/v2.7.9/Misc/NEWS
13 if os.path.abspath(__file__).split(os.path.sep)[1] == 'vagrant':
14 del os.link
15
16
17 def read_file(filename, encoding='utf8'):
18 """Read unicode from given file."""
19 with codecs.open(filename, encoding=encoding) as fd:
20 return fd.read()
21
22
23 here = os.path.abspath(os.path.dirname(__file__))
24
25 # read version number (and other metadata) from package init
26 init_fn = os.path.join(here, 'letsencrypt', '__init__.py')
27 meta = dict(re.findall(r"""__([a-z]+)__ = '([^']+)""", read_file(init_fn)))
28
29 readme = read_file(os.path.join(here, 'README.rst'))
30 changes = read_file(os.path.join(here, 'CHANGES.rst'))
31 version = meta['version']
32
33 # Please update tox.ini when modifying dependency version requirements
34 install_requires = [
35 'acme=={0}'.format(version),
36 'ConfigArgParse>=0.10.0', # python2.6 support, upstream #17
37 'configobj',
38 'cryptography>=0.7', # load_pem_x509_certificate
39 'parsedatetime',
40 'psutil>=2.1.0', # net_connections introduced in 2.1.0
41 'PyOpenSSL',
42 'pyrfc3339',
43 'python2-pythondialog>=3.2.2rc1', # Debian squeeze support, cf. #280
44 'pytz',
45 'setuptools', # pkg_resources
46 'six',
47 'zope.component',
48 'zope.interface',
49 ]
50
51 # env markers in extras_require cause problems with older pip: #517
52 # Keep in sync with conditional_requirements.py.
53 if sys.version_info < (2, 7):
54 install_requires.extend([
55 # only some distros recognize stdlib argparse as already satisfying
56 'argparse',
57 'mock<1.1.0',
58 ])
59 else:
60 install_requires.append('mock')
61
62 dev_extras = [
63 # Pin astroid==1.3.5, pylint==1.4.2 as a workaround for #289
64 'astroid==1.3.5',
65 'pylint==1.4.2', # upstream #248
66 'twine',
67 'wheel',
68 ]
69
70 docs_extras = [
71 'repoze.sphinx.autointerface',
72 'Sphinx>=1.0', # autodoc_member_order = 'bysource', autodoc_default_flags
73 'sphinx_rtd_theme',
74 'sphinxcontrib-programoutput',
75 ]
76
77 testing_extras = [
78 'coverage',
79 'nose',
80 'nosexcover',
81 'pep8',
82 'tox',
83 ]
84
85 setup(
86 name='letsencrypt',
87 version=version,
88 description="Let's Encrypt client",
89 long_description=readme, # later: + '\n\n' + changes
90 url='https://github.com/letsencrypt/letsencrypt',
91 author="Let's Encrypt Project",
92 author_email='[email protected]',
93 license='Apache License 2.0',
94 classifiers=[
95 'Development Status :: 3 - Alpha',
96 'Environment :: Console',
97 'Environment :: Console :: Curses',
98 'Intended Audience :: System Administrators',
99 'License :: OSI Approved :: Apache Software License',
100 'Operating System :: POSIX :: Linux',
101 'Programming Language :: Python',
102 'Programming Language :: Python :: 2',
103 'Programming Language :: Python :: 2.6',
104 'Programming Language :: Python :: 2.7',
105 'Topic :: Internet :: WWW/HTTP',
106 'Topic :: Security',
107 'Topic :: System :: Installation/Setup',
108 'Topic :: System :: Networking',
109 'Topic :: System :: Systems Administration',
110 'Topic :: Utilities',
111 ],
112
113 packages=find_packages(exclude=['docs', 'examples', 'tests', 'venv']),
114 include_package_data=True,
115
116 install_requires=install_requires,
117 extras_require={
118 'dev': dev_extras,
119 'docs': docs_extras,
120 'testing': testing_extras,
121 },
122
123 # to test all packages run "python setup.py test -s
124 # {acme,letsencrypt_apache,letsencrypt_nginx}"
125 test_suite='letsencrypt',
126
127 entry_points={
128 'console_scripts': [
129 'letsencrypt = letsencrypt.cli:main',
130 'letsencrypt-renewer = letsencrypt.renewer:main',
131 ],
132 'letsencrypt.plugins': [
133 'manual = letsencrypt.plugins.manual:Authenticator',
134 'null = letsencrypt.plugins.null:Installer',
135 'standalone = letsencrypt.plugins.standalone:Authenticator',
136 'webroot = letsencrypt.plugins.webroot:Authenticator',
137 ],
138 },
139 )
140
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -33,7 +33,10 @@
# Please update tox.ini when modifying dependency version requirements
install_requires = [
'acme=={0}'.format(version),
- 'ConfigArgParse>=0.10.0', # python2.6 support, upstream #17
+ # We technically need ConfigArgParse 0.10.0 for Python 2.6 support, but
+ # saying so here causes a runtime error against our temporary fork of 0.9.3
+ # in which we added 2.6 support (see #2243), so we relax the requirement.
+ 'ConfigArgParse>=0.9.3',
'configobj',
'cryptography>=0.7', # load_pem_x509_certificate
'parsedatetime',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -33,7 +33,10 @@\n # Please update tox.ini when modifying dependency version requirements\n install_requires = [\n 'acme=={0}'.format(version),\n- 'ConfigArgParse>=0.10.0', # python2.6 support, upstream #17\n+ # We technically need ConfigArgParse 0.10.0 for Python 2.6 support, but\n+ # saying so here causes a runtime error against our temporary fork of 0.9.3\n+ # in which we added 2.6 support (see #2243), so we relax the requirement.\n+ 'ConfigArgParse>=0.9.3',\n 'configobj',\n 'cryptography>=0.7', # load_pem_x509_certificate\n 'parsedatetime',\n", "issue": "VersionConflict: ConfigArgParse 0.9.3\nHello,\n\nI tried renewing our certificates today, and I got an error from letsencrypt-auto. The error message was:\n\nVersionConflict: (ConfigArgParse 0.9.3 (/home/bogdanb/.local/share/letsencrypt/lib/python2.7/site-packages), Requirement.parse('ConfigArgParse>=0.10.0'))\n\nPlease see the two attached files for the logs:\n[execution.log.txt](https://github.com/letsencrypt/letsencrypt/files/97381/execution.log.txt)\n[letencrypt.log.txt](https://github.com/letsencrypt/letsencrypt/files/97383/letencrypt.log.txt)\n\nThis is after I had deleted the ~/.local folder where letsencrypt stores its downloads. (I had the same exception before.) This is on an Ubuntu 15.04 machine:\n\n$ uname -a\nLinux qotilabs-dime 2.6.32-042stab112.15 #1 SMP Tue Oct 20 17:22:56 MSK 2015 x86_64 x86_64 x86_64 GNU/Linux\n\n", "before_files": [{"content": "import codecs\nimport os\nimport re\nimport sys\n\nfrom setuptools import setup\nfrom setuptools import find_packages\n\n# Workaround for http://bugs.python.org/issue8876, see\n# http://bugs.python.org/issue8876#msg208792\n# This can be removed when using Python 2.7.9 or later:\n# https://hg.python.org/cpython/raw-file/v2.7.9/Misc/NEWS\nif os.path.abspath(__file__).split(os.path.sep)[1] == 'vagrant':\n del os.link\n\n\ndef read_file(filename, encoding='utf8'):\n \"\"\"Read unicode from given file.\"\"\"\n with codecs.open(filename, encoding=encoding) as fd:\n return fd.read()\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n# read version number (and other metadata) from package init\ninit_fn = os.path.join(here, 'letsencrypt', '__init__.py')\nmeta = dict(re.findall(r\"\"\"__([a-z]+)__ = '([^']+)\"\"\", read_file(init_fn)))\n\nreadme = read_file(os.path.join(here, 'README.rst'))\nchanges = read_file(os.path.join(here, 'CHANGES.rst'))\nversion = meta['version']\n\n# Please update tox.ini when modifying dependency version requirements\ninstall_requires = [\n 'acme=={0}'.format(version),\n 'ConfigArgParse>=0.10.0', # python2.6 support, upstream #17\n 'configobj',\n 'cryptography>=0.7', # load_pem_x509_certificate\n 'parsedatetime',\n 'psutil>=2.1.0', # net_connections introduced in 2.1.0\n 'PyOpenSSL',\n 'pyrfc3339',\n 'python2-pythondialog>=3.2.2rc1', # Debian squeeze support, cf. 
#280\n 'pytz',\n 'setuptools', # pkg_resources\n 'six',\n 'zope.component',\n 'zope.interface',\n]\n\n# env markers in extras_require cause problems with older pip: #517\n# Keep in sync with conditional_requirements.py.\nif sys.version_info < (2, 7):\n install_requires.extend([\n # only some distros recognize stdlib argparse as already satisfying\n 'argparse',\n 'mock<1.1.0',\n ])\nelse:\n install_requires.append('mock')\n\ndev_extras = [\n # Pin astroid==1.3.5, pylint==1.4.2 as a workaround for #289\n 'astroid==1.3.5',\n 'pylint==1.4.2', # upstream #248\n 'twine',\n 'wheel',\n]\n\ndocs_extras = [\n 'repoze.sphinx.autointerface',\n 'Sphinx>=1.0', # autodoc_member_order = 'bysource', autodoc_default_flags\n 'sphinx_rtd_theme',\n 'sphinxcontrib-programoutput',\n]\n\ntesting_extras = [\n 'coverage',\n 'nose',\n 'nosexcover',\n 'pep8',\n 'tox',\n]\n\nsetup(\n name='letsencrypt',\n version=version,\n description=\"Let's Encrypt client\",\n long_description=readme, # later: + '\\n\\n' + changes\n url='https://github.com/letsencrypt/letsencrypt',\n author=\"Let's Encrypt Project\",\n author_email='[email protected]',\n license='Apache License 2.0',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Environment :: Console',\n 'Environment :: Console :: Curses',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Security',\n 'Topic :: System :: Installation/Setup',\n 'Topic :: System :: Networking',\n 'Topic :: System :: Systems Administration',\n 'Topic :: Utilities',\n ],\n\n packages=find_packages(exclude=['docs', 'examples', 'tests', 'venv']),\n include_package_data=True,\n\n install_requires=install_requires,\n extras_require={\n 'dev': dev_extras,\n 'docs': docs_extras,\n 'testing': testing_extras,\n },\n\n # to test all packages run \"python setup.py test -s\n # {acme,letsencrypt_apache,letsencrypt_nginx}\"\n test_suite='letsencrypt',\n\n entry_points={\n 'console_scripts': [\n 'letsencrypt = letsencrypt.cli:main',\n 'letsencrypt-renewer = letsencrypt.renewer:main',\n ],\n 'letsencrypt.plugins': [\n 'manual = letsencrypt.plugins.manual:Authenticator',\n 'null = letsencrypt.plugins.null:Installer',\n 'standalone = letsencrypt.plugins.standalone:Authenticator',\n 'webroot = letsencrypt.plugins.webroot:Authenticator',\n ],\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "import codecs\nimport os\nimport re\nimport sys\n\nfrom setuptools import setup\nfrom setuptools import find_packages\n\n# Workaround for http://bugs.python.org/issue8876, see\n# http://bugs.python.org/issue8876#msg208792\n# This can be removed when using Python 2.7.9 or later:\n# https://hg.python.org/cpython/raw-file/v2.7.9/Misc/NEWS\nif os.path.abspath(__file__).split(os.path.sep)[1] == 'vagrant':\n del os.link\n\n\ndef read_file(filename, encoding='utf8'):\n \"\"\"Read unicode from given file.\"\"\"\n with codecs.open(filename, encoding=encoding) as fd:\n return fd.read()\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n# read version number (and other metadata) from package init\ninit_fn = os.path.join(here, 'letsencrypt', '__init__.py')\nmeta = dict(re.findall(r\"\"\"__([a-z]+)__ = '([^']+)\"\"\", read_file(init_fn)))\n\nreadme = read_file(os.path.join(here, 'README.rst'))\nchanges = 
read_file(os.path.join(here, 'CHANGES.rst'))\nversion = meta['version']\n\n# Please update tox.ini when modifying dependency version requirements\ninstall_requires = [\n 'acme=={0}'.format(version),\n # We technically need ConfigArgParse 0.10.0 for Python 2.6 support, but\n # saying so here causes a runtime error against our temporary fork of 0.9.3\n # in which we added 2.6 support (see #2243), so we relax the requirement.\n 'ConfigArgParse>=0.9.3',\n 'configobj',\n 'cryptography>=0.7', # load_pem_x509_certificate\n 'parsedatetime',\n 'psutil>=2.1.0', # net_connections introduced in 2.1.0\n 'PyOpenSSL',\n 'pyrfc3339',\n 'python2-pythondialog>=3.2.2rc1', # Debian squeeze support, cf. #280\n 'pytz',\n 'setuptools', # pkg_resources\n 'six',\n 'zope.component',\n 'zope.interface',\n]\n\n# env markers in extras_require cause problems with older pip: #517\nif sys.version_info < (2, 7):\n install_requires.extend([\n # only some distros recognize stdlib argparse as already satisfying\n 'argparse',\n 'mock<1.1.0',\n ])\nelse:\n install_requires.extend([\n 'mock',\n ])\n\ndev_extras = [\n # Pin astroid==1.3.5, pylint==1.4.2 as a workaround for #289\n 'astroid==1.3.5',\n 'pylint==1.4.2', # upstream #248\n 'twine',\n 'wheel',\n]\n\ndocs_extras = [\n 'repoze.sphinx.autointerface',\n 'Sphinx>=1.0', # autodoc_member_order = 'bysource', autodoc_default_flags\n 'sphinx_rtd_theme',\n 'sphinxcontrib-programoutput',\n]\n\ntesting_extras = [\n 'coverage',\n 'nose',\n 'nosexcover',\n 'pep8',\n 'tox',\n]\n\nsetup(\n name='letsencrypt',\n version=version,\n description=\"Let's Encrypt client\",\n long_description=readme, # later: + '\\n\\n' + changes\n url='https://github.com/letsencrypt/letsencrypt',\n author=\"Let's Encrypt Project\",\n author_email='[email protected]',\n license='Apache License 2.0',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Environment :: Console',\n 'Environment :: Console :: Curses',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Security',\n 'Topic :: System :: Installation/Setup',\n 'Topic :: System :: Networking',\n 'Topic :: System :: Systems Administration',\n 'Topic :: Utilities',\n ],\n\n packages=find_packages(exclude=['docs', 'examples', 'tests', 'venv']),\n include_package_data=True,\n\n install_requires=install_requires,\n extras_require={\n 'dev': dev_extras,\n 'docs': docs_extras,\n 'testing': testing_extras,\n },\n\n tests_require=install_requires,\n # to test all packages run \"python setup.py test -s\n # {acme,letsencrypt_apache,letsencrypt_nginx}\"\n test_suite='letsencrypt',\n\n entry_points={\n 'console_scripts': [\n 'letsencrypt = letsencrypt.cli:main',\n 'letsencrypt-renewer = letsencrypt.renewer:main',\n ],\n 'letsencrypt.plugins': [\n 'manual = letsencrypt.plugins.manual:Authenticator',\n 'null = letsencrypt.plugins.null:Installer',\n 'standalone = letsencrypt.plugins.standalone:Authenticator',\n 'webroot = letsencrypt.plugins.webroot:Authenticator',\n ],\n },\n)\n", "path": "setup.py"}]} | 1,980 | 203 |
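As a hedged illustration of the failure mode behind the letsencrypt row above: its golden diff relaxes the `ConfigArgParse` pin in `setup.py` because `pkg_resources` refuses to run against an older installed fork. The sketch below shows how that `VersionConflict` surfaces at runtime; the helper function and the exact specifiers are assumptions for the demo, not code from the repository.

```python
# Sketch only: how a pkg_resources VersionConflict like the one in the issue
# arises, and why relaxing the specifier avoids it. Requires setuptools.
import pkg_resources

def check_requirement(spec):
    """Print whether the currently installed distribution satisfies `spec`."""
    try:
        dist = pkg_resources.require(spec)[0]
        print(f"OK: {dist.project_name} {dist.version} satisfies {spec!r}")
    except pkg_resources.VersionConflict as exc:
        # The error path reported in the issue: an installed 0.9.3 fork cannot
        # satisfy 'ConfigArgParse>=0.10.0'.
        print(f"VersionConflict: {exc}")
    except pkg_resources.DistributionNotFound:
        print(f"Not installed: {spec!r}")

if __name__ == "__main__":
    check_requirement("ConfigArgParse>=0.10.0")  # strict pin (old behaviour)
    check_requirement("ConfigArgParse>=0.9.3")   # relaxed pin (patched behaviour)
```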
gh_patches_debug_15825 | rasdani/github-patches | git_diff | zulip__zulip-13771 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Creation of temporary files in requirements/ can cause provision to fail
An example to trigger this for me was as follows:
* `cd requirements/`
* edit a file using an editor which creates a temporary file in this location (e.g. vim, depending on configuration)
* `tools/provision`
* provision fails with an error like
```
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xcd in position 17: invalid continuation byte
```
This appears to be due to the venv management script not being able to handle the unexpected file produced by e.g. vim.
This is not a major issue, but it is a bit of a strange one to debug if you are not expecting it or are new, and it could potentially be easy to fix.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/lib/clean_venv_cache.py`
Content:
```
1 #!/usr/bin/env python3
2 import argparse
3 import os
4 import sys
5
6 from typing import Set
7
8 ZULIP_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
9 sys.path.append(ZULIP_PATH)
10 from scripts.lib.hash_reqs import expand_reqs, hash_deps
11 from scripts.lib.zulip_tools import \
12 get_environment, get_recent_deployments, parse_cache_script_args, \
13 purge_unused_caches
14
15 ENV = get_environment()
16 VENV_CACHE_DIR = '/srv/zulip-venv-cache'
17 if ENV == "travis":
18 VENV_CACHE_DIR = os.path.join(os.environ["HOME"], "zulip-venv-cache")
19
20 def get_caches_in_use(threshold_days):
21 # type: (int) -> Set[str]
22 setups_to_check = set([ZULIP_PATH, ])
23 caches_in_use = set()
24
25 def add_current_venv_cache(venv_name: str) -> None:
26 CACHE_SYMLINK = os.path.join(os.path.dirname(ZULIP_PATH), venv_name)
27 CURRENT_CACHE = os.path.dirname(os.path.realpath(CACHE_SYMLINK))
28 caches_in_use.add(CURRENT_CACHE)
29
30 if ENV == "prod":
31 setups_to_check |= get_recent_deployments(threshold_days)
32 if ENV == "dev":
33 add_current_venv_cache("zulip-py3-venv")
34 add_current_venv_cache("zulip-thumbor-venv")
35
36 for path in setups_to_check:
37 reqs_dir = os.path.join(path, "requirements")
38 # If the target directory doesn't contain a requirements
39 # directory, skip it to avoid throwing an exception trying to
40 # list its requirements subdirectory.
41 if not os.path.exists(reqs_dir):
42 continue
43 for filename in os.listdir(reqs_dir):
44 requirements_file = os.path.join(reqs_dir, filename)
45 deps = expand_reqs(requirements_file)
46 hash_val = hash_deps(deps)
47 caches_in_use.add(os.path.join(VENV_CACHE_DIR, hash_val))
48
49 return caches_in_use
50
51 def main(args: argparse.Namespace) -> None:
52 caches_in_use = get_caches_in_use(args.threshold_days)
53 purge_unused_caches(
54 VENV_CACHE_DIR, caches_in_use, "venv cache", args)
55
56 if __name__ == "__main__":
57 args = parse_cache_script_args("This script cleans unused zulip venv caches.")
58 main(args)
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scripts/lib/clean_venv_cache.py b/scripts/lib/clean_venv_cache.py
--- a/scripts/lib/clean_venv_cache.py
+++ b/scripts/lib/clean_venv_cache.py
@@ -1,5 +1,6 @@
#!/usr/bin/env python3
import argparse
+import glob
import os
import sys
@@ -40,8 +41,8 @@
# list its requirements subdirectory.
if not os.path.exists(reqs_dir):
continue
- for filename in os.listdir(reqs_dir):
- requirements_file = os.path.join(reqs_dir, filename)
+ requirements_files = glob.glob(os.path.join(reqs_dir, "*.txt"))
+ for requirements_file in requirements_files:
deps = expand_reqs(requirements_file)
hash_val = hash_deps(deps)
caches_in_use.add(os.path.join(VENV_CACHE_DIR, hash_val))
| {"golden_diff": "diff --git a/scripts/lib/clean_venv_cache.py b/scripts/lib/clean_venv_cache.py\n--- a/scripts/lib/clean_venv_cache.py\n+++ b/scripts/lib/clean_venv_cache.py\n@@ -1,5 +1,6 @@\n #!/usr/bin/env python3\n import argparse\n+import glob\n import os\n import sys\n \n@@ -40,8 +41,8 @@\n # list its requirements subdirectory.\n if not os.path.exists(reqs_dir):\n continue\n- for filename in os.listdir(reqs_dir):\n- requirements_file = os.path.join(reqs_dir, filename)\n+ requirements_files = glob.glob(os.path.join(reqs_dir, \"*.txt\"))\n+ for requirements_file in requirements_files:\n deps = expand_reqs(requirements_file)\n hash_val = hash_deps(deps)\n caches_in_use.add(os.path.join(VENV_CACHE_DIR, hash_val))\n", "issue": "Creation of temporary files in requirements/ can cause provision to fail\nAn example to trigger this for me is was as follows:\r\n* `cd requirements/`\r\n* edit file using editor which creates temporary file in this location (eg vim, depending on configuration)\r\n* `tools/provision`\r\n* provision fails with an error like\r\n```\r\nUnicodeDecodeError: 'utf-8' codec can't decode byte 0xcd in position 17: invalid continuation byte\r\n```\r\n\r\nThis appears to be due to the venv management script not being able to handle the unexpected file produced by eg. vim.\r\n\r\nThis is not a major issue, but is a bit of a strange issue to debug if you are not expecting it or are new, and potentially could be easy to fix.\n", "before_files": [{"content": "#!/usr/bin/env python3\nimport argparse\nimport os\nimport sys\n\nfrom typing import Set\n\nZULIP_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nsys.path.append(ZULIP_PATH)\nfrom scripts.lib.hash_reqs import expand_reqs, hash_deps\nfrom scripts.lib.zulip_tools import \\\n get_environment, get_recent_deployments, parse_cache_script_args, \\\n purge_unused_caches\n\nENV = get_environment()\nVENV_CACHE_DIR = '/srv/zulip-venv-cache'\nif ENV == \"travis\":\n VENV_CACHE_DIR = os.path.join(os.environ[\"HOME\"], \"zulip-venv-cache\")\n\ndef get_caches_in_use(threshold_days):\n # type: (int) -> Set[str]\n setups_to_check = set([ZULIP_PATH, ])\n caches_in_use = set()\n\n def add_current_venv_cache(venv_name: str) -> None:\n CACHE_SYMLINK = os.path.join(os.path.dirname(ZULIP_PATH), venv_name)\n CURRENT_CACHE = os.path.dirname(os.path.realpath(CACHE_SYMLINK))\n caches_in_use.add(CURRENT_CACHE)\n\n if ENV == \"prod\":\n setups_to_check |= get_recent_deployments(threshold_days)\n if ENV == \"dev\":\n add_current_venv_cache(\"zulip-py3-venv\")\n add_current_venv_cache(\"zulip-thumbor-venv\")\n\n for path in setups_to_check:\n reqs_dir = os.path.join(path, \"requirements\")\n # If the target directory doesn't contain a requirements\n # directory, skip it to avoid throwing an exception trying to\n # list its requirements subdirectory.\n if not os.path.exists(reqs_dir):\n continue\n for filename in os.listdir(reqs_dir):\n requirements_file = os.path.join(reqs_dir, filename)\n deps = expand_reqs(requirements_file)\n hash_val = hash_deps(deps)\n caches_in_use.add(os.path.join(VENV_CACHE_DIR, hash_val))\n\n return caches_in_use\n\ndef main(args: argparse.Namespace) -> None:\n caches_in_use = get_caches_in_use(args.threshold_days)\n purge_unused_caches(\n VENV_CACHE_DIR, caches_in_use, \"venv cache\", args)\n\nif __name__ == \"__main__\":\n args = parse_cache_script_args(\"This script cleans unused zulip venv caches.\")\n main(args)\n", "path": "scripts/lib/clean_venv_cache.py"}], "after_files": [{"content": 
"#!/usr/bin/env python3\nimport argparse\nimport glob\nimport os\nimport sys\n\nfrom typing import Set\n\nZULIP_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nsys.path.append(ZULIP_PATH)\nfrom scripts.lib.hash_reqs import expand_reqs, hash_deps\nfrom scripts.lib.zulip_tools import \\\n get_environment, get_recent_deployments, parse_cache_script_args, \\\n purge_unused_caches\n\nENV = get_environment()\nVENV_CACHE_DIR = '/srv/zulip-venv-cache'\nif ENV == \"travis\":\n VENV_CACHE_DIR = os.path.join(os.environ[\"HOME\"], \"zulip-venv-cache\")\n\ndef get_caches_in_use(threshold_days):\n # type: (int) -> Set[str]\n setups_to_check = set([ZULIP_PATH, ])\n caches_in_use = set()\n\n def add_current_venv_cache(venv_name: str) -> None:\n CACHE_SYMLINK = os.path.join(os.path.dirname(ZULIP_PATH), venv_name)\n CURRENT_CACHE = os.path.dirname(os.path.realpath(CACHE_SYMLINK))\n caches_in_use.add(CURRENT_CACHE)\n\n if ENV == \"prod\":\n setups_to_check |= get_recent_deployments(threshold_days)\n if ENV == \"dev\":\n add_current_venv_cache(\"zulip-py3-venv\")\n add_current_venv_cache(\"zulip-thumbor-venv\")\n\n for path in setups_to_check:\n reqs_dir = os.path.join(path, \"requirements\")\n # If the target directory doesn't contain a requirements\n # directory, skip it to avoid throwing an exception trying to\n # list its requirements subdirectory.\n if not os.path.exists(reqs_dir):\n continue\n requirements_files = glob.glob(os.path.join(reqs_dir, \"*.txt\"))\n for requirements_file in requirements_files:\n deps = expand_reqs(requirements_file)\n hash_val = hash_deps(deps)\n caches_in_use.add(os.path.join(VENV_CACHE_DIR, hash_val))\n\n return caches_in_use\n\ndef main(args: argparse.Namespace) -> None:\n caches_in_use = get_caches_in_use(args.threshold_days)\n purge_unused_caches(\n VENV_CACHE_DIR, caches_in_use, \"venv cache\", args)\n\nif __name__ == \"__main__\":\n args = parse_cache_script_args(\"This script cleans unused zulip venv caches.\")\n main(args)\n", "path": "scripts/lib/clean_venv_cache.py"}]} | 1,060 | 198 |
gh_patches_debug_37464 | rasdani/github-patches | git_diff | hydroshare__hydroshare-5088 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
track users' full email domain
**Describe the feature you'd like and what it will do**
In HS v2.5.4, we don't track the full user email domain in our metrics.
**Why is this feature important?**
We need more insight into how HS's ecosystem of tools is being used. This information should drive our continued development on existing tools and our consideration of additions for future use.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hs_tracking/middleware.py`
Content:
```
1 from django.utils.deprecation import MiddlewareMixin
2
3 from .models import Session
4 from . import utils
5 import re
6
7 RESOURCE_RE = re.compile('resource/([0-9a-f]{32})/') # parser for resource id
8 BAG_RE = re.compile('bags/([0-9a-f]{32})\.zip') # parser for resource id # noqa
9 LANDING_RE = re.compile('resource/([0-9a-f]{32})/$') # reference to resource home page
10 REST_RE = re.compile('/hsapi/') # reference to REST or internal
11 INTERNAL_RE = re.compile('/hsapi/_internal/') # reference to an internal page
12
13
14 def get_resource_id_from_url(path):
15 """ read a resource id from a URL """
16 m = RESOURCE_RE.search(path)
17 if m and m.group(1):
18 return m.group(1)
19 m = BAG_RE.search(path)
20 if m and m.group(1):
21 return m.group(1)
22 return None
23
24
25 def get_rest_from_url(path):
26 """ determine whether a URL is a REST call or not
27
28 This should always return boolean, not search result.
29 """
30 if REST_RE.search(path):
31 if INTERNAL_RE.search(path):
32 return False
33 else:
34 return True
35 else:
36 return False
37
38
39 def get_landing_from_url(path):
40 """ determine whether a URL is a landing page.
41
42 This should always return boolean, not search result.
43 """
44 if LANDING_RE.search(path):
45 return True
46 else:
47 return False
48
49
50 class Tracking(MiddlewareMixin):
51 """The default tracking middleware logs all successful responses as a 'visit' variable with
52 the URL path as its value."""
53
54 def process_response(self, request, response):
55
56 # filter out heartbeat messages
57 if request.path.startswith('/heartbeat/'):
58 return response
59
60 # filter out web crawlers
61 is_human = getattr(request, 'is_human', False)
62 if not is_human:
63 return response
64
65 # filter out everything that is not an OK response
66 if response.status_code != 200:
67 return response
68
69 if not hasattr(request, 'user'):
70 return response
71
72 # get user info that will be recorded in the visit log
73 session = Session.objects.for_request(request)
74 usertype = utils.get_user_type(session)
75 emaildomain = utils.get_user_email_domain(session)
76 ip = utils.get_client_ip(request)
77
78 # build the message string (key:value pairs)
79 msg = '|'.join([str(item) for item in
80 ['user_ip=%s' % ip,
81 'http_method=%s' % request.method,
82 'http_code=%s' % response.status_code,
83 'user_type=%s' % usertype,
84 'user_email_domain=%s' % emaildomain,
85 'request_url=%s' % request.path]])
86
87 resource_id = get_resource_id_from_url(request.path)
88 rest = get_rest_from_url(request.path)
89 landing = get_landing_from_url(request.path)
90
91 # save the activity in the database
92 session.record('visit', value=msg, resource_id=resource_id,
93 landing=landing, rest=rest)
94
95 return response
96
```
Path: `hs_tracking/utils.py`
Content:
```
1 import robot_detection
2 from ipware.ip import get_ip
3 from hs_tools_resource.models import RequestUrlBase, RequestUrlBaseAggregation, RequestUrlBaseFile
4 from urllib.parse import urlparse
5
6
7 def get_client_ip(request):
8 return get_ip(request)
9
10
11 def get_user_type(session):
12 try:
13 user = session.visitor.user
14 usertype = user.userprofile.user_type
15 except AttributeError:
16 usertype = None
17 return usertype
18
19
20 def get_user_email_domain(session):
21 try:
22 user = session.visitor.user
23 emaildomain = user.email.split('@')[-1]
24 shortdomain = '.'.join(emaildomain.split('.')[1:])
25 except AttributeError:
26 shortdomain = None
27 return shortdomain
28
29
30 def is_human(user_agent):
31 if robot_detection.is_robot(user_agent):
32 return False
33 return True
34
35
36 def get_std_log_fields(request, session=None):
37 """ returns a standard set of metadata that to each receiver function.
38 This ensures that all activities are reporting a consistent set of metrics
39 """
40 user_type = None
41 user_email = None
42 if session is not None:
43 user_type = get_user_type(session)
44 user_email = get_user_email_domain(session)
45
46 return {
47 'user_ip': get_client_ip(request),
48 'user_type': user_type,
49 'user_email_domain': user_email,
50 }
51
52
53 def authentic_redirect_url(url):
54 """ Validates a url scheme and netloc is in an existing web app
55 :param url: String of a url
56 :return: Boolean, True if the url exists in a web app
57 """
58 if not url:
59 return False
60 u = urlparse(url)
61 url_base = "{}://{}".format(u.scheme, u.netloc)
62 return RequestUrlBase.objects.filter(value__startswith=url_base).exists() \
63 or RequestUrlBaseAggregation.objects.filter(value__startswith=url_base).exists() \
64 or RequestUrlBaseFile.objects.filter(value__startswith=url_base).exists()
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/hs_tracking/middleware.py b/hs_tracking/middleware.py
--- a/hs_tracking/middleware.py
+++ b/hs_tracking/middleware.py
@@ -72,7 +72,7 @@
# get user info that will be recorded in the visit log
session = Session.objects.for_request(request)
usertype = utils.get_user_type(session)
- emaildomain = utils.get_user_email_domain(session)
+ email_tld = utils.get_user_email_tld(session)
ip = utils.get_client_ip(request)
# build the message string (key:value pairs)
@@ -81,7 +81,7 @@
'http_method=%s' % request.method,
'http_code=%s' % response.status_code,
'user_type=%s' % usertype,
- 'user_email_domain=%s' % emaildomain,
+ 'user_email_domain=%s' % email_tld,
'request_url=%s' % request.path]])
resource_id = get_resource_id_from_url(request.path)
diff --git a/hs_tracking/utils.py b/hs_tracking/utils.py
--- a/hs_tracking/utils.py
+++ b/hs_tracking/utils.py
@@ -21,10 +21,20 @@
try:
user = session.visitor.user
emaildomain = user.email.split('@')[-1]
- shortdomain = '.'.join(emaildomain.split('.')[1:])
except AttributeError:
- shortdomain = None
- return shortdomain
+ emaildomain = None
+ return emaildomain
+
+
+def get_user_email_tld(session, emaildomain=None):
+ try:
+ if not emaildomain:
+ emaildomain = get_user_email_domain(session)
+ if emaildomain:
+ shortdomain = '.'.join(emaildomain.split('.')[1:])
+ return shortdomain
+ except AttributeError:
+ return None
def is_human(user_agent):
@@ -38,15 +48,18 @@
This ensures that all activities are reporting a consistent set of metrics
"""
user_type = None
- user_email = None
+ user_email_tld = None
+ full_domain = None
if session is not None:
user_type = get_user_type(session)
- user_email = get_user_email_domain(session)
+ full_domain = get_user_email_domain(session)
+ user_email_tld = get_user_email_tld(session, full_domain)
return {
'user_ip': get_client_ip(request),
'user_type': user_type,
- 'user_email_domain': user_email,
+ 'user_email_domain': user_email_tld,
+ 'user_email_domain_full': full_domain
}
| {"golden_diff": "diff --git a/hs_tracking/middleware.py b/hs_tracking/middleware.py\n--- a/hs_tracking/middleware.py\n+++ b/hs_tracking/middleware.py\n@@ -72,7 +72,7 @@\n # get user info that will be recorded in the visit log\n session = Session.objects.for_request(request)\n usertype = utils.get_user_type(session)\n- emaildomain = utils.get_user_email_domain(session)\n+ email_tld = utils.get_user_email_tld(session)\n ip = utils.get_client_ip(request)\n \n # build the message string (key:value pairs)\n@@ -81,7 +81,7 @@\n 'http_method=%s' % request.method,\n 'http_code=%s' % response.status_code,\n 'user_type=%s' % usertype,\n- 'user_email_domain=%s' % emaildomain,\n+ 'user_email_domain=%s' % email_tld,\n 'request_url=%s' % request.path]])\n \n resource_id = get_resource_id_from_url(request.path)\ndiff --git a/hs_tracking/utils.py b/hs_tracking/utils.py\n--- a/hs_tracking/utils.py\n+++ b/hs_tracking/utils.py\n@@ -21,10 +21,20 @@\n try:\n user = session.visitor.user\n emaildomain = user.email.split('@')[-1]\n- shortdomain = '.'.join(emaildomain.split('.')[1:])\n except AttributeError:\n- shortdomain = None\n- return shortdomain\n+ emaildomain = None\n+ return emaildomain\n+\n+\n+def get_user_email_tld(session, emaildomain=None):\n+ try:\n+ if not emaildomain:\n+ emaildomain = get_user_email_domain(session)\n+ if emaildomain:\n+ shortdomain = '.'.join(emaildomain.split('.')[1:])\n+ return shortdomain\n+ except AttributeError:\n+ return None\n \n \n def is_human(user_agent):\n@@ -38,15 +48,18 @@\n This ensures that all activities are reporting a consistent set of metrics\n \"\"\"\n user_type = None\n- user_email = None\n+ user_email_tld = None\n+ full_domain = None\n if session is not None:\n user_type = get_user_type(session)\n- user_email = get_user_email_domain(session)\n+ full_domain = get_user_email_domain(session)\n+ user_email_tld = get_user_email_tld(session, full_domain)\n \n return {\n 'user_ip': get_client_ip(request),\n 'user_type': user_type,\n- 'user_email_domain': user_email,\n+ 'user_email_domain': user_email_tld,\n+ 'user_email_domain_full': full_domain\n }\n", "issue": "track users' full email domain\n**Describe the feature you'd like and what it will do**\r\nIn HS v2.5.4, we don't track full user email domain in our metrics.\r\n\r\n\r\n**Why is this feature important?**\r\nWe need more insight into how HS' ecosystem of tools are being used. This information should drive our continued development on existing tools and our consideration of additions for future use.\r\n\n", "before_files": [{"content": "from django.utils.deprecation import MiddlewareMixin\n\nfrom .models import Session\nfrom . 
import utils\nimport re\n\nRESOURCE_RE = re.compile('resource/([0-9a-f]{32})/') # parser for resource id\nBAG_RE = re.compile('bags/([0-9a-f]{32})\\.zip') # parser for resource id # noqa\nLANDING_RE = re.compile('resource/([0-9a-f]{32})/$') # reference to resource home page\nREST_RE = re.compile('/hsapi/') # reference to REST or internal\nINTERNAL_RE = re.compile('/hsapi/_internal/') # reference to an internal page\n\n\ndef get_resource_id_from_url(path):\n \"\"\" read a resource id from a URL \"\"\"\n m = RESOURCE_RE.search(path)\n if m and m.group(1):\n return m.group(1)\n m = BAG_RE.search(path)\n if m and m.group(1):\n return m.group(1)\n return None\n\n\ndef get_rest_from_url(path):\n \"\"\" determine whether a URL is a REST call or not\n\n This should always return boolean, not search result.\n \"\"\"\n if REST_RE.search(path):\n if INTERNAL_RE.search(path):\n return False\n else:\n return True\n else:\n return False\n\n\ndef get_landing_from_url(path):\n \"\"\" determine whether a URL is a landing page.\n\n This should always return boolean, not search result.\n \"\"\"\n if LANDING_RE.search(path):\n return True\n else:\n return False\n\n\nclass Tracking(MiddlewareMixin):\n \"\"\"The default tracking middleware logs all successful responses as a 'visit' variable with\n the URL path as its value.\"\"\"\n\n def process_response(self, request, response):\n\n # filter out heartbeat messages\n if request.path.startswith('/heartbeat/'):\n return response\n\n # filter out web crawlers\n is_human = getattr(request, 'is_human', False)\n if not is_human:\n return response\n\n # filter out everything that is not an OK response\n if response.status_code != 200:\n return response\n\n if not hasattr(request, 'user'):\n return response\n\n # get user info that will be recorded in the visit log\n session = Session.objects.for_request(request)\n usertype = utils.get_user_type(session)\n emaildomain = utils.get_user_email_domain(session)\n ip = utils.get_client_ip(request)\n\n # build the message string (key:value pairs)\n msg = '|'.join([str(item) for item in\n ['user_ip=%s' % ip,\n 'http_method=%s' % request.method,\n 'http_code=%s' % response.status_code,\n 'user_type=%s' % usertype,\n 'user_email_domain=%s' % emaildomain,\n 'request_url=%s' % request.path]])\n\n resource_id = get_resource_id_from_url(request.path)\n rest = get_rest_from_url(request.path)\n landing = get_landing_from_url(request.path)\n\n # save the activity in the database\n session.record('visit', value=msg, resource_id=resource_id,\n landing=landing, rest=rest)\n\n return response\n", "path": "hs_tracking/middleware.py"}, {"content": "import robot_detection\nfrom ipware.ip import get_ip\nfrom hs_tools_resource.models import RequestUrlBase, RequestUrlBaseAggregation, RequestUrlBaseFile\nfrom urllib.parse import urlparse\n\n\ndef get_client_ip(request):\n return get_ip(request)\n\n\ndef get_user_type(session):\n try:\n user = session.visitor.user\n usertype = user.userprofile.user_type\n except AttributeError:\n usertype = None\n return usertype\n\n\ndef get_user_email_domain(session):\n try:\n user = session.visitor.user\n emaildomain = user.email.split('@')[-1]\n shortdomain = '.'.join(emaildomain.split('.')[1:])\n except AttributeError:\n shortdomain = None\n return shortdomain\n\n\ndef is_human(user_agent):\n if robot_detection.is_robot(user_agent):\n return False\n return True\n\n\ndef get_std_log_fields(request, session=None):\n \"\"\" returns a standard set of metadata that to each receiver function.\n This ensures that 
all activities are reporting a consistent set of metrics\n \"\"\"\n user_type = None\n user_email = None\n if session is not None:\n user_type = get_user_type(session)\n user_email = get_user_email_domain(session)\n\n return {\n 'user_ip': get_client_ip(request),\n 'user_type': user_type,\n 'user_email_domain': user_email,\n }\n\n\ndef authentic_redirect_url(url):\n \"\"\" Validates a url scheme and netloc is in an existing web app\n :param url: String of a url\n :return: Boolean, True if the url exists in a web app\n \"\"\"\n if not url:\n return False\n u = urlparse(url)\n url_base = \"{}://{}\".format(u.scheme, u.netloc)\n return RequestUrlBase.objects.filter(value__startswith=url_base).exists() \\\n or RequestUrlBaseAggregation.objects.filter(value__startswith=url_base).exists() \\\n or RequestUrlBaseFile.objects.filter(value__startswith=url_base).exists()\n", "path": "hs_tracking/utils.py"}], "after_files": [{"content": "from django.utils.deprecation import MiddlewareMixin\n\nfrom .models import Session\nfrom . import utils\nimport re\n\nRESOURCE_RE = re.compile('resource/([0-9a-f]{32})/') # parser for resource id\nBAG_RE = re.compile('bags/([0-9a-f]{32})\\.zip') # parser for resource id # noqa\nLANDING_RE = re.compile('resource/([0-9a-f]{32})/$') # reference to resource home page\nREST_RE = re.compile('/hsapi/') # reference to REST or internal\nINTERNAL_RE = re.compile('/hsapi/_internal/') # reference to an internal page\n\n\ndef get_resource_id_from_url(path):\n \"\"\" read a resource id from a URL \"\"\"\n m = RESOURCE_RE.search(path)\n if m and m.group(1):\n return m.group(1)\n m = BAG_RE.search(path)\n if m and m.group(1):\n return m.group(1)\n return None\n\n\ndef get_rest_from_url(path):\n \"\"\" determine whether a URL is a REST call or not\n\n This should always return boolean, not search result.\n \"\"\"\n if REST_RE.search(path):\n if INTERNAL_RE.search(path):\n return False\n else:\n return True\n else:\n return False\n\n\ndef get_landing_from_url(path):\n \"\"\" determine whether a URL is a landing page.\n\n This should always return boolean, not search result.\n \"\"\"\n if LANDING_RE.search(path):\n return True\n else:\n return False\n\n\nclass Tracking(MiddlewareMixin):\n \"\"\"The default tracking middleware logs all successful responses as a 'visit' variable with\n the URL path as its value.\"\"\"\n\n def process_response(self, request, response):\n\n # filter out heartbeat messages\n if request.path.startswith('/heartbeat/'):\n return response\n\n # filter out web crawlers\n is_human = getattr(request, 'is_human', False)\n if not is_human:\n return response\n\n # filter out everything that is not an OK response\n if response.status_code != 200:\n return response\n\n if not hasattr(request, 'user'):\n return response\n\n # get user info that will be recorded in the visit log\n session = Session.objects.for_request(request)\n usertype = utils.get_user_type(session)\n email_tld = utils.get_user_email_tld(session)\n ip = utils.get_client_ip(request)\n\n # build the message string (key:value pairs)\n msg = '|'.join([str(item) for item in\n ['user_ip=%s' % ip,\n 'http_method=%s' % request.method,\n 'http_code=%s' % response.status_code,\n 'user_type=%s' % usertype,\n 'user_email_domain=%s' % email_tld,\n 'request_url=%s' % request.path]])\n\n resource_id = get_resource_id_from_url(request.path)\n rest = get_rest_from_url(request.path)\n landing = get_landing_from_url(request.path)\n\n # save the activity in the database\n session.record('visit', value=msg, 
resource_id=resource_id,\n landing=landing, rest=rest)\n\n return response\n", "path": "hs_tracking/middleware.py"}, {"content": "import robot_detection\nfrom ipware.ip import get_ip\nfrom hs_tools_resource.models import RequestUrlBase, RequestUrlBaseAggregation, RequestUrlBaseFile\nfrom urllib.parse import urlparse\n\n\ndef get_client_ip(request):\n return get_ip(request)\n\n\ndef get_user_type(session):\n try:\n user = session.visitor.user\n usertype = user.userprofile.user_type\n except AttributeError:\n usertype = None\n return usertype\n\n\ndef get_user_email_domain(session):\n try:\n user = session.visitor.user\n emaildomain = user.email.split('@')[-1]\n except AttributeError:\n emaildomain = None\n return emaildomain\n\n\ndef get_user_email_tld(session, emaildomain=None):\n try:\n if not emaildomain:\n emaildomain = get_user_email_domain(session)\n if emaildomain:\n shortdomain = '.'.join(emaildomain.split('.')[1:])\n return shortdomain\n except AttributeError:\n return None\n\n\ndef is_human(user_agent):\n if robot_detection.is_robot(user_agent):\n return False\n return True\n\n\ndef get_std_log_fields(request, session=None):\n \"\"\" returns a standard set of metadata that to each receiver function.\n This ensures that all activities are reporting a consistent set of metrics\n \"\"\"\n user_type = None\n user_email_tld = None\n full_domain = None\n if session is not None:\n user_type = get_user_type(session)\n full_domain = get_user_email_domain(session)\n user_email_tld = get_user_email_tld(session, full_domain)\n\n return {\n 'user_ip': get_client_ip(request),\n 'user_type': user_type,\n 'user_email_domain': user_email_tld,\n 'user_email_domain_full': full_domain\n }\n\n\ndef authentic_redirect_url(url):\n \"\"\" Validates a url scheme and netloc is in an existing web app\n :param url: String of a url\n :return: Boolean, True if the url exists in a web app\n \"\"\"\n if not url:\n return False\n u = urlparse(url)\n url_base = \"{}://{}\".format(u.scheme, u.netloc)\n return RequestUrlBase.objects.filter(value__startswith=url_base).exists() \\\n or RequestUrlBaseAggregation.objects.filter(value__startswith=url_base).exists() \\\n or RequestUrlBaseFile.objects.filter(value__startswith=url_base).exists()\n", "path": "hs_tracking/utils.py"}]} | 1,793 | 589 |
gh_patches_debug_15375 | rasdani/github-patches | git_diff | openstates__openstates-scrapers-1221 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make `LXMLMixin.lxmlize` use the scraper's `session`, rather than a raw `requests.get()`
Otherwise it slows down `--fast` scrapes because it doesn't use cached responses.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `openstates/utils/lxmlize.py`
Content:
```
1 import requests
2 import lxml.html
3
4
5 class LXMLMixin(object):
6 """Mixin for adding LXML helper functions to Open States code."""
7
8 def lxmlize(self, url, raise_exceptions=False):
9 """Parses document into an LXML object and makes links absolute.
10
11 Args:
12 url (str): URL of the document to parse.
13 Returns:
14 Element: Document node representing the page.
15 """
16 try:
17 response = requests.get(url)
18 except requests.exceptions.SSLError:
19 self.warning('`self.lxmlize()` failed due to SSL error, trying'\
20 'an unverified `requests.get()`')
21 response = requests.get(url, verify=False)
22
23 if raise_exceptions:
24 response.raise_for_status()
25
26 page = lxml.html.fromstring(response.text)
27 page.make_links_absolute(url)
28
29 return page
30
31 def get_node(self, base_node, xpath_query):
32 """Searches for node in an element tree.
33
34 Attempts to return only the first node found for an xpath query. Meant
35 to cut down on exception handling boilerplate.
36
37 Args:
38 base_node (Element): Document node to begin querying from.
39 xpath_query (str): XPath query to define nodes to search for.
40 Returns:
41 Element: First node found that matches the query.
42 """
43 try:
44 node = base_node.xpath(xpath_query)[0]
45 except IndexError:
46 node = None
47
48 return node
49
50 def get_nodes(self, base_node, xpath_query):
51 """Searches for nodes in an element tree.
52
53 Attempts to return all nodes found for an xpath query. Meant to cut
54 down on exception handling boilerplate.
55
56 Args:
57 base_node (Element): Document node to begin querying from.
58 xpath_query (str): Xpath query to define nodes to search for.
59 Returns:
60 List[Element]: All nodes found that match the query.
61 """
62 return base_node.xpath(xpath_query)
63
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/openstates/utils/lxmlize.py b/openstates/utils/lxmlize.py
--- a/openstates/utils/lxmlize.py
+++ b/openstates/utils/lxmlize.py
@@ -14,11 +14,13 @@
Element: Document node representing the page.
"""
try:
- response = requests.get(url)
+ # This class is always mixed into subclasses of `billy.Scraper`,
+ # which have a `get` method defined.
+ response = self.get(url)
except requests.exceptions.SSLError:
self.warning('`self.lxmlize()` failed due to SSL error, trying'\
- 'an unverified `requests.get()`')
- response = requests.get(url, verify=False)
+ 'an unverified `self.get()` (i.e. `requests.get()`)')
+ response = self.get(url, verify=False)
if raise_exceptions:
response.raise_for_status()
| {"golden_diff": "diff --git a/openstates/utils/lxmlize.py b/openstates/utils/lxmlize.py\n--- a/openstates/utils/lxmlize.py\n+++ b/openstates/utils/lxmlize.py\n@@ -14,11 +14,13 @@\n Element: Document node representing the page.\n \"\"\"\n try:\n- response = requests.get(url)\n+ # This class is always mixed into subclasses of `billy.Scraper`,\n+ # which have a `get` method defined.\n+ response = self.get(url)\n except requests.exceptions.SSLError:\n self.warning('`self.lxmlize()` failed due to SSL error, trying'\\\n- 'an unverified `requests.get()`')\n- response = requests.get(url, verify=False)\n+ 'an unverified `self.get()` (i.e. `requests.get()`)')\n+ response = self.get(url, verify=False)\n \n if raise_exceptions:\n response.raise_for_status()\n", "issue": "Make `LXMLMixin.lxmlize` use scraper's `session`, rather than a raw `request.get()`\nOtherwise it slows down `--fast` scrapes because it doesn't use cached responses.\n", "before_files": [{"content": "import requests\nimport lxml.html\n\n\nclass LXMLMixin(object):\n \"\"\"Mixin for adding LXML helper functions to Open States code.\"\"\"\n\n def lxmlize(self, url, raise_exceptions=False):\n \"\"\"Parses document into an LXML object and makes links absolute.\n\n Args:\n url (str): URL of the document to parse.\n Returns:\n Element: Document node representing the page.\n \"\"\"\n try:\n response = requests.get(url)\n except requests.exceptions.SSLError:\n self.warning('`self.lxmlize()` failed due to SSL error, trying'\\\n 'an unverified `requests.get()`')\n response = requests.get(url, verify=False)\n\n if raise_exceptions:\n response.raise_for_status()\n\n page = lxml.html.fromstring(response.text)\n page.make_links_absolute(url)\n\n return page\n\n def get_node(self, base_node, xpath_query):\n \"\"\"Searches for node in an element tree.\n\n Attempts to return only the first node found for an xpath query. Meant\n to cut down on exception handling boilerplate.\n\n Args:\n base_node (Element): Document node to begin querying from.\n xpath_query (str): XPath query to define nodes to search for.\n Returns:\n Element: First node found that matches the query.\n \"\"\"\n try:\n node = base_node.xpath(xpath_query)[0]\n except IndexError:\n node = None\n\n return node\n\n def get_nodes(self, base_node, xpath_query):\n \"\"\"Searches for nodes in an element tree.\n\n Attempts to return all nodes found for an xpath query. Meant to cut\n down on exception handling boilerplate.\n\n Args:\n base_node (Element): Document node to begin querying from.\n xpath_query (str): Xpath query to define nodes to search for.\n Returns:\n List[Element]: All nodes found that match the query.\n \"\"\"\n return base_node.xpath(xpath_query)\n", "path": "openstates/utils/lxmlize.py"}], "after_files": [{"content": "import requests\nimport lxml.html\n\n\nclass LXMLMixin(object):\n \"\"\"Mixin for adding LXML helper functions to Open States code.\"\"\"\n\n def lxmlize(self, url, raise_exceptions=False):\n \"\"\"Parses document into an LXML object and makes links absolute.\n\n Args:\n url (str): URL of the document to parse.\n Returns:\n Element: Document node representing the page.\n \"\"\"\n try:\n # This class is always mixed into subclasses of `billy.Scraper`,\n # which have a `get` method defined.\n response = self.get(url)\n except requests.exceptions.SSLError:\n self.warning('`self.lxmlize()` failed due to SSL error, trying'\\\n 'an unverified `self.get()` (i.e. 
`requests.get()`)')\n response = self.get(url, verify=False)\n\n if raise_exceptions:\n response.raise_for_status()\n\n page = lxml.html.fromstring(response.text)\n page.make_links_absolute(url)\n\n return page\n\n def get_node(self, base_node, xpath_query):\n \"\"\"Searches for node in an element tree.\n\n Attempts to return only the first node found for an xpath query. Meant\n to cut down on exception handling boilerplate.\n\n Args:\n base_node (Element): Document node to begin querying from.\n xpath_query (str): XPath query to define nodes to search for.\n Returns:\n Element: First node found that matches the query.\n \"\"\"\n try:\n node = base_node.xpath(xpath_query)[0]\n except IndexError:\n node = None\n\n return node\n\n def get_nodes(self, base_node, xpath_query):\n \"\"\"Searches for nodes in an element tree.\n\n Attempts to return all nodes found for an xpath query. Meant to cut\n down on exception handling boilerplate.\n\n Args:\n base_node (Element): Document node to begin querying from.\n xpath_query (str): Xpath query to define nodes to search for.\n Returns:\n List[Element]: All nodes found that match the query.\n \"\"\"\n return base_node.xpath(xpath_query)\n", "path": "openstates/utils/lxmlize.py"}]} | 837 | 205 |
gh_patches_debug_13345 | rasdani/github-patches | git_diff | paperless-ngx__paperless-ngx-1645 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] 1.9 OCR output mode "skip_noarchive" is not valid
### Description
With version 1.9, the following env variable throws errors:
```ini
PAPERLESS_OCR_MODE=skip_noarchive
```
### Steps to reproduce
1. set above docker env variable
2. start container
### Webserver logs
```bash
Paperless-ngx docker container starting...
Mapping UID and GID for paperless:paperless to 50014:50014
Creating directory /tmp/paperless
Adjusting permissions of paperless files. This may take a while.
Waiting for PostgreSQL to start...
Waiting for Redis...
Connected to Redis broker.
Apply database migrations...
SystemCheckError: System check identified some issues:
ERRORS:
?: OCR output mode "skip_noarchive" is not valid
```
### Paperless-ngx version
1.9
### Host OS
Ubuntu 22.04 x86_64
### Installation method
Docker - official image
### Browser
_No response_
### Configuration changes
_No response_
### Other
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/paperless/checks.py`
Content:
```
1 import grp
2 import os
3 import pwd
4 import shutil
5 import stat
6
7 from django.conf import settings
8 from django.core.checks import Error
9 from django.core.checks import register
10 from django.core.checks import Warning
11
12 exists_message = "{} is set but doesn't exist."
13 exists_hint = "Create a directory at {}"
14 writeable_message = "{} is not writeable"
15 writeable_hint = (
16 "Set the permissions of {} to be writeable by the user running the "
17 "Paperless services"
18 )
19
20
21 def path_check(var, directory):
22 messages = []
23 if directory:
24 if not os.path.isdir(directory):
25 messages.append(
26 Error(exists_message.format(var), exists_hint.format(directory)),
27 )
28 else:
29 test_file = os.path.join(
30 directory,
31 f"__paperless_write_test_{os.getpid()}__",
32 )
33 try:
34 with open(test_file, "w"):
35 pass
36 except PermissionError:
37 dir_stat = os.stat(directory)
38 dir_mode = stat.filemode(dir_stat.st_mode)
39 dir_owner = pwd.getpwuid(dir_stat.st_uid).pw_name
40 dir_group = grp.getgrgid(dir_stat.st_gid).gr_name
41 messages.append(
42 Error(
43 writeable_message.format(var),
44 writeable_hint.format(
45 f"\n{dir_mode} {dir_owner} {dir_group} " f"{directory}\n",
46 ),
47 ),
48 )
49 finally:
50 if os.path.isfile(test_file):
51 os.remove(test_file)
52
53 return messages
54
55
56 @register()
57 def paths_check(app_configs, **kwargs):
58 """
59 Check the various paths for existence, readability and writeability
60 """
61
62 return (
63 path_check("PAPERLESS_DATA_DIR", settings.DATA_DIR)
64 + path_check("PAPERLESS_TRASH_DIR", settings.TRASH_DIR)
65 + path_check("PAPERLESS_MEDIA_ROOT", settings.MEDIA_ROOT)
66 + path_check("PAPERLESS_CONSUMPTION_DIR", settings.CONSUMPTION_DIR)
67 )
68
69
70 @register()
71 def binaries_check(app_configs, **kwargs):
72 """
73 Paperless requires the existence of a few binaries, so we do some checks
74 for those here.
75 """
76
77 error = "Paperless can't find {}. Without it, consumption is impossible."
78 hint = "Either it's not in your ${PATH} or it's not installed."
79
80 binaries = (settings.CONVERT_BINARY, "tesseract")
81
82 check_messages = []
83 for binary in binaries:
84 if shutil.which(binary) is None:
85 check_messages.append(Warning(error.format(binary), hint))
86
87 return check_messages
88
89
90 @register()
91 def debug_mode_check(app_configs, **kwargs):
92 if settings.DEBUG:
93 return [
94 Warning(
95 "DEBUG mode is enabled. Disable Debug mode. This is a serious "
96 "security issue, since it puts security overides in place which "
97 "are meant to be only used during development. This "
98 "also means that paperless will tell anyone various "
99 "debugging information when something goes wrong.",
100 ),
101 ]
102 else:
103 return []
104
105
106 @register()
107 def settings_values_check(app_configs, **kwargs):
108 """
109 Validates at least some of the user provided settings
110 """
111
112 def _ocrmypdf_settings_check():
113 """
114 Validates some of the arguments which will be provided to ocrmypdf
115 against the valid options. Use "ocrmypdf --help" to see the valid
116 inputs
117 """
118 msgs = []
119 if settings.OCR_OUTPUT_TYPE not in {
120 "pdfa",
121 "pdf",
122 "pdfa-1",
123 "pdfa-2",
124 "pdfa-3",
125 }:
126 msgs.append(
127 Error(f'OCR output type "{settings.OCR_OUTPUT_TYPE}" is not valid'),
128 )
129
130 if settings.OCR_MODE not in {"force", "skip", "redo_ocr"}:
131 msgs.append(Error(f'OCR output mode "{settings.OCR_MODE}" is not valid'))
132
133 if settings.OCR_CLEAN not in {"clean", "clean_final"}:
134 msgs.append(Error(f'OCR clean mode "{settings.OCR_CLEAN}" is not valid'))
135 return msgs
136
137 def _timezone_validate():
138 """
139 Validates the user provided timezone is a valid timezone
140 """
141 try:
142 import zoneinfo
143 except ImportError: # pragma: nocover
144 import backports.zoneinfo as zoneinfo
145 msgs = []
146 if settings.TIME_ZONE not in zoneinfo.available_timezones():
147 msgs.append(
148 Error(f'Timezone "{settings.TIME_ZONE}" is not a valid timezone'),
149 )
150 return msgs
151
152 return _ocrmypdf_settings_check() + _timezone_validate()
153
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/paperless/checks.py b/src/paperless/checks.py
--- a/src/paperless/checks.py
+++ b/src/paperless/checks.py
@@ -127,10 +127,10 @@
Error(f'OCR output type "{settings.OCR_OUTPUT_TYPE}" is not valid'),
)
- if settings.OCR_MODE not in {"force", "skip", "redo_ocr"}:
+ if settings.OCR_MODE not in {"force", "skip", "redo", "skip_noarchive"}:
msgs.append(Error(f'OCR output mode "{settings.OCR_MODE}" is not valid'))
- if settings.OCR_CLEAN not in {"clean", "clean_final"}:
+ if settings.OCR_CLEAN not in {"clean", "clean-final"}:
msgs.append(Error(f'OCR clean mode "{settings.OCR_CLEAN}" is not valid'))
return msgs
| {"golden_diff": "diff --git a/src/paperless/checks.py b/src/paperless/checks.py\n--- a/src/paperless/checks.py\n+++ b/src/paperless/checks.py\n@@ -127,10 +127,10 @@\n Error(f'OCR output type \"{settings.OCR_OUTPUT_TYPE}\" is not valid'),\n )\n \n- if settings.OCR_MODE not in {\"force\", \"skip\", \"redo_ocr\"}:\n+ if settings.OCR_MODE not in {\"force\", \"skip\", \"redo\", \"skip_noarchive\"}:\n msgs.append(Error(f'OCR output mode \"{settings.OCR_MODE}\" is not valid'))\n \n- if settings.OCR_CLEAN not in {\"clean\", \"clean_final\"}:\n+ if settings.OCR_CLEAN not in {\"clean\", \"clean-final\"}:\n msgs.append(Error(f'OCR clean mode \"{settings.OCR_CLEAN}\" is not valid'))\n return msgs\n", "issue": "[BUG] 1.9 OCR output mode \"skip_noarchive\" is not valid\n### Description\n\nWith version 1.9 the following env trow errors:\r\n```ini\r\nPAPERLESS_OCR_MODE=skip_noarchive\r\n```\n\n### Steps to reproduce\n\n1. set above docker env variable\r\n2. start container\n\n### Webserver logs\n\n```bash\nPaperless-ngx docker container starting...\r\nMapping UID and GID for paperless:paperless to 50014:50014\r\nCreating directory /tmp/paperless\r\nAdjusting permissions of paperless files. This may take a while.\r\nWaiting for PostgreSQL to start...\r\nWaiting for Redis...\r\nConnected to Redis broker.\r\nApply database migrations...\r\nSystemCheckError: System check identified some issues:\r\nERRORS:\r\n?: OCR output mode \"skip_noarchive\" is not valid\n```\n\n\n### Paperless-ngx version\n\n1.9\n\n### Host OS\n\nUbuntu 22.04 x86_64\n\n### Installation method\n\nDocker - official image\n\n### Browser\n\n_No response_\n\n### Configuration changes\n\n_No response_\n\n### Other\n\n_No response_\n", "before_files": [{"content": "import grp\nimport os\nimport pwd\nimport shutil\nimport stat\n\nfrom django.conf import settings\nfrom django.core.checks import Error\nfrom django.core.checks import register\nfrom django.core.checks import Warning\n\nexists_message = \"{} is set but doesn't exist.\"\nexists_hint = \"Create a directory at {}\"\nwriteable_message = \"{} is not writeable\"\nwriteable_hint = (\n \"Set the permissions of {} to be writeable by the user running the \"\n \"Paperless services\"\n)\n\n\ndef path_check(var, directory):\n messages = []\n if directory:\n if not os.path.isdir(directory):\n messages.append(\n Error(exists_message.format(var), exists_hint.format(directory)),\n )\n else:\n test_file = os.path.join(\n directory,\n f\"__paperless_write_test_{os.getpid()}__\",\n )\n try:\n with open(test_file, \"w\"):\n pass\n except PermissionError:\n dir_stat = os.stat(directory)\n dir_mode = stat.filemode(dir_stat.st_mode)\n dir_owner = pwd.getpwuid(dir_stat.st_uid).pw_name\n dir_group = grp.getgrgid(dir_stat.st_gid).gr_name\n messages.append(\n Error(\n writeable_message.format(var),\n writeable_hint.format(\n f\"\\n{dir_mode} {dir_owner} {dir_group} \" f\"{directory}\\n\",\n ),\n ),\n )\n finally:\n if os.path.isfile(test_file):\n os.remove(test_file)\n\n return messages\n\n\n@register()\ndef paths_check(app_configs, **kwargs):\n \"\"\"\n Check the various paths for existence, readability and writeability\n \"\"\"\n\n return (\n path_check(\"PAPERLESS_DATA_DIR\", settings.DATA_DIR)\n + path_check(\"PAPERLESS_TRASH_DIR\", settings.TRASH_DIR)\n + path_check(\"PAPERLESS_MEDIA_ROOT\", settings.MEDIA_ROOT)\n + path_check(\"PAPERLESS_CONSUMPTION_DIR\", settings.CONSUMPTION_DIR)\n )\n\n\n@register()\ndef binaries_check(app_configs, **kwargs):\n \"\"\"\n Paperless requires the existence of a few 
binaries, so we do some checks\n for those here.\n \"\"\"\n\n error = \"Paperless can't find {}. Without it, consumption is impossible.\"\n hint = \"Either it's not in your ${PATH} or it's not installed.\"\n\n binaries = (settings.CONVERT_BINARY, \"tesseract\")\n\n check_messages = []\n for binary in binaries:\n if shutil.which(binary) is None:\n check_messages.append(Warning(error.format(binary), hint))\n\n return check_messages\n\n\n@register()\ndef debug_mode_check(app_configs, **kwargs):\n if settings.DEBUG:\n return [\n Warning(\n \"DEBUG mode is enabled. Disable Debug mode. This is a serious \"\n \"security issue, since it puts security overides in place which \"\n \"are meant to be only used during development. This \"\n \"also means that paperless will tell anyone various \"\n \"debugging information when something goes wrong.\",\n ),\n ]\n else:\n return []\n\n\n@register()\ndef settings_values_check(app_configs, **kwargs):\n \"\"\"\n Validates at least some of the user provided settings\n \"\"\"\n\n def _ocrmypdf_settings_check():\n \"\"\"\n Validates some of the arguments which will be provided to ocrmypdf\n against the valid options. Use \"ocrmypdf --help\" to see the valid\n inputs\n \"\"\"\n msgs = []\n if settings.OCR_OUTPUT_TYPE not in {\n \"pdfa\",\n \"pdf\",\n \"pdfa-1\",\n \"pdfa-2\",\n \"pdfa-3\",\n }:\n msgs.append(\n Error(f'OCR output type \"{settings.OCR_OUTPUT_TYPE}\" is not valid'),\n )\n\n if settings.OCR_MODE not in {\"force\", \"skip\", \"redo_ocr\"}:\n msgs.append(Error(f'OCR output mode \"{settings.OCR_MODE}\" is not valid'))\n\n if settings.OCR_CLEAN not in {\"clean\", \"clean_final\"}:\n msgs.append(Error(f'OCR clean mode \"{settings.OCR_CLEAN}\" is not valid'))\n return msgs\n\n def _timezone_validate():\n \"\"\"\n Validates the user provided timezone is a valid timezone\n \"\"\"\n try:\n import zoneinfo\n except ImportError: # pragma: nocover\n import backports.zoneinfo as zoneinfo\n msgs = []\n if settings.TIME_ZONE not in zoneinfo.available_timezones():\n msgs.append(\n Error(f'Timezone \"{settings.TIME_ZONE}\" is not a valid timezone'),\n )\n return msgs\n\n return _ocrmypdf_settings_check() + _timezone_validate()\n", "path": "src/paperless/checks.py"}], "after_files": [{"content": "import grp\nimport os\nimport pwd\nimport shutil\nimport stat\n\nfrom django.conf import settings\nfrom django.core.checks import Error\nfrom django.core.checks import register\nfrom django.core.checks import Warning\n\nexists_message = \"{} is set but doesn't exist.\"\nexists_hint = \"Create a directory at {}\"\nwriteable_message = \"{} is not writeable\"\nwriteable_hint = (\n \"Set the permissions of {} to be writeable by the user running the \"\n \"Paperless services\"\n)\n\n\ndef path_check(var, directory):\n messages = []\n if directory:\n if not os.path.isdir(directory):\n messages.append(\n Error(exists_message.format(var), exists_hint.format(directory)),\n )\n else:\n test_file = os.path.join(\n directory,\n f\"__paperless_write_test_{os.getpid()}__\",\n )\n try:\n with open(test_file, \"w\"):\n pass\n except PermissionError:\n dir_stat = os.stat(directory)\n dir_mode = stat.filemode(dir_stat.st_mode)\n dir_owner = pwd.getpwuid(dir_stat.st_uid).pw_name\n dir_group = grp.getgrgid(dir_stat.st_gid).gr_name\n messages.append(\n Error(\n writeable_message.format(var),\n writeable_hint.format(\n f\"\\n{dir_mode} {dir_owner} {dir_group} \" f\"{directory}\\n\",\n ),\n ),\n )\n finally:\n if os.path.isfile(test_file):\n os.remove(test_file)\n\n return 
messages\n\n\n@register()\ndef paths_check(app_configs, **kwargs):\n \"\"\"\n Check the various paths for existence, readability and writeability\n \"\"\"\n\n return (\n path_check(\"PAPERLESS_DATA_DIR\", settings.DATA_DIR)\n + path_check(\"PAPERLESS_TRASH_DIR\", settings.TRASH_DIR)\n + path_check(\"PAPERLESS_MEDIA_ROOT\", settings.MEDIA_ROOT)\n + path_check(\"PAPERLESS_CONSUMPTION_DIR\", settings.CONSUMPTION_DIR)\n )\n\n\n@register()\ndef binaries_check(app_configs, **kwargs):\n \"\"\"\n Paperless requires the existence of a few binaries, so we do some checks\n for those here.\n \"\"\"\n\n error = \"Paperless can't find {}. Without it, consumption is impossible.\"\n hint = \"Either it's not in your ${PATH} or it's not installed.\"\n\n binaries = (settings.CONVERT_BINARY, \"tesseract\")\n\n check_messages = []\n for binary in binaries:\n if shutil.which(binary) is None:\n check_messages.append(Warning(error.format(binary), hint))\n\n return check_messages\n\n\n@register()\ndef debug_mode_check(app_configs, **kwargs):\n if settings.DEBUG:\n return [\n Warning(\n \"DEBUG mode is enabled. Disable Debug mode. This is a serious \"\n \"security issue, since it puts security overides in place which \"\n \"are meant to be only used during development. This \"\n \"also means that paperless will tell anyone various \"\n \"debugging information when something goes wrong.\",\n ),\n ]\n else:\n return []\n\n\n@register()\ndef settings_values_check(app_configs, **kwargs):\n \"\"\"\n Validates at least some of the user provided settings\n \"\"\"\n\n def _ocrmypdf_settings_check():\n \"\"\"\n Validates some of the arguments which will be provided to ocrmypdf\n against the valid options. Use \"ocrmypdf --help\" to see the valid\n inputs\n \"\"\"\n msgs = []\n if settings.OCR_OUTPUT_TYPE not in {\n \"pdfa\",\n \"pdf\",\n \"pdfa-1\",\n \"pdfa-2\",\n \"pdfa-3\",\n }:\n msgs.append(\n Error(f'OCR output type \"{settings.OCR_OUTPUT_TYPE}\" is not valid'),\n )\n\n if settings.OCR_MODE not in {\"force\", \"skip\", \"redo\", \"skip_noarchive\"}:\n msgs.append(Error(f'OCR output mode \"{settings.OCR_MODE}\" is not valid'))\n\n if settings.OCR_CLEAN not in {\"clean\", \"clean-final\"}:\n msgs.append(Error(f'OCR clean mode \"{settings.OCR_CLEAN}\" is not valid'))\n return msgs\n\n def _timezone_validate():\n \"\"\"\n Validates the user provided timezone is a valid timezone\n \"\"\"\n try:\n import zoneinfo\n except ImportError: # pragma: nocover\n import backports.zoneinfo as zoneinfo\n msgs = []\n if settings.TIME_ZONE not in zoneinfo.available_timezones():\n msgs.append(\n Error(f'Timezone \"{settings.TIME_ZONE}\" is not a valid timezone'),\n )\n return msgs\n\n return _ocrmypdf_settings_check() + _timezone_validate()\n", "path": "src/paperless/checks.py"}]} | 1,852 | 199 |
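A hedged, stand-alone version of the corrected membership checks from the paperless-ngx diff above. The two sets mirror the patched `checks.py`; the wrapper function and the example values are assumptions for the demo.

```python
# Sketch only: which OCR settings pass validation after the patch.
VALID_OCR_MODES = {"force", "skip", "redo", "skip_noarchive"}
VALID_OCR_CLEAN = {"clean", "clean-final"}

def validate(ocr_mode, ocr_clean):
    errors = []
    if ocr_mode not in VALID_OCR_MODES:
        errors.append(f'OCR output mode "{ocr_mode}" is not valid')
    if ocr_clean not in VALID_OCR_CLEAN:
        errors.append(f'OCR clean mode "{ocr_clean}" is not valid')
    return errors

print(validate("skip_noarchive", "clean"))   # [] after the patch
print(validate("redo_ocr", "clean_final"))   # both old spellings now fail
```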
gh_patches_debug_5394 | rasdani/github-patches | git_diff | cupy__cupy-1717 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`cupy.nextafter` is wrong on a=0
`test_nextafter_combination` is failing.
`{'dtype_b': <type 'numpy.float32'>, 'dtype_a': <type 'numpy.bool_'>}`
```
x = array([[1.1754944e-38, 1.0000001e+00, 1.1754944e-38],
[1.0000001e+00, 1.1754944e-38, 1.0000000e+00]], dtype=float32)
y = array([[1.4012985e-45, 1.0000001e+00, 1.4012985e-45],
[1.0000001e+00, 1.4012985e-45, 1.0000000e+00]], dtype=float32)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cupy/math/floating.py`
Content:
```
1 from cupy import core
2 from cupy.math import ufunc
3
4
5 signbit = core.create_ufunc(
6 'cupy_signbit',
7 ('e->?', 'f->?', 'd->?'),
8 'out0 = signbit(in0)',
9 doc='''Tests elementwise if the sign bit is set (i.e. less than zero).
10
11 .. seealso:: :data:`numpy.signbit`
12
13 ''')
14
15
16 copysign = ufunc.create_math_ufunc(
17 'copysign', 2, 'cupy_copysign',
18 '''Returns the first argument with the sign bit of the second elementwise.
19
20 .. seealso:: :data:`numpy.copysign`
21
22 ''')
23
24
25 ldexp = core.create_ufunc(
26 'cupy_ldexp',
27 ('ei->e', 'fi->f', 'el->e', 'fl->f', 'di->d', 'dq->d'),
28 'out0 = ldexp(in0, in1)',
29 doc='''Computes ``x1 * 2 ** x2`` elementwise.
30
31 .. seealso:: :data:`numpy.ldexp`
32
33 ''')
34
35
36 frexp = core.create_ufunc(
37 'cupy_frexp',
38 ('e->ei', 'f->fi', 'd->di'),
39 'int nptr; out0 = frexp(in0, &nptr); out1 = nptr',
40 doc='''Decomposes each element to mantissa and two's exponent.
41
42 This ufunc outputs two arrays of the input dtype and the ``int`` dtype.
43
44 .. seealso:: :data:`numpy.frexp`
45
46 ''')
47
48
49 nextafter = ufunc.create_math_ufunc(
50 'nextafter', 2, 'cupy_nextafter',
51 '''Computes the nearest neighbor float values towards the second argument.
52
53 .. seealso:: :data:`numpy.nextafter`
54
55 ''')
56
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cupy/math/floating.py b/cupy/math/floating.py
--- a/cupy/math/floating.py
+++ b/cupy/math/floating.py
@@ -50,6 +50,11 @@
'nextafter', 2, 'cupy_nextafter',
'''Computes the nearest neighbor float values towards the second argument.
+ .. note::
+ For values that are close to zero (or denormal numbers),
+ results of :func:`cupy.nextafter` may be different from those of
+ :func:`numpy.nextafter`, because CuPy sets ``-ftz=true``.
+
.. seealso:: :data:`numpy.nextafter`
''')
| {"golden_diff": "diff --git a/cupy/math/floating.py b/cupy/math/floating.py\n--- a/cupy/math/floating.py\n+++ b/cupy/math/floating.py\n@@ -50,6 +50,11 @@\n 'nextafter', 2, 'cupy_nextafter',\n '''Computes the nearest neighbor float values towards the second argument.\n \n+ .. note::\n+ For values that are close to zero (or denormal numbers),\n+ results of :func:`cupy.nextafter` may be different from those of\n+ :func:`numpy.nextafter`, because CuPy sets ``-ftz=true``.\n+\n .. seealso:: :data:`numpy.nextafter`\n \n ''')\n", "issue": "`cupy.nextafter` is wrong on a=0\n`test_nextafter_combination` is failing.\r\n\r\n`{'dtype_b': <type 'numpy.float32'>, 'dtype_a': <type 'numpy.bool_'>}`\r\n```\r\nx = array([[1.1754944e-38, 1.0000001e+00, 1.1754944e-38],\r\n [1.0000001e+00, 1.1754944e-38, 1.0000000e+00]], dtype=float32)\r\ny = array([[1.4012985e-45, 1.0000001e+00, 1.4012985e-45],\r\n [1.0000001e+00, 1.4012985e-45, 1.0000000e+00]], dtype=float32)\r\n```\n", "before_files": [{"content": "from cupy import core\nfrom cupy.math import ufunc\n\n\nsignbit = core.create_ufunc(\n 'cupy_signbit',\n ('e->?', 'f->?', 'd->?'),\n 'out0 = signbit(in0)',\n doc='''Tests elementwise if the sign bit is set (i.e. less than zero).\n\n .. seealso:: :data:`numpy.signbit`\n\n ''')\n\n\ncopysign = ufunc.create_math_ufunc(\n 'copysign', 2, 'cupy_copysign',\n '''Returns the first argument with the sign bit of the second elementwise.\n\n .. seealso:: :data:`numpy.copysign`\n\n ''')\n\n\nldexp = core.create_ufunc(\n 'cupy_ldexp',\n ('ei->e', 'fi->f', 'el->e', 'fl->f', 'di->d', 'dq->d'),\n 'out0 = ldexp(in0, in1)',\n doc='''Computes ``x1 * 2 ** x2`` elementwise.\n\n .. seealso:: :data:`numpy.ldexp`\n\n ''')\n\n\nfrexp = core.create_ufunc(\n 'cupy_frexp',\n ('e->ei', 'f->fi', 'd->di'),\n 'int nptr; out0 = frexp(in0, &nptr); out1 = nptr',\n doc='''Decomposes each element to mantissa and two's exponent.\n\n This ufunc outputs two arrays of the input dtype and the ``int`` dtype.\n\n .. seealso:: :data:`numpy.frexp`\n\n ''')\n\n\nnextafter = ufunc.create_math_ufunc(\n 'nextafter', 2, 'cupy_nextafter',\n '''Computes the nearest neighbor float values towards the second argument.\n\n .. seealso:: :data:`numpy.nextafter`\n\n ''')\n", "path": "cupy/math/floating.py"}], "after_files": [{"content": "from cupy import core\nfrom cupy.math import ufunc\n\n\nsignbit = core.create_ufunc(\n 'cupy_signbit',\n ('e->?', 'f->?', 'd->?'),\n 'out0 = signbit(in0)',\n doc='''Tests elementwise if the sign bit is set (i.e. less than zero).\n\n .. seealso:: :data:`numpy.signbit`\n\n ''')\n\n\ncopysign = ufunc.create_math_ufunc(\n 'copysign', 2, 'cupy_copysign',\n '''Returns the first argument with the sign bit of the second elementwise.\n\n .. seealso:: :data:`numpy.copysign`\n\n ''')\n\n\nldexp = core.create_ufunc(\n 'cupy_ldexp',\n ('ei->e', 'fi->f', 'el->e', 'fl->f', 'di->d', 'dq->d'),\n 'out0 = ldexp(in0, in1)',\n doc='''Computes ``x1 * 2 ** x2`` elementwise.\n\n .. seealso:: :data:`numpy.ldexp`\n\n ''')\n\n\nfrexp = core.create_ufunc(\n 'cupy_frexp',\n ('e->ei', 'f->fi', 'd->di'),\n 'int nptr; out0 = frexp(in0, &nptr); out1 = nptr',\n doc='''Decomposes each element to mantissa and two's exponent.\n\n This ufunc outputs two arrays of the input dtype and the ``int`` dtype.\n\n .. seealso:: :data:`numpy.frexp`\n\n ''')\n\n\nnextafter = ufunc.create_math_ufunc(\n 'nextafter', 2, 'cupy_nextafter',\n '''Computes the nearest neighbor float values towards the second argument.\n\n .. 
note::\n For values that are close to zero (or denormal numbers),\n results of :func:`cupy.nextafter` may be different from those of\n :func:`numpy.nextafter`, because CuPy sets ``-ftz=true``.\n\n .. seealso:: :data:`numpy.nextafter`\n\n ''')\n", "path": "cupy/math/floating.py"}]} | 1,044 | 154 |
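Editorial note on this record: a quick way to see the behaviour difference that the added docstring note describes. NumPy steps from 0 to the smallest subnormal float32, while CuPy (whose kernels are built with ``-ftz=true``, per the patch) lands on the smallest normal float32 instead. The sketch below is illustrative only; the CuPy half assumes a CUDA-capable GPU and is skipped if CuPy is not importable.

```python
import numpy as np

a = np.zeros(1, dtype=np.float32)
b = np.ones(1, dtype=np.float32)

# NumPy: the next float32 after 0 towards 1 is the smallest subnormal (~1.4e-45).
print(np.nextafter(a, b))

# CuPy: with -ftz=true, subnormals are flushed, so the result is the smallest
# normal float32 (~1.1754944e-38), which is the mismatch reported in the issue.
try:
    import cupy as cp
    print(cp.nextafter(cp.asarray(a), cp.asarray(b)))
except ImportError:
    pass  # CuPy / CUDA not available in this environment
```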
gh_patches_debug_34472 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3951 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider dominos_pizza_jp is broken
During the global build at 2021-06-30-14-42-26, spider **dominos_pizza_jp** failed with **0 features** and **0 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/logs/dominos_pizza_jp.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/output/dominos_pizza_jp.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/output/dominos_pizza_jp.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/dominos_pizza_jp.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import re
3
4 import scrapy
5
6 from locations.items import GeojsonPointItem
7
8
9 class DominosPizzaJPSpider(scrapy.Spider):
10 name = "dominos_pizza_jp"
11 item_attributes = {"brand": "Domino's", "brand_wikidata": "Q839466"}
12 allowed_domains = ["dominos.jp"]
13 start_urls = [
14 "https://www.dominos.jp/sitemap.aspx",
15 ]
16 download_delay = 0.3
17
18 def parse(self, response):
19 response.selector.remove_namespaces()
20 store_urls = response.xpath('//url/loc/text()[contains(.,"/store/")]').extract()
21 for url in store_urls:
22 yield scrapy.Request(url, callback=self.parse_store)
23
24 def parse_store(self, response):
25 ref = re.search(r".+/(.+?)/?(?:\.html|$)", response.url).group(1)
26
27 properties = {
28 "ref": ref,
29 "name": response.xpath(
30 'normalize-space(//div[@class="storetitle"][1]/text())'
31 ).extract_first(),
32 "addr_full": response.xpath(
33 'normalize-space(//span[@id="store-address-info"]/p/a/text())'
34 ).extract_first(),
35 "postcode": re.search(
36 r"([\d-]*)$",
37 response.xpath(
38 'normalize-space(//div[@class="store-details-text"][1]/p/text())'
39 ).extract_first(),
40 ).group(1),
41 "country": "JP",
42 "lat": response.xpath(
43 'normalize-space(//input[@id="store-lat"]/@value)'
44 ).extract_first(),
45 "lon": response.xpath(
46 'normalize-space(//input[@id="store-lon"]/@value)'
47 ).extract_first(),
48 "phone": re.search(
49 r"\s([\d-]*)$",
50 response.xpath('//div[@id="store-tel"]/a/text()').extract_first(),
51 ).group(1),
52 "website": response.url,
53 }
54
55 yield GeojsonPointItem(**properties)
56
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/dominos_pizza_jp.py b/locations/spiders/dominos_pizza_jp.py
--- a/locations/spiders/dominos_pizza_jp.py
+++ b/locations/spiders/dominos_pizza_jp.py
@@ -8,7 +8,11 @@
class DominosPizzaJPSpider(scrapy.Spider):
name = "dominos_pizza_jp"
- item_attributes = {"brand": "Domino's", "brand_wikidata": "Q839466"}
+ item_attributes = {
+ "brand": "Domino's",
+ "brand_wikidata": "Q839466",
+ "country": "JP",
+ }
allowed_domains = ["dominos.jp"]
start_urls = [
"https://www.dominos.jp/sitemap.aspx",
@@ -27,28 +31,24 @@
properties = {
"ref": ref,
"name": response.xpath(
- 'normalize-space(//div[@class="storetitle"][1]/text())'
+ 'normalize-space(//h1[@class="storetitle"][1]/text())'
).extract_first(),
"addr_full": response.xpath(
- 'normalize-space(//span[@id="store-address-info"]/p/a/text())'
+ 'normalize-space(//span[@id="store-address-info"]/p/text()[4])'
).extract_first(),
"postcode": re.search(
r"([\d-]*)$",
response.xpath(
- 'normalize-space(//div[@class="store-details-text"][1]/p/text())'
+ 'normalize-space(//div[@class="store-details-text"]/span/p/text()[2])'
).extract_first(),
).group(1),
- "country": "JP",
"lat": response.xpath(
'normalize-space(//input[@id="store-lat"]/@value)'
).extract_first(),
"lon": response.xpath(
'normalize-space(//input[@id="store-lon"]/@value)'
).extract_first(),
- "phone": re.search(
- r"\s([\d-]*)$",
- response.xpath('//div[@id="store-tel"]/a/text()').extract_first(),
- ).group(1),
+ "phone": response.xpath('//div[@id="store-tel"]/a/text()').extract_first(),
"website": response.url,
}
| {"golden_diff": "diff --git a/locations/spiders/dominos_pizza_jp.py b/locations/spiders/dominos_pizza_jp.py\n--- a/locations/spiders/dominos_pizza_jp.py\n+++ b/locations/spiders/dominos_pizza_jp.py\n@@ -8,7 +8,11 @@\n \n class DominosPizzaJPSpider(scrapy.Spider):\n name = \"dominos_pizza_jp\"\n- item_attributes = {\"brand\": \"Domino's\", \"brand_wikidata\": \"Q839466\"}\n+ item_attributes = {\n+ \"brand\": \"Domino's\",\n+ \"brand_wikidata\": \"Q839466\",\n+ \"country\": \"JP\",\n+ }\n allowed_domains = [\"dominos.jp\"]\n start_urls = [\n \"https://www.dominos.jp/sitemap.aspx\",\n@@ -27,28 +31,24 @@\n properties = {\n \"ref\": ref,\n \"name\": response.xpath(\n- 'normalize-space(//div[@class=\"storetitle\"][1]/text())'\n+ 'normalize-space(//h1[@class=\"storetitle\"][1]/text())'\n ).extract_first(),\n \"addr_full\": response.xpath(\n- 'normalize-space(//span[@id=\"store-address-info\"]/p/a/text())'\n+ 'normalize-space(//span[@id=\"store-address-info\"]/p/text()[4])'\n ).extract_first(),\n \"postcode\": re.search(\n r\"([\\d-]*)$\",\n response.xpath(\n- 'normalize-space(//div[@class=\"store-details-text\"][1]/p/text())'\n+ 'normalize-space(//div[@class=\"store-details-text\"]/span/p/text()[2])'\n ).extract_first(),\n ).group(1),\n- \"country\": \"JP\",\n \"lat\": response.xpath(\n 'normalize-space(//input[@id=\"store-lat\"]/@value)'\n ).extract_first(),\n \"lon\": response.xpath(\n 'normalize-space(//input[@id=\"store-lon\"]/@value)'\n ).extract_first(),\n- \"phone\": re.search(\n- r\"\\s([\\d-]*)$\",\n- response.xpath('//div[@id=\"store-tel\"]/a/text()').extract_first(),\n- ).group(1),\n+ \"phone\": response.xpath('//div[@id=\"store-tel\"]/a/text()').extract_first(),\n \"website\": response.url,\n }\n", "issue": "Spider dominos_pizza_jp is broken\nDuring the global build at 2021-06-30-14-42-26, spider **dominos_pizza_jp** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/logs/dominos_pizza_jp.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/output/dominos_pizza_jp.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/output/dominos_pizza_jp.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport re\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\n\n\nclass DominosPizzaJPSpider(scrapy.Spider):\n name = \"dominos_pizza_jp\"\n item_attributes = {\"brand\": \"Domino's\", \"brand_wikidata\": \"Q839466\"}\n allowed_domains = [\"dominos.jp\"]\n start_urls = [\n \"https://www.dominos.jp/sitemap.aspx\",\n ]\n download_delay = 0.3\n\n def parse(self, response):\n response.selector.remove_namespaces()\n store_urls = response.xpath('//url/loc/text()[contains(.,\"/store/\")]').extract()\n for url in store_urls:\n yield scrapy.Request(url, callback=self.parse_store)\n\n def parse_store(self, response):\n ref = re.search(r\".+/(.+?)/?(?:\\.html|$)\", response.url).group(1)\n\n properties = {\n \"ref\": ref,\n \"name\": response.xpath(\n 'normalize-space(//div[@class=\"storetitle\"][1]/text())'\n ).extract_first(),\n \"addr_full\": response.xpath(\n 'normalize-space(//span[@id=\"store-address-info\"]/p/a/text())'\n ).extract_first(),\n \"postcode\": re.search(\n r\"([\\d-]*)$\",\n response.xpath(\n 'normalize-space(//div[@class=\"store-details-text\"][1]/p/text())'\n ).extract_first(),\n ).group(1),\n \"country\": \"JP\",\n \"lat\": response.xpath(\n 'normalize-space(//input[@id=\"store-lat\"]/@value)'\n 
).extract_first(),\n \"lon\": response.xpath(\n 'normalize-space(//input[@id=\"store-lon\"]/@value)'\n ).extract_first(),\n \"phone\": re.search(\n r\"\\s([\\d-]*)$\",\n response.xpath('//div[@id=\"store-tel\"]/a/text()').extract_first(),\n ).group(1),\n \"website\": response.url,\n }\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/dominos_pizza_jp.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport re\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\n\n\nclass DominosPizzaJPSpider(scrapy.Spider):\n name = \"dominos_pizza_jp\"\n item_attributes = {\n \"brand\": \"Domino's\",\n \"brand_wikidata\": \"Q839466\",\n \"country\": \"JP\",\n }\n allowed_domains = [\"dominos.jp\"]\n start_urls = [\n \"https://www.dominos.jp/sitemap.aspx\",\n ]\n download_delay = 0.3\n\n def parse(self, response):\n response.selector.remove_namespaces()\n store_urls = response.xpath('//url/loc/text()[contains(.,\"/store/\")]').extract()\n for url in store_urls:\n yield scrapy.Request(url, callback=self.parse_store)\n\n def parse_store(self, response):\n ref = re.search(r\".+/(.+?)/?(?:\\.html|$)\", response.url).group(1)\n\n properties = {\n \"ref\": ref,\n \"name\": response.xpath(\n 'normalize-space(//h1[@class=\"storetitle\"][1]/text())'\n ).extract_first(),\n \"addr_full\": response.xpath(\n 'normalize-space(//span[@id=\"store-address-info\"]/p/text()[4])'\n ).extract_first(),\n \"postcode\": re.search(\n r\"([\\d-]*)$\",\n response.xpath(\n 'normalize-space(//div[@class=\"store-details-text\"]/span/p/text()[2])'\n ).extract_first(),\n ).group(1),\n \"lat\": response.xpath(\n 'normalize-space(//input[@id=\"store-lat\"]/@value)'\n ).extract_first(),\n \"lon\": response.xpath(\n 'normalize-space(//input[@id=\"store-lon\"]/@value)'\n ).extract_first(),\n \"phone\": response.xpath('//div[@id=\"store-tel\"]/a/text()').extract_first(),\n \"website\": response.url,\n }\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/dominos_pizza_jp.py"}]} | 1,025 | 541 |
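Editorial note on this record: the fix is purely a matter of pointing the spider at the site's current markup. If parsel (the selector library Scrapy uses) is installed, the updated XPaths can be exercised against a trimmed-down, hypothetical fragment of a store page; the real dominos.jp pages are of course much larger, and the fragment below exists only to show what the new selectors extract.

```python
from parsel import Selector

# Hypothetical fragment containing only the elements the new XPaths rely on.
html = """
<h1 class="storetitle"> Shibuya Store </h1>
<input id="store-lat" value="35.659"/>
<input id="store-lon" value="139.700"/>
"""

sel = Selector(text=html)
print(sel.xpath('normalize-space(//h1[@class="storetitle"][1]/text())').get())  # Shibuya Store
print(sel.xpath('normalize-space(//input[@id="store-lat"]/@value)').get())      # 35.659
print(sel.xpath('normalize-space(//input[@id="store-lon"]/@value)').get())      # 139.700
```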
gh_patches_debug_23323 | rasdani/github-patches | git_diff | facebookresearch__xformers-326 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Rotary embeddings convert queries and keys to float32 when using mixed precision training
Not sure if this is expected behaviour. The problem is that the cos_sin table in the RotaryEmbedding class is stored in float32 format, so the returned queries and keys get converted to float32 as well.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `xformers/components/positional_embedding/rotary.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
2 #
3 # This source code is licensed under the BSD license found in the
4 # LICENSE file in the root directory of this source tree.
5
6
7 # CREDITS: This implementation is inspired by GPT-NeoX https://github.com/EleutherAI/gpt-neox
8 # NOTE: Almost the same right now, moving parts to Triton is the next step
9
10 from typing import Tuple
11
12 import torch
13
14
15 def rotate_half(x):
16 x1, x2 = x.chunk(2, dim=-1)
17 return torch.cat((-x2, x1), dim=-1)
18
19
20 @torch.jit.script
21 def apply_rotary_pos_emb(x, cos, sin):
22 # NOTE: This could probably be moved to Triton
23
24 # Handle a possible sequence length mismatch in between q and k
25 cos = cos[:, :, : x.shape[-2], :]
26 sin = sin[:, :, : x.shape[-2], :]
27
28 return (x * cos) + (rotate_half(x) * sin)
29
30
31 class RotaryEmbedding(torch.nn.Module):
32 """
33 The rotary position embeddings from RoFormer_ (Su et. al).
34 A crucial insight from the method is that the query and keys are
35 transformed by rotation matrices which depend on the relative positions.
36
37 Other implementations are available in the Rotary Transformer repo_ and in
38 GPT-NeoX_, GPT-NeoX was an inspiration
39
40 .. _RoFormer: https://arxiv.org/abs/2104.09864
41 .. _repo: https://github.com/ZhuiyiTechnology/roformer
42 .. _GPT-NeoX: https://github.com/EleutherAI/gpt-neox
43
44
45 .. warning: Please note that this embedding is not registered on purpose, as it is transformative
46 (it does not create the embedding dimension) and will likely be picked up (imported) on a ad-hoc basis
47 """
48
49 def __init__(self, dim_model: int, *_, **__):
50 super().__init__()
51 # Generate and save the inverse frequency buffer (non trainable)
52 inv_freq = 1.0 / (10000 ** (torch.arange(0, dim_model, 2).float() / dim_model))
53 self.register_buffer("inv_freq", inv_freq)
54
55 self._seq_len_cached = None
56 self._cos_cached = None
57 self._sin_cached = None
58
59 def _update_cos_sin_tables(self, x, seq_dimension=1):
60 seq_len = x.shape[seq_dimension]
61
62 # Reset the tables if the sequence length has changed,
63 # or if we're on a new device (possibly due to tracing for instance)
64 if seq_len != self._seq_len_cached or self._cos_cached.device != x.device:
65 self._seq_len_cached = seq_len
66 t = torch.arange(x.shape[seq_dimension], device=x.device).type_as(
67 self.inv_freq
68 )
69 freqs = torch.einsum("i,j->ij", t, self.inv_freq)
70 emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
71
72 self._cos_cached = emb.cos()[None, None, :, :]
73 self._sin_cached = emb.sin()[None, None, :, :]
74
75 return self._cos_cached, self._sin_cached
76
77 def forward(
78 self, q: torch.Tensor, k: torch.Tensor
79 ) -> Tuple[torch.Tensor, torch.Tensor]:
80 self._cos_cached, self._sin_cached = self._update_cos_sin_tables(
81 k, seq_dimension=-2
82 )
83
84 return (
85 apply_rotary_pos_emb(q, self._cos_cached, self._sin_cached),
86 apply_rotary_pos_emb(k, self._cos_cached, self._sin_cached),
87 )
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/xformers/components/positional_embedding/rotary.py b/xformers/components/positional_embedding/rotary.py
--- a/xformers/components/positional_embedding/rotary.py
+++ b/xformers/components/positional_embedding/rotary.py
@@ -61,16 +61,20 @@
# Reset the tables if the sequence length has changed,
# or if we're on a new device (possibly due to tracing for instance)
- if seq_len != self._seq_len_cached or self._cos_cached.device != x.device:
+ if (
+ seq_len != self._seq_len_cached
+ or self._cos_cached.device != x.device
+ or self._cos_cached.dtype != x.dtype
+ ):
self._seq_len_cached = seq_len
- t = torch.arange(x.shape[seq_dimension], device=x.device).type_as(
- self.inv_freq
+ t = torch.arange(
+ x.shape[seq_dimension], device=x.device, dtype=torch.float32
)
- freqs = torch.einsum("i,j->ij", t, self.inv_freq)
+ freqs = torch.einsum("i,j->ij", t, self.inv_freq.to(x.dtype))
emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
- self._cos_cached = emb.cos()[None, None, :, :]
- self._sin_cached = emb.sin()[None, None, :, :]
+ self._cos_cached = emb.cos()[None, None, :, :].to(x.dtype)
+ self._sin_cached = emb.sin()[None, None, :, :].to(x.dtype)
return self._cos_cached, self._sin_cached
| {"golden_diff": "diff --git a/xformers/components/positional_embedding/rotary.py b/xformers/components/positional_embedding/rotary.py\n--- a/xformers/components/positional_embedding/rotary.py\n+++ b/xformers/components/positional_embedding/rotary.py\n@@ -61,16 +61,20 @@\n \n # Reset the tables if the sequence length has changed,\n # or if we're on a new device (possibly due to tracing for instance)\n- if seq_len != self._seq_len_cached or self._cos_cached.device != x.device:\n+ if (\n+ seq_len != self._seq_len_cached\n+ or self._cos_cached.device != x.device\n+ or self._cos_cached.dtype != x.dtype\n+ ):\n self._seq_len_cached = seq_len\n- t = torch.arange(x.shape[seq_dimension], device=x.device).type_as(\n- self.inv_freq\n+ t = torch.arange(\n+ x.shape[seq_dimension], device=x.device, dtype=torch.float32\n )\n- freqs = torch.einsum(\"i,j->ij\", t, self.inv_freq)\n+ freqs = torch.einsum(\"i,j->ij\", t, self.inv_freq.to(x.dtype))\n emb = torch.cat((freqs, freqs), dim=-1).to(x.device)\n \n- self._cos_cached = emb.cos()[None, None, :, :]\n- self._sin_cached = emb.sin()[None, None, :, :]\n+ self._cos_cached = emb.cos()[None, None, :, :].to(x.dtype)\n+ self._sin_cached = emb.sin()[None, None, :, :].to(x.dtype)\n \n return self._cos_cached, self._sin_cached\n", "issue": "Rotary embeddings convert queries and keys to float32 when using mixed precision training\nNot sure, if this is expected behaviour. The problem is that the cos_sin table in the RotaryEmbedding class is stored in float32 format, thus the returned queries and keys get converted to float32 aswell.\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\n\n# CREDITS: This implementation is inspired by GPT-NeoX https://github.com/EleutherAI/gpt-neox\n# NOTE: Almost the same right now, moving parts to Triton is the next step\n\nfrom typing import Tuple\n\nimport torch\n\n\ndef rotate_half(x):\n x1, x2 = x.chunk(2, dim=-1)\n return torch.cat((-x2, x1), dim=-1)\n\n\[email protected]\ndef apply_rotary_pos_emb(x, cos, sin):\n # NOTE: This could probably be moved to Triton\n\n # Handle a possible sequence length mismatch in between q and k\n cos = cos[:, :, : x.shape[-2], :]\n sin = sin[:, :, : x.shape[-2], :]\n\n return (x * cos) + (rotate_half(x) * sin)\n\n\nclass RotaryEmbedding(torch.nn.Module):\n \"\"\"\n The rotary position embeddings from RoFormer_ (Su et. al).\n A crucial insight from the method is that the query and keys are\n transformed by rotation matrices which depend on the relative positions.\n\n Other implementations are available in the Rotary Transformer repo_ and in\n GPT-NeoX_, GPT-NeoX was an inspiration\n\n .. _RoFormer: https://arxiv.org/abs/2104.09864\n .. _repo: https://github.com/ZhuiyiTechnology/roformer\n .. _GPT-NeoX: https://github.com/EleutherAI/gpt-neox\n\n\n .. 
warning: Please note that this embedding is not registered on purpose, as it is transformative\n (it does not create the embedding dimension) and will likely be picked up (imported) on a ad-hoc basis\n \"\"\"\n\n def __init__(self, dim_model: int, *_, **__):\n super().__init__()\n # Generate and save the inverse frequency buffer (non trainable)\n inv_freq = 1.0 / (10000 ** (torch.arange(0, dim_model, 2).float() / dim_model))\n self.register_buffer(\"inv_freq\", inv_freq)\n\n self._seq_len_cached = None\n self._cos_cached = None\n self._sin_cached = None\n\n def _update_cos_sin_tables(self, x, seq_dimension=1):\n seq_len = x.shape[seq_dimension]\n\n # Reset the tables if the sequence length has changed,\n # or if we're on a new device (possibly due to tracing for instance)\n if seq_len != self._seq_len_cached or self._cos_cached.device != x.device:\n self._seq_len_cached = seq_len\n t = torch.arange(x.shape[seq_dimension], device=x.device).type_as(\n self.inv_freq\n )\n freqs = torch.einsum(\"i,j->ij\", t, self.inv_freq)\n emb = torch.cat((freqs, freqs), dim=-1).to(x.device)\n\n self._cos_cached = emb.cos()[None, None, :, :]\n self._sin_cached = emb.sin()[None, None, :, :]\n\n return self._cos_cached, self._sin_cached\n\n def forward(\n self, q: torch.Tensor, k: torch.Tensor\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n self._cos_cached, self._sin_cached = self._update_cos_sin_tables(\n k, seq_dimension=-2\n )\n\n return (\n apply_rotary_pos_emb(q, self._cos_cached, self._sin_cached),\n apply_rotary_pos_emb(k, self._cos_cached, self._sin_cached),\n )\n", "path": "xformers/components/positional_embedding/rotary.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\n\n# CREDITS: This implementation is inspired by GPT-NeoX https://github.com/EleutherAI/gpt-neox\n# NOTE: Almost the same right now, moving parts to Triton is the next step\n\nfrom typing import Tuple\n\nimport torch\n\n\ndef rotate_half(x):\n x1, x2 = x.chunk(2, dim=-1)\n return torch.cat((-x2, x1), dim=-1)\n\n\[email protected]\ndef apply_rotary_pos_emb(x, cos, sin):\n # NOTE: This could probably be moved to Triton\n\n # Handle a possible sequence length mismatch in between q and k\n cos = cos[:, :, : x.shape[-2], :]\n sin = sin[:, :, : x.shape[-2], :]\n\n return (x * cos) + (rotate_half(x) * sin)\n\n\nclass RotaryEmbedding(torch.nn.Module):\n \"\"\"\n The rotary position embeddings from RoFormer_ (Su et. al).\n A crucial insight from the method is that the query and keys are\n transformed by rotation matrices which depend on the relative positions.\n\n Other implementations are available in the Rotary Transformer repo_ and in\n GPT-NeoX_, GPT-NeoX was an inspiration\n\n .. _RoFormer: https://arxiv.org/abs/2104.09864\n .. _repo: https://github.com/ZhuiyiTechnology/roformer\n .. _GPT-NeoX: https://github.com/EleutherAI/gpt-neox\n\n\n .. 
warning: Please note that this embedding is not registered on purpose, as it is transformative\n (it does not create the embedding dimension) and will likely be picked up (imported) on a ad-hoc basis\n \"\"\"\n\n def __init__(self, dim_model: int, *_, **__):\n super().__init__()\n # Generate and save the inverse frequency buffer (non trainable)\n inv_freq = 1.0 / (10000 ** (torch.arange(0, dim_model, 2).float() / dim_model))\n self.register_buffer(\"inv_freq\", inv_freq)\n\n self._seq_len_cached = None\n self._cos_cached = None\n self._sin_cached = None\n\n def _update_cos_sin_tables(self, x, seq_dimension=1):\n seq_len = x.shape[seq_dimension]\n\n # Reset the tables if the sequence length has changed,\n # or if we're on a new device (possibly due to tracing for instance)\n if (\n seq_len != self._seq_len_cached\n or self._cos_cached.device != x.device\n or self._cos_cached.dtype != x.dtype\n ):\n self._seq_len_cached = seq_len\n t = torch.arange(\n x.shape[seq_dimension], device=x.device, dtype=torch.float32\n )\n freqs = torch.einsum(\"i,j->ij\", t, self.inv_freq.to(x.dtype))\n emb = torch.cat((freqs, freqs), dim=-1).to(x.device)\n\n self._cos_cached = emb.cos()[None, None, :, :].to(x.dtype)\n self._sin_cached = emb.sin()[None, None, :, :].to(x.dtype)\n\n return self._cos_cached, self._sin_cached\n\n def forward(\n self, q: torch.Tensor, k: torch.Tensor\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n self._cos_cached, self._sin_cached = self._update_cos_sin_tables(\n k, seq_dimension=-2\n )\n\n return (\n apply_rotary_pos_emb(q, self._cos_cached, self._sin_cached),\n apply_rotary_pos_emb(k, self._cos_cached, self._sin_cached),\n )\n", "path": "xformers/components/positional_embedding/rotary.py"}]} | 1,333 | 383 |
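Editorial note on this record: a minimal way to check the effect of the change, assuming an xformers build that already contains this patch. Feed half-precision queries and keys through ``RotaryEmbedding`` and confirm the dtype survives the round trip; before the patch the cached cos/sin tables were float32, so the outputs were silently upcast. The shapes below are arbitrary.

```python
import torch
from xformers.components.positional_embedding.rotary import RotaryEmbedding

rope = RotaryEmbedding(dim_model=64)

# [batch, heads, seq_len, head_dim] in half precision, as under mixed-precision training.
q = torch.randn(1, 8, 128, 64, dtype=torch.float16)
k = torch.randn(1, 8, 128, 64, dtype=torch.float16)

q_rot, k_rot = rope(q, k)
print(q_rot.dtype, k_rot.dtype)  # expected with the patch: torch.float16 torch.float16
```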
gh_patches_debug_13792 | rasdani/github-patches | git_diff | kubeflow__pipelines-7985 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[bug] TFJob launcher pipeline task fails when `delete_finished_tfjob` flag is `True`
https://github.com/kubeflow/pipelines/blob/127dab4d4671849d596c455f4619ea807d22f6ea/components/kubeflow/launcher/component.yaml#L19
When the `delete_finished_tfjob` flag is `True`, the TFJob launcher task fails with the error below.
```
Traceback (most recent call last):
File "/ml/launch_tfjob.py", line 136, in <module>
main()
File "/ml/launch_tfjob.py", line 133, in main
tfjob.delete(args.name, args.namespace)
File "/ml/launch_crd.py", line 115, in delete
body)
TypeError: delete_namespaced_custom_object() takes exactly 6 arguments (7 given)
```
I think it's some kind of Kubernetes client SDK version issue in the `nikenano/launchernew:latest` container image.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `components/kubeflow/common/launch_crd.py`
Content:
```
1 # Copyright 2019 kubeflow.org.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 #
6 # http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 import datetime
15 import json
16 import logging
17 import multiprocessing
18 import time
19
20 from kubernetes import client as k8s_client
21 from kubernetes.client import rest
22
23 logger = logging.getLogger(__name__)
24
25 class K8sCR(object):
26 def __init__(self, group, plural, version, client):
27 self.group = group
28 self.plural = plural
29 self.version = version
30 self.client = k8s_client.CustomObjectsApi(client)
31
32 def wait_for_condition(self,
33 namespace,
34 name,
35 expected_conditions=[],
36 timeout=datetime.timedelta(days=365),
37 polling_interval=datetime.timedelta(seconds=30),
38 status_callback=None):
39 """Waits until any of the specified conditions occur.
40 Args:
41 namespace: namespace for the CR.
42 name: Name of the CR.
43 expected_conditions: A list of conditions. Function waits until any of the
44 supplied conditions is reached.
45 timeout: How long to wait for the CR.
46 polling_interval: How often to poll for the status of the CR.
47 status_callback: (Optional): Callable. If supplied this callable is
48 invoked after we poll the CR. Callable takes a single argument which
49 is the CR.
50 """
51 end_time = datetime.datetime.now() + timeout
52 while True:
53 try:
54 results = self.client.get_namespaced_custom_object(
55 self.group, self.version, namespace, self.plural, name)
56 except Exception as e:
57 logger.error("There was a problem waiting for %s/%s %s in namespace %s; Exception: %s",
58 self.group, self.plural, name, namespace, e)
59 raise
60
61 if results:
62 if status_callback:
63 status_callback(results)
64 expected, condition = self.is_expected_conditions(results, expected_conditions)
65 if expected:
66 logger.info("%s/%s %s in namespace %s has reached the expected condition: %s.",
67 self.group, self.plural, name, namespace, condition)
68 return results
69 else:
70 if condition:
71 logger.info("Current condition of %s/%s %s in namespace %s is %s.",
72 self.group, self.plural, name, namespace, condition)
73
74 if datetime.datetime.now() + polling_interval > end_time:
75 raise Exception(
76 "Timeout waiting for {0}/{1} {2} in namespace {3} to enter one of the "
77 "conditions {4}.".format(self.group, self.plural, name, namespace, expected_conditions))
78
79 time.sleep(polling_interval.seconds)
80
81 def is_expected_conditions(self, inst, expected_conditions):
82 conditions = inst.get('status', {}).get("conditions")
83 if not conditions:
84 return False, ""
85 if conditions[-1]["type"] in expected_conditions and conditions[-1]["status"] == "True":
86 return True, conditions[-1]["type"]
87 else:
88 return False, conditions[-1]["type"]
89
90 def create(self, spec):
91 """Create a CR.
92 Args:
93 spec: The spec for the CR.
94 """
95 try:
96 # Create a Resource
97 namespace = spec["metadata"].get("namespace", "default")
98 logger.info("Creating %s/%s %s in namespace %s.",
99 self.group, self.plural, spec["metadata"]["name"], namespace)
100 api_response = self.client.create_namespaced_custom_object(
101 self.group, self.version, namespace, self.plural, spec)
102 logger.info("Created %s/%s %s in namespace %s.",
103 self.group, self.plural, spec["metadata"]["name"], namespace)
104 return api_response
105 except rest.ApiException as e:
106 self._log_and_raise_exception(e, "create")
107
108 def delete(self, name, namespace):
109 try:
110 body = {
111 # Set garbage collection so that CR won't be deleted until all
112 # owned references are deleted.
113 "propagationPolicy": "Foreground",
114 }
115 logger.info("Deleteing %s/%s %s in namespace %s.",
116 self.group, self.plural, name, namespace)
117 api_response = self.client.delete_namespaced_custom_object(
118 self.group,
119 self.version,
120 namespace,
121 self.plural,
122 name,
123 body)
124 logger.info("Deleted %s/%s %s in namespace %s.",
125 self.group, self.plural, name, namespace)
126 return api_response
127 except rest.ApiException as e:
128 self._log_and_raise_exception(e, "delete")
129
130 def _log_and_raise_exception(self, ex, action):
131 message = ""
132 if ex.message:
133 message = ex.message
134 if ex.body:
135 try:
136 body = json.loads(ex.body)
137 message = body.get("message")
138 except ValueError:
139 logger.error("Exception when %s %s/%s: %s", action, self.group, self.plural, ex.body)
140 raise
141
142 logger.error("Exception when %s %s/%s: %s", action, self.group, self.plural, ex.body)
143 raise ex
144
145
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/components/kubeflow/common/launch_crd.py b/components/kubeflow/common/launch_crd.py
--- a/components/kubeflow/common/launch_crd.py
+++ b/components/kubeflow/common/launch_crd.py
@@ -115,12 +115,12 @@
logger.info("Deleteing %s/%s %s in namespace %s.",
self.group, self.plural, name, namespace)
api_response = self.client.delete_namespaced_custom_object(
- self.group,
- self.version,
- namespace,
- self.plural,
- name,
- body)
+ group=self.group,
+ version=self.version,
+ namespace=namespace,
+ plural=self.plural,
+ name=name,
+ body=body)
logger.info("Deleted %s/%s %s in namespace %s.",
self.group, self.plural, name, namespace)
return api_response
| {"golden_diff": "diff --git a/components/kubeflow/common/launch_crd.py b/components/kubeflow/common/launch_crd.py\n--- a/components/kubeflow/common/launch_crd.py\n+++ b/components/kubeflow/common/launch_crd.py\n@@ -115,12 +115,12 @@\n logger.info(\"Deleteing %s/%s %s in namespace %s.\",\n self.group, self.plural, name, namespace)\n api_response = self.client.delete_namespaced_custom_object(\n- self.group,\n- self.version,\n- namespace,\n- self.plural,\n- name,\n- body)\n+ group=self.group,\n+ version=self.version,\n+ namespace=namespace,\n+ plural=self.plural,\n+ name=name,\n+ body=body)\n logger.info(\"Deleted %s/%s %s in namespace %s.\",\n self.group, self.plural, name, namespace)\n return api_response\n", "issue": "[bug] TFJob launcher pipeline task fails when `delete_finished_tfjob` flag is `True`\nhttps://github.com/kubeflow/pipelines/blob/127dab4d4671849d596c455f4619ea807d22f6ea/components/kubeflow/launcher/component.yaml#L19\r\n\r\nWhen `delete_finished_tfjob` flag is `True`, TFJob launcher task fails with error below.\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/ml/launch_tfjob.py\", line 136, in <module>\r\n main()\r\n File \"/ml/launch_tfjob.py\", line 133, in main\r\n tfjob.delete(args.name, args.namespace)\r\n File \"/ml/launch_crd.py\", line 115, in delete\r\n body)\r\nTypeError: delete_namespaced_custom_object() takes exactly 6 arguments (7 given)\r\n```\r\n\r\nI think it's some kind of kubernetes client SDK version issue in `nikenano/launchernew:latest` container image.\n", "before_files": [{"content": "# Copyright 2019 kubeflow.org.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport datetime\nimport json\nimport logging\nimport multiprocessing\nimport time\n\nfrom kubernetes import client as k8s_client\nfrom kubernetes.client import rest\n\nlogger = logging.getLogger(__name__)\n\nclass K8sCR(object):\n def __init__(self, group, plural, version, client):\n self.group = group\n self.plural = plural\n self.version = version\n self.client = k8s_client.CustomObjectsApi(client)\n\n def wait_for_condition(self,\n namespace,\n name,\n expected_conditions=[],\n timeout=datetime.timedelta(days=365),\n polling_interval=datetime.timedelta(seconds=30),\n status_callback=None):\n \"\"\"Waits until any of the specified conditions occur.\n Args:\n namespace: namespace for the CR.\n name: Name of the CR.\n expected_conditions: A list of conditions. Function waits until any of the\n supplied conditions is reached.\n timeout: How long to wait for the CR.\n polling_interval: How often to poll for the status of the CR.\n status_callback: (Optional): Callable. If supplied this callable is\n invoked after we poll the CR. 
Callable takes a single argument which\n is the CR.\n \"\"\"\n end_time = datetime.datetime.now() + timeout\n while True:\n try:\n results = self.client.get_namespaced_custom_object(\n self.group, self.version, namespace, self.plural, name)\n except Exception as e:\n logger.error(\"There was a problem waiting for %s/%s %s in namespace %s; Exception: %s\",\n self.group, self.plural, name, namespace, e)\n raise\n\n if results:\n if status_callback:\n status_callback(results)\n expected, condition = self.is_expected_conditions(results, expected_conditions)\n if expected:\n logger.info(\"%s/%s %s in namespace %s has reached the expected condition: %s.\",\n self.group, self.plural, name, namespace, condition)\n return results\n else:\n if condition:\n logger.info(\"Current condition of %s/%s %s in namespace %s is %s.\",\n self.group, self.plural, name, namespace, condition)\n\n if datetime.datetime.now() + polling_interval > end_time:\n raise Exception(\n \"Timeout waiting for {0}/{1} {2} in namespace {3} to enter one of the \"\n \"conditions {4}.\".format(self.group, self.plural, name, namespace, expected_conditions))\n\n time.sleep(polling_interval.seconds)\n\n def is_expected_conditions(self, inst, expected_conditions):\n conditions = inst.get('status', {}).get(\"conditions\")\n if not conditions:\n return False, \"\"\n if conditions[-1][\"type\"] in expected_conditions and conditions[-1][\"status\"] == \"True\":\n return True, conditions[-1][\"type\"]\n else:\n return False, conditions[-1][\"type\"]\n\n def create(self, spec):\n \"\"\"Create a CR.\n Args:\n spec: The spec for the CR.\n \"\"\"\n try:\n # Create a Resource\n namespace = spec[\"metadata\"].get(\"namespace\", \"default\")\n logger.info(\"Creating %s/%s %s in namespace %s.\",\n self.group, self.plural, spec[\"metadata\"][\"name\"], namespace)\n api_response = self.client.create_namespaced_custom_object(\n self.group, self.version, namespace, self.plural, spec)\n logger.info(\"Created %s/%s %s in namespace %s.\",\n self.group, self.plural, spec[\"metadata\"][\"name\"], namespace)\n return api_response\n except rest.ApiException as e:\n self._log_and_raise_exception(e, \"create\")\n\n def delete(self, name, namespace):\n try:\n body = {\n # Set garbage collection so that CR won't be deleted until all\n # owned references are deleted.\n \"propagationPolicy\": \"Foreground\",\n }\n logger.info(\"Deleteing %s/%s %s in namespace %s.\",\n self.group, self.plural, name, namespace)\n api_response = self.client.delete_namespaced_custom_object(\n self.group,\n self.version,\n namespace,\n self.plural,\n name,\n body)\n logger.info(\"Deleted %s/%s %s in namespace %s.\",\n self.group, self.plural, name, namespace)\n return api_response\n except rest.ApiException as e:\n self._log_and_raise_exception(e, \"delete\")\n\n def _log_and_raise_exception(self, ex, action):\n message = \"\"\n if ex.message:\n message = ex.message\n if ex.body:\n try:\n body = json.loads(ex.body)\n message = body.get(\"message\")\n except ValueError:\n logger.error(\"Exception when %s %s/%s: %s\", action, self.group, self.plural, ex.body)\n raise\n\n logger.error(\"Exception when %s %s/%s: %s\", action, self.group, self.plural, ex.body)\n raise ex\n\n", "path": "components/kubeflow/common/launch_crd.py"}], "after_files": [{"content": "# Copyright 2019 kubeflow.org.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# 
http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport datetime\nimport json\nimport logging\nimport multiprocessing\nimport time\n\nfrom kubernetes import client as k8s_client\nfrom kubernetes.client import rest\n\nlogger = logging.getLogger(__name__)\n\nclass K8sCR(object):\n def __init__(self, group, plural, version, client):\n self.group = group\n self.plural = plural\n self.version = version\n self.client = k8s_client.CustomObjectsApi(client)\n\n def wait_for_condition(self,\n namespace,\n name,\n expected_conditions=[],\n timeout=datetime.timedelta(days=365),\n polling_interval=datetime.timedelta(seconds=30),\n status_callback=None):\n \"\"\"Waits until any of the specified conditions occur.\n Args:\n namespace: namespace for the CR.\n name: Name of the CR.\n expected_conditions: A list of conditions. Function waits until any of the\n supplied conditions is reached.\n timeout: How long to wait for the CR.\n polling_interval: How often to poll for the status of the CR.\n status_callback: (Optional): Callable. If supplied this callable is\n invoked after we poll the CR. Callable takes a single argument which\n is the CR.\n \"\"\"\n end_time = datetime.datetime.now() + timeout\n while True:\n try:\n results = self.client.get_namespaced_custom_object(\n self.group, self.version, namespace, self.plural, name)\n except Exception as e:\n logger.error(\"There was a problem waiting for %s/%s %s in namespace %s; Exception: %s\",\n self.group, self.plural, name, namespace, e)\n raise\n\n if results:\n if status_callback:\n status_callback(results)\n expected, condition = self.is_expected_conditions(results, expected_conditions)\n if expected:\n logger.info(\"%s/%s %s in namespace %s has reached the expected condition: %s.\",\n self.group, self.plural, name, namespace, condition)\n return results\n else:\n if condition:\n logger.info(\"Current condition of %s/%s %s in namespace %s is %s.\",\n self.group, self.plural, name, namespace, condition)\n\n if datetime.datetime.now() + polling_interval > end_time:\n raise Exception(\n \"Timeout waiting for {0}/{1} {2} in namespace {3} to enter one of the \"\n \"conditions {4}.\".format(self.group, self.plural, name, namespace, expected_conditions))\n\n time.sleep(polling_interval.seconds)\n\n def is_expected_conditions(self, inst, expected_conditions):\n conditions = inst.get('status', {}).get(\"conditions\")\n if not conditions:\n return False, \"\"\n if conditions[-1][\"type\"] in expected_conditions and conditions[-1][\"status\"] == \"True\":\n return True, conditions[-1][\"type\"]\n else:\n return False, conditions[-1][\"type\"]\n\n def create(self, spec):\n \"\"\"Create a CR.\n Args:\n spec: The spec for the CR.\n \"\"\"\n try:\n # Create a Resource\n namespace = spec[\"metadata\"].get(\"namespace\", \"default\")\n logger.info(\"Creating %s/%s %s in namespace %s.\",\n self.group, self.plural, spec[\"metadata\"][\"name\"], namespace)\n api_response = self.client.create_namespaced_custom_object(\n self.group, self.version, namespace, self.plural, spec)\n logger.info(\"Created %s/%s %s in namespace %s.\",\n self.group, self.plural, spec[\"metadata\"][\"name\"], namespace)\n return api_response\n except rest.ApiException as e:\n 
self._log_and_raise_exception(e, \"create\")\n\n def delete(self, name, namespace):\n try:\n body = {\n # Set garbage collection so that CR won't be deleted until all\n # owned references are deleted.\n \"propagationPolicy\": \"Foreground\",\n }\n logger.info(\"Deleteing %s/%s %s in namespace %s.\",\n self.group, self.plural, name, namespace)\n api_response = self.client.delete_namespaced_custom_object(\n group=self.group,\n version=self.version,\n namespace=namespace,\n plural=self.plural,\n name=name,\n body=body)\n logger.info(\"Deleted %s/%s %s in namespace %s.\",\n self.group, self.plural, name, namespace)\n return api_response\n except rest.ApiException as e:\n self._log_and_raise_exception(e, \"delete\")\n\n def _log_and_raise_exception(self, ex, action):\n message = \"\"\n if ex.message:\n message = ex.message\n if ex.body:\n try:\n body = json.loads(ex.body)\n message = body.get(\"message\")\n except ValueError:\n logger.error(\"Exception when %s %s/%s: %s\", action, self.group, self.plural, ex.body)\n raise\n\n logger.error(\"Exception when %s %s/%s: %s\", action, self.group, self.plural, ex.body)\n raise ex\n\n", "path": "components/kubeflow/common/launch_crd.py"}]} | 2,035 | 210 |
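Editorial note on this record: the error message suggests the launcher image pins a `kubernetes` client whose `delete_namespaced_custom_object` expects a different positional signature, and passing every argument by keyword, as the patch does, sidesteps that kind of positional drift. Below is a hedged, standalone sketch of the same call; the TFJob coordinates are made up, and running it requires a reachable cluster plus a kubeconfig (or in-cluster config).

```python
from kubernetes import client, config

config.load_kube_config()  # inside a pod, use config.load_incluster_config()
api = client.CustomObjectsApi()

# Hypothetical TFJob coordinates, for illustration only.
api.delete_namespaced_custom_object(
    group="kubeflow.org",
    version="v1",
    plural="tfjobs",
    namespace="kubeflow",
    name="my-tfjob",
    # Same foreground garbage-collection policy the launcher uses.
    body={"propagationPolicy": "Foreground"},
)
```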
gh_patches_debug_18196 | rasdani/github-patches | git_diff | joke2k__faker-1443 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Adding ABA Routing Number to Bank Provider
* Faker version: 8.1.2
Is there any interest in adding ABA routing numbers to the Bank provider? https://en.wikipedia.org/wiki/ABA_routing_transit_number
This would be helpful for my use case, and I already have code to generate these, but I wanted to check before putting more effort in.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `faker/providers/bank/__init__.py`
Content:
```
1 import re
2 import string
3
4 from string import ascii_uppercase
5
6 from .. import BaseProvider
7
8 localized = True
9 default_locale = 'en_GB'
10
11
12 class Provider(BaseProvider):
13 """Implement default bank provider for Faker.
14
15 .. important::
16 Bank codes, account numbers, and other ID's generated by this provider
17 are only valid in form, i.e. they conform to some standard/format, are
18 of the expected lengths, and have valid checksums (where applicable).
19 Results generated that turn out to be valid in real life are purely
20 coincidental.
21
22 Sources:
23
24 - https://en.wikipedia.org/wiki/International_Bank_Account_Number
25 - https://www.theswiftcodes.com/swift-code-checker/
26 """
27
28 ALPHA = {c: str(ord(c) % 55) for c in string.ascii_uppercase}
29 bban_format = '????#############'
30 country_code = 'GB'
31
32 def bank_country(self):
33 """Generate the bank provider's ISO 3166-1 alpha-2 country code."""
34 return self.country_code
35
36 def bban(self):
37 """Generate a Basic Bank Account Number (BBAN)."""
38 temp = re.sub(r'\?',
39 lambda x: self.random_element(ascii_uppercase),
40 self.bban_format)
41 return self.numerify(temp)
42
43 def iban(self):
44 """Generate an International Bank Account Number (IBAN)."""
45 bban = self.bban()
46
47 check = bban + self.country_code + '00'
48 check = int(''.join(self.ALPHA.get(c, c) for c in check))
49 check = 98 - (check % 97)
50 check = str(check).zfill(2)
51
52 return self.country_code + check + bban
53
54 def swift8(self, use_dataset=False):
55 """Generate an 8-digit SWIFT code.
56
57 This method uses |swift| under the hood with the ``length`` argument set
58 to ``8`` and with the ``primary`` argument omitted. All 8-digit SWIFT
59 codes already refer to the primary branch/office.
60
61 :sample:
62 :sample: use_dataset=True
63 """
64 return self.swift(length=8, use_dataset=use_dataset)
65
66 def swift11(self, primary=None, use_dataset=False):
67 """Generate an 11-digit SWIFT code.
68
69 This method uses |swift| under the hood with the ``length`` argument set
70 to ``11``. If ``primary`` is set to ``True``, the SWIFT code will always
71 end with ``'XXX'``. All 11-digit SWIFT codes use this convention to
72 refer to the primary branch/office.
73
74 :sample:
75 :sample: use_dataset=True
76 """
77 return self.swift(length=11, primary=primary, use_dataset=use_dataset)
78
79 def swift(self, length=None, primary=None, use_dataset=False):
80 """Generate a SWIFT code.
81
82 SWIFT codes, reading from left to right, are composed of a 4 alphabet
83 character bank code, a 2 alphabet character country code, a 2
84 alphanumeric location code, and an optional 3 alphanumeric branch code.
85 This means SWIFT codes can only have 8 or 11 characters, so the value of
86 ``length`` can only be ``None`` or the integers ``8`` or ``11``. If the
87 value is ``None``, then a value of ``8`` or ``11`` will randomly be
88 assigned.
89
90 Because all 8-digit SWIFT codes already refer to the primary branch or
91 office, the ``primary`` argument only has an effect if the value of
92 ``length`` is ``11``. If ``primary`` is ``True`` and ``length`` is
93 ``11``, the 11-digit SWIFT codes generated will always end in ``'XXX'``
94 to denote that they belong to primary branches/offices.
95
96 For extra authenticity, localized providers may opt to include SWIFT
97 bank codes, location codes, and branch codes used in their respective
98 locales. If ``use_dataset`` is ``True``, this method will generate SWIFT
99 codes based on those locale-specific codes if included. If those codes
100 were not included, then it will behave as if ``use_dataset`` were
101 ``False``, and in that mode, all those codes will just be randomly
102 generated as per the specification.
103
104 :sample:
105 :sample: length=8
106 :sample: length=8, use_dataset=True
107 :sample: length=11
108 :sample: length=11, primary=True
109 :sample: length=11, use_dataset=True
110 :sample: length=11, primary=True, use_dataset=True
111 """
112 if length is None:
113 length = self.random_element((8, 11))
114 if length not in (8, 11):
115 raise AssertionError('length can only be 8 or 11')
116
117 if use_dataset and hasattr(self, 'swift_bank_codes'):
118 bank_code = self.random_element(self.swift_bank_codes)
119 else:
120 bank_code = self.lexify('????', letters=string.ascii_uppercase)
121
122 if use_dataset and hasattr(self, 'swift_location_codes'):
123 location_code = self.random_element(self.swift_location_codes)
124 else:
125 location_code = self.lexify('??', letters=string.ascii_uppercase + string.digits)
126
127 if length == 8:
128 return bank_code + self.country_code + location_code
129
130 if primary:
131 branch_code = 'XXX'
132 elif use_dataset and hasattr(self, 'swift_branch_codes'):
133 branch_code = self.random_element(self.swift_branch_codes)
134 else:
135 branch_code = self.lexify('???', letters=string.ascii_uppercase + string.digits)
136
137 return bank_code + self.country_code + location_code + branch_code
138
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/faker/providers/bank/__init__.py b/faker/providers/bank/__init__.py
--- a/faker/providers/bank/__init__.py
+++ b/faker/providers/bank/__init__.py
@@ -1,6 +1,7 @@
import re
import string
+from math import ceil
from string import ascii_uppercase
from .. import BaseProvider
@@ -29,6 +30,19 @@
bban_format = '????#############'
country_code = 'GB'
+ def aba(self):
+ """Generate an ABA routing transit number."""
+ fed_num = self.random_int(min=1, max=12)
+ rand = self.numerify('######')
+ aba = f"{fed_num:02}{rand}"
+
+ # calculate check digit
+ d = [int(n) for n in aba]
+ chk_digit = 3*(d[0] + d[3] + d[6]) + 7*(d[1] + d[4] + d[7]) + d[2] + d[5]
+ chk_digit = ceil(chk_digit/10)*10 - chk_digit
+
+ return f"{aba}{chk_digit}"
+
def bank_country(self):
"""Generate the bank provider's ISO 3166-1 alpha-2 country code."""
return self.country_code
| {"golden_diff": "diff --git a/faker/providers/bank/__init__.py b/faker/providers/bank/__init__.py\n--- a/faker/providers/bank/__init__.py\n+++ b/faker/providers/bank/__init__.py\n@@ -1,6 +1,7 @@\n import re\n import string\n \n+from math import ceil\n from string import ascii_uppercase\n \n from .. import BaseProvider\n@@ -29,6 +30,19 @@\n bban_format = '????#############'\n country_code = 'GB'\n \n+ def aba(self):\n+ \"\"\"Generate an ABA routing transit number.\"\"\"\n+ fed_num = self.random_int(min=1, max=12)\n+ rand = self.numerify('######')\n+ aba = f\"{fed_num:02}{rand}\"\n+\n+ # calculate check digit\n+ d = [int(n) for n in aba]\n+ chk_digit = 3*(d[0] + d[3] + d[6]) + 7*(d[1] + d[4] + d[7]) + d[2] + d[5]\n+ chk_digit = ceil(chk_digit/10)*10 - chk_digit\n+\n+ return f\"{aba}{chk_digit}\"\n+\n def bank_country(self):\n \"\"\"Generate the bank provider's ISO 3166-1 alpha-2 country code.\"\"\"\n return self.country_code\n", "issue": "Adding ABA Routing Number to Bank Provider\n* Faker version:8.1.2\r\n\r\n\r\nIs there any interest in adding ABA routing numbers to the Bank provider? https://en.wikipedia.org/wiki/ABA_routing_transit_number\r\n\r\nThis would be helpful for my use case and have code to generate but wanted to check before putting more effort in.\n", "before_files": [{"content": "import re\nimport string\n\nfrom string import ascii_uppercase\n\nfrom .. import BaseProvider\n\nlocalized = True\ndefault_locale = 'en_GB'\n\n\nclass Provider(BaseProvider):\n \"\"\"Implement default bank provider for Faker.\n\n .. important::\n Bank codes, account numbers, and other ID's generated by this provider\n are only valid in form, i.e. they conform to some standard/format, are\n of the expected lengths, and have valid checksums (where applicable).\n Results generated that turn out to be valid in real life are purely\n coincidental.\n\n Sources:\n\n - https://en.wikipedia.org/wiki/International_Bank_Account_Number\n - https://www.theswiftcodes.com/swift-code-checker/\n \"\"\"\n\n ALPHA = {c: str(ord(c) % 55) for c in string.ascii_uppercase}\n bban_format = '????#############'\n country_code = 'GB'\n\n def bank_country(self):\n \"\"\"Generate the bank provider's ISO 3166-1 alpha-2 country code.\"\"\"\n return self.country_code\n\n def bban(self):\n \"\"\"Generate a Basic Bank Account Number (BBAN).\"\"\"\n temp = re.sub(r'\\?',\n lambda x: self.random_element(ascii_uppercase),\n self.bban_format)\n return self.numerify(temp)\n\n def iban(self):\n \"\"\"Generate an International Bank Account Number (IBAN).\"\"\"\n bban = self.bban()\n\n check = bban + self.country_code + '00'\n check = int(''.join(self.ALPHA.get(c, c) for c in check))\n check = 98 - (check % 97)\n check = str(check).zfill(2)\n\n return self.country_code + check + bban\n\n def swift8(self, use_dataset=False):\n \"\"\"Generate an 8-digit SWIFT code.\n\n This method uses |swift| under the hood with the ``length`` argument set\n to ``8`` and with the ``primary`` argument omitted. All 8-digit SWIFT\n codes already refer to the primary branch/office.\n\n :sample:\n :sample: use_dataset=True\n \"\"\"\n return self.swift(length=8, use_dataset=use_dataset)\n\n def swift11(self, primary=None, use_dataset=False):\n \"\"\"Generate an 11-digit SWIFT code.\n\n This method uses |swift| under the hood with the ``length`` argument set\n to ``11``. If ``primary`` is set to ``True``, the SWIFT code will always\n end with ``'XXX'``. 
All 11-digit SWIFT codes use this convention to\n refer to the primary branch/office.\n\n :sample:\n :sample: use_dataset=True\n \"\"\"\n return self.swift(length=11, primary=primary, use_dataset=use_dataset)\n\n def swift(self, length=None, primary=None, use_dataset=False):\n \"\"\"Generate a SWIFT code.\n\n SWIFT codes, reading from left to right, are composed of a 4 alphabet\n character bank code, a 2 alphabet character country code, a 2\n alphanumeric location code, and an optional 3 alphanumeric branch code.\n This means SWIFT codes can only have 8 or 11 characters, so the value of\n ``length`` can only be ``None`` or the integers ``8`` or ``11``. If the\n value is ``None``, then a value of ``8`` or ``11`` will randomly be\n assigned.\n\n Because all 8-digit SWIFT codes already refer to the primary branch or\n office, the ``primary`` argument only has an effect if the value of\n ``length`` is ``11``. If ``primary`` is ``True`` and ``length`` is\n ``11``, the 11-digit SWIFT codes generated will always end in ``'XXX'``\n to denote that they belong to primary branches/offices.\n\n For extra authenticity, localized providers may opt to include SWIFT\n bank codes, location codes, and branch codes used in their respective\n locales. If ``use_dataset`` is ``True``, this method will generate SWIFT\n codes based on those locale-specific codes if included. If those codes\n were not included, then it will behave as if ``use_dataset`` were\n ``False``, and in that mode, all those codes will just be randomly\n generated as per the specification.\n\n :sample:\n :sample: length=8\n :sample: length=8, use_dataset=True\n :sample: length=11\n :sample: length=11, primary=True\n :sample: length=11, use_dataset=True\n :sample: length=11, primary=True, use_dataset=True\n \"\"\"\n if length is None:\n length = self.random_element((8, 11))\n if length not in (8, 11):\n raise AssertionError('length can only be 8 or 11')\n\n if use_dataset and hasattr(self, 'swift_bank_codes'):\n bank_code = self.random_element(self.swift_bank_codes)\n else:\n bank_code = self.lexify('????', letters=string.ascii_uppercase)\n\n if use_dataset and hasattr(self, 'swift_location_codes'):\n location_code = self.random_element(self.swift_location_codes)\n else:\n location_code = self.lexify('??', letters=string.ascii_uppercase + string.digits)\n\n if length == 8:\n return bank_code + self.country_code + location_code\n\n if primary:\n branch_code = 'XXX'\n elif use_dataset and hasattr(self, 'swift_branch_codes'):\n branch_code = self.random_element(self.swift_branch_codes)\n else:\n branch_code = self.lexify('???', letters=string.ascii_uppercase + string.digits)\n\n return bank_code + self.country_code + location_code + branch_code\n", "path": "faker/providers/bank/__init__.py"}], "after_files": [{"content": "import re\nimport string\n\nfrom math import ceil\nfrom string import ascii_uppercase\n\nfrom .. import BaseProvider\n\nlocalized = True\ndefault_locale = 'en_GB'\n\n\nclass Provider(BaseProvider):\n \"\"\"Implement default bank provider for Faker.\n\n .. important::\n Bank codes, account numbers, and other ID's generated by this provider\n are only valid in form, i.e. 
they conform to some standard/format, are\n of the expected lengths, and have valid checksums (where applicable).\n Results generated that turn out to be valid in real life are purely\n coincidental.\n\n Sources:\n\n - https://en.wikipedia.org/wiki/International_Bank_Account_Number\n - https://www.theswiftcodes.com/swift-code-checker/\n \"\"\"\n\n ALPHA = {c: str(ord(c) % 55) for c in string.ascii_uppercase}\n bban_format = '????#############'\n country_code = 'GB'\n\n def aba(self):\n \"\"\"Generate an ABA routing transit number.\"\"\"\n fed_num = self.random_int(min=1, max=12)\n rand = self.numerify('######')\n aba = f\"{fed_num:02}{rand}\"\n\n # calculate check digit\n d = [int(n) for n in aba]\n chk_digit = 3*(d[0] + d[3] + d[6]) + 7*(d[1] + d[4] + d[7]) + d[2] + d[5]\n chk_digit = ceil(chk_digit/10)*10 - chk_digit\n\n return f\"{aba}{chk_digit}\"\n\n def bank_country(self):\n \"\"\"Generate the bank provider's ISO 3166-1 alpha-2 country code.\"\"\"\n return self.country_code\n\n def bban(self):\n \"\"\"Generate a Basic Bank Account Number (BBAN).\"\"\"\n temp = re.sub(r'\\?',\n lambda x: self.random_element(ascii_uppercase),\n self.bban_format)\n return self.numerify(temp)\n\n def iban(self):\n \"\"\"Generate an International Bank Account Number (IBAN).\"\"\"\n bban = self.bban()\n\n check = bban + self.country_code + '00'\n check = int(''.join(self.ALPHA.get(c, c) for c in check))\n check = 98 - (check % 97)\n check = str(check).zfill(2)\n\n return self.country_code + check + bban\n\n def swift8(self, use_dataset=False):\n \"\"\"Generate an 8-digit SWIFT code.\n\n This method uses |swift| under the hood with the ``length`` argument set\n to ``8`` and with the ``primary`` argument omitted. All 8-digit SWIFT\n codes already refer to the primary branch/office.\n\n :sample:\n :sample: use_dataset=True\n \"\"\"\n return self.swift(length=8, use_dataset=use_dataset)\n\n def swift11(self, primary=None, use_dataset=False):\n \"\"\"Generate an 11-digit SWIFT code.\n\n This method uses |swift| under the hood with the ``length`` argument set\n to ``11``. If ``primary`` is set to ``True``, the SWIFT code will always\n end with ``'XXX'``. All 11-digit SWIFT codes use this convention to\n refer to the primary branch/office.\n\n :sample:\n :sample: use_dataset=True\n \"\"\"\n return self.swift(length=11, primary=primary, use_dataset=use_dataset)\n\n def swift(self, length=None, primary=None, use_dataset=False):\n \"\"\"Generate a SWIFT code.\n\n SWIFT codes, reading from left to right, are composed of a 4 alphabet\n character bank code, a 2 alphabet character country code, a 2\n alphanumeric location code, and an optional 3 alphanumeric branch code.\n This means SWIFT codes can only have 8 or 11 characters, so the value of\n ``length`` can only be ``None`` or the integers ``8`` or ``11``. If the\n value is ``None``, then a value of ``8`` or ``11`` will randomly be\n assigned.\n\n Because all 8-digit SWIFT codes already refer to the primary branch or\n office, the ``primary`` argument only has an effect if the value of\n ``length`` is ``11``. If ``primary`` is ``True`` and ``length`` is\n ``11``, the 11-digit SWIFT codes generated will always end in ``'XXX'``\n to denote that they belong to primary branches/offices.\n\n For extra authenticity, localized providers may opt to include SWIFT\n bank codes, location codes, and branch codes used in their respective\n locales. If ``use_dataset`` is ``True``, this method will generate SWIFT\n codes based on those locale-specific codes if included. 
If those codes\n were not included, then it will behave as if ``use_dataset`` were\n ``False``, and in that mode, all those codes will just be randomly\n generated as per the specification.\n\n :sample:\n :sample: length=8\n :sample: length=8, use_dataset=True\n :sample: length=11\n :sample: length=11, primary=True\n :sample: length=11, use_dataset=True\n :sample: length=11, primary=True, use_dataset=True\n \"\"\"\n if length is None:\n length = self.random_element((8, 11))\n if length not in (8, 11):\n raise AssertionError('length can only be 8 or 11')\n\n if use_dataset and hasattr(self, 'swift_bank_codes'):\n bank_code = self.random_element(self.swift_bank_codes)\n else:\n bank_code = self.lexify('????', letters=string.ascii_uppercase)\n\n if use_dataset and hasattr(self, 'swift_location_codes'):\n location_code = self.random_element(self.swift_location_codes)\n else:\n location_code = self.lexify('??', letters=string.ascii_uppercase + string.digits)\n\n if length == 8:\n return bank_code + self.country_code + location_code\n\n if primary:\n branch_code = 'XXX'\n elif use_dataset and hasattr(self, 'swift_branch_codes'):\n branch_code = self.random_element(self.swift_branch_codes)\n else:\n branch_code = self.lexify('???', letters=string.ascii_uppercase + string.digits)\n\n return bank_code + self.country_code + location_code + branch_code\n", "path": "faker/providers/bank/__init__.py"}]} | 1,928 | 308 |
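As a quick sanity check of the check-digit arithmetic in the `aba()` patch above (the eight-digit prefix below is made up for illustration, not taken from the dataset):

```python
from math import ceil

def aba_check_digit(first_eight: str) -> int:
    # Same weighting as the patch: digits 1, 4, 7 carry weight 3, digits 2, 5, 8 carry
    # weight 7, and digits 3 and 6 carry weight 1; the check digit tops the sum up to
    # the next multiple of 10.
    d = [int(n) for n in first_eight]
    partial = 3 * (d[0] + d[3] + d[6]) + 7 * (d[1] + d[4] + d[7]) + d[2] + d[5]
    return ceil(partial / 10) * 10 - partial

print(aba_check_digit("02123456"))  # 7, giving the routing number "021234567"
```

With the 3-7-1 weights applied to all nine digits, the completed number satisfies the usual mod-10 rule for ABA routing transit numbers.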
gh_patches_debug_60855 | rasdani/github-patches | git_diff | airctic__icevision-500 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add tutorial with hard negative samples
## 📓 Documentation Update
"how to use an image as background annotation" is a common question. We can provide a tutorial showing how to do that
### Racoon and dogs
If you train a model on the racoon dataset and show the model a picture of a dog, it will classify it as a racoon. We can add images of dogs to the dataset (without any annotations) and show the difference in model performance between the two scenarios.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `icevision/models/base_show_results.py`
Content:
```
1 __all__ = ["base_show_results"]
2
3 from icevision.imports import *
4 from icevision.utils import *
5 from icevision.core import *
6 from icevision.visualize import *
7 from icevision.data import *
8
9
10 def base_show_results(
11 predict_fn: callable,
12 build_infer_batch_fn: callable,
13 model: nn.Module,
14 dataset: Dataset,
15 class_map: Optional[ClassMap] = None,
16 num_samples: int = 6,
17 ncols: int = 3,
18 denormalize_fn: Optional[callable] = denormalize_imagenet,
19 show: bool = True,
20 ) -> None:
21 samples = [dataset[i] for i in range(num_samples)]
22 batch, samples = build_infer_batch_fn(samples)
23 preds = predict_fn(model, batch)
24
25 imgs = [sample["img"] for sample in samples]
26 show_preds(
27 imgs,
28 preds,
29 class_map=class_map,
30 denormalize_fn=denormalize_fn,
31 ncols=ncols,
32 show=show,
33 )
34
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/icevision/models/base_show_results.py b/icevision/models/base_show_results.py
--- a/icevision/models/base_show_results.py
+++ b/icevision/models/base_show_results.py
@@ -18,7 +18,7 @@
denormalize_fn: Optional[callable] = denormalize_imagenet,
show: bool = True,
) -> None:
- samples = [dataset[i] for i in range(num_samples)]
+ samples = random.choices(dataset, k=num_samples)
batch, samples = build_infer_batch_fn(samples)
preds = predict_fn(model, batch)
| {"golden_diff": "diff --git a/icevision/models/base_show_results.py b/icevision/models/base_show_results.py\n--- a/icevision/models/base_show_results.py\n+++ b/icevision/models/base_show_results.py\n@@ -18,7 +18,7 @@\n denormalize_fn: Optional[callable] = denormalize_imagenet,\n show: bool = True,\n ) -> None:\n- samples = [dataset[i] for i in range(num_samples)]\n+ samples = random.choices(dataset, k=num_samples)\n batch, samples = build_infer_batch_fn(samples)\n preds = predict_fn(model, batch)\n", "issue": "Add tutorial with hard negative samples\n## \ud83d\udcd3 Documentation Update\r\n\"how to use an image as background annotation\" is a common question. We can provide a tutorial showing how to do that\r\n\r\n### Racoon and dogs\r\nIf you train a model on the racoon dataset and show the model a picture of a dog it will classify it as a racoon. We can add images of dogs to the dataset (without any annotations) and show how the difference of model performance in both scenarios.\n", "before_files": [{"content": "__all__ = [\"base_show_results\"]\n\nfrom icevision.imports import *\nfrom icevision.utils import *\nfrom icevision.core import *\nfrom icevision.visualize import *\nfrom icevision.data import *\n\n\ndef base_show_results(\n predict_fn: callable,\n build_infer_batch_fn: callable,\n model: nn.Module,\n dataset: Dataset,\n class_map: Optional[ClassMap] = None,\n num_samples: int = 6,\n ncols: int = 3,\n denormalize_fn: Optional[callable] = denormalize_imagenet,\n show: bool = True,\n) -> None:\n samples = [dataset[i] for i in range(num_samples)]\n batch, samples = build_infer_batch_fn(samples)\n preds = predict_fn(model, batch)\n\n imgs = [sample[\"img\"] for sample in samples]\n show_preds(\n imgs,\n preds,\n class_map=class_map,\n denormalize_fn=denormalize_fn,\n ncols=ncols,\n show=show,\n )\n", "path": "icevision/models/base_show_results.py"}], "after_files": [{"content": "__all__ = [\"base_show_results\"]\n\nfrom icevision.imports import *\nfrom icevision.utils import *\nfrom icevision.core import *\nfrom icevision.visualize import *\nfrom icevision.data import *\n\n\ndef base_show_results(\n predict_fn: callable,\n build_infer_batch_fn: callable,\n model: nn.Module,\n dataset: Dataset,\n class_map: Optional[ClassMap] = None,\n num_samples: int = 6,\n ncols: int = 3,\n denormalize_fn: Optional[callable] = denormalize_imagenet,\n show: bool = True,\n) -> None:\n samples = random.choices(dataset, k=num_samples)\n batch, samples = build_infer_batch_fn(samples)\n preds = predict_fn(model, batch)\n\n imgs = [sample[\"img\"] for sample in samples]\n show_preds(\n imgs,\n preds,\n class_map=class_map,\n denormalize_fn=denormalize_fn,\n ncols=ncols,\n show=show,\n )\n", "path": "icevision/models/base_show_results.py"}]} | 643 | 133 |
gh_patches_debug_15676 | rasdani/github-patches | git_diff | WeblateOrg__weblate-7984 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Translation Memory Maintenance - Delete Entries
It would be good to be able to maintain the TM in one way or the other.
Perhaps
- [ ] Search & Replace in TM
- [ ] Search & Delete entries in TM
Or perhaps, as a "simple" (?) starting point, giving the translator the option to delete single entries from the TM when they see the result list in **Automatic Suggestions**. Like perhaps:
- [ ] Delete single entry in **Automatic Suggestions** view:

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `weblate/memory/machine.py`
Content:
```
1 #
2 # Copyright © 2012–2022 Michal Čihař <[email protected]>
3 #
4 # This file is part of Weblate <https://weblate.org/>
5 #
6 # This program is free software: you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation, either version 3 of the License, or
9 # (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License
17 # along with this program. If not, see <https://www.gnu.org/licenses/>.
18 #
19
20 from weblate.machinery.base import MachineTranslation, get_machinery_language
21 from weblate.memory.models import Memory
22
23
24 class WeblateMemory(MachineTranslation):
25 """Translation service using strings already translated in Weblate."""
26
27 name = "Weblate Translation Memory"
28 rank_boost = 2
29 cache_translations = False
30 same_languages = True
31 accounting_key = "internal"
32 do_cleanup = False
33
34 def convert_language(self, language):
35 """No conversion of language object."""
36 return get_machinery_language(language)
37
38 def is_supported(self, source, language):
39 """Any language is supported."""
40 return True
41
42 def is_rate_limited(self):
43 """This service has no rate limiting."""
44 return False
45
46 def download_translations(
47 self,
48 source,
49 language,
50 text: str,
51 unit,
52 user,
53 search: bool,
54 threshold: int = 75,
55 ):
56 """Download list of possible translations from a service."""
57 for result in Memory.objects.lookup(
58 source,
59 language,
60 text,
61 user,
62 unit.translation.component.project,
63 unit.translation.component.project.use_shared_tm,
64 ).iterator():
65 quality = self.comparer.similarity(text, result.source)
66 if quality < 10 or (quality < threshold and not search):
67 continue
68 yield {
69 "text": result.target,
70 "quality": quality,
71 "service": self.name,
72 "origin": result.get_origin_display(),
73 "source": result.source,
74 "show_quality": True,
75 }
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/weblate/memory/machine.py b/weblate/memory/machine.py
--- a/weblate/memory/machine.py
+++ b/weblate/memory/machine.py
@@ -17,6 +17,8 @@
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
+from django.urls import reverse
+
from weblate.machinery.base import MachineTranslation, get_machinery_language
from weblate.memory.models import Memory
@@ -72,4 +74,7 @@
"origin": result.get_origin_display(),
"source": result.source,
"show_quality": True,
+ "delete_url": reverse("api:memory-detail", kwargs={"pk": result.id})
+ if user is not None and user.has_perm("memory.delete", result)
+ else None,
}
| {"golden_diff": "diff --git a/weblate/memory/machine.py b/weblate/memory/machine.py\n--- a/weblate/memory/machine.py\n+++ b/weblate/memory/machine.py\n@@ -17,6 +17,8 @@\n # along with this program. If not, see <https://www.gnu.org/licenses/>.\n #\n \n+from django.urls import reverse\n+\n from weblate.machinery.base import MachineTranslation, get_machinery_language\n from weblate.memory.models import Memory\n \n@@ -72,4 +74,7 @@\n \"origin\": result.get_origin_display(),\n \"source\": result.source,\n \"show_quality\": True,\n+ \"delete_url\": reverse(\"api:memory-detail\", kwargs={\"pk\": result.id})\n+ if user is not None and user.has_perm(\"memory.delete\", result)\n+ else None,\n }\n", "issue": "Translation Memory Maintenance - Delete Entries\nIt would be good to be able to maintain the TM in one way or the other.\r\n\r\nPerhaps \r\n- [ ] Search & Replace in TM\r\n- [ ] Search & Delete entries in TM\r\n\r\nOr perhpas, as a \"simple\" (?) starting point, giving the translator the option to delete single entries from the TM when they see the result list in **Automatic Suggestions**. Like perhaps:\r\n\r\n- [ ] Delete single entry in **Automatic Suggestions** view:\r\n\r\n\r\n\n", "before_files": [{"content": "#\n# Copyright \u00a9 2012\u20132022 Michal \u010ciha\u0159 <[email protected]>\n#\n# This file is part of Weblate <https://weblate.org/>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <https://www.gnu.org/licenses/>.\n#\n\nfrom weblate.machinery.base import MachineTranslation, get_machinery_language\nfrom weblate.memory.models import Memory\n\n\nclass WeblateMemory(MachineTranslation):\n \"\"\"Translation service using strings already translated in Weblate.\"\"\"\n\n name = \"Weblate Translation Memory\"\n rank_boost = 2\n cache_translations = False\n same_languages = True\n accounting_key = \"internal\"\n do_cleanup = False\n\n def convert_language(self, language):\n \"\"\"No conversion of language object.\"\"\"\n return get_machinery_language(language)\n\n def is_supported(self, source, language):\n \"\"\"Any language is supported.\"\"\"\n return True\n\n def is_rate_limited(self):\n \"\"\"This service has no rate limiting.\"\"\"\n return False\n\n def download_translations(\n self,\n source,\n language,\n text: str,\n unit,\n user,\n search: bool,\n threshold: int = 75,\n ):\n \"\"\"Download list of possible translations from a service.\"\"\"\n for result in Memory.objects.lookup(\n source,\n language,\n text,\n user,\n unit.translation.component.project,\n unit.translation.component.project.use_shared_tm,\n ).iterator():\n quality = self.comparer.similarity(text, result.source)\n if quality < 10 or (quality < threshold and not search):\n continue\n yield {\n \"text\": result.target,\n \"quality\": quality,\n \"service\": self.name,\n \"origin\": result.get_origin_display(),\n \"source\": result.source,\n \"show_quality\": True,\n }\n", "path": "weblate/memory/machine.py"}], "after_files": [{"content": "#\n# Copyright \u00a9 2012\u20132022 Michal \u010ciha\u0159 <[email protected]>\n#\n# This file is part of Weblate <https://weblate.org/>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <https://www.gnu.org/licenses/>.\n#\n\nfrom django.urls import reverse\n\nfrom weblate.machinery.base import MachineTranslation, get_machinery_language\nfrom weblate.memory.models import Memory\n\n\nclass WeblateMemory(MachineTranslation):\n \"\"\"Translation service using strings already translated in Weblate.\"\"\"\n\n name = \"Weblate Translation Memory\"\n rank_boost = 2\n cache_translations = False\n same_languages = True\n accounting_key = \"internal\"\n do_cleanup = False\n\n def convert_language(self, language):\n \"\"\"No conversion of language object.\"\"\"\n return get_machinery_language(language)\n\n def is_supported(self, source, language):\n \"\"\"Any language is supported.\"\"\"\n return True\n\n def is_rate_limited(self):\n \"\"\"This service has no rate limiting.\"\"\"\n return False\n\n def download_translations(\n self,\n source,\n language,\n text: str,\n unit,\n user,\n search: bool,\n threshold: int = 75,\n ):\n \"\"\"Download list of possible translations from a service.\"\"\"\n for result in Memory.objects.lookup(\n source,\n language,\n text,\n user,\n unit.translation.component.project,\n unit.translation.component.project.use_shared_tm,\n ).iterator():\n quality = self.comparer.similarity(text, result.source)\n if quality < 10 or (quality < threshold and not search):\n continue\n yield {\n \"text\": result.target,\n \"quality\": quality,\n \"service\": self.name,\n \"origin\": result.get_origin_display(),\n \"source\": result.source,\n \"show_quality\": True,\n \"delete_url\": reverse(\"api:memory-detail\", kwargs={\"pk\": result.id})\n if user is not None and user.has_perm(\"memory.delete\", result)\n else None,\n }\n", "path": "weblate/memory/machine.py"}]} | 1,094 | 185 |
gh_patches_debug_39639 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-959 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Stuck in "Waiting for GIL"
Hi,
We found a thread that seems blocked forever:
```
gdb python 30107
```
Here's the single thread:
```
(gdb) info threads
Id Target Id Frame
* 1 Thread 0x7f1dd852e4c0 (LWP 30107) "/opt/simon/rele" 0x00007f1dd7d2e146 in do_futex_wait.constprop () from /lib64/libpthread.so.0
```
Here's the backtrace:
```
raceback (most recent call first):
Waiting for the GIL
File "/opt/simon/releases/b038662ce7d140609b1e4883a3ea0edf77851537/venv/lib/python2.7/site-packages/ddtrace/sampler.py", line 88, in sample
with self._lock:
File "/opt/simon/releases/b038662ce7d140609b1e4883a3ea0edf77851537/venv/lib/python2.7/site-packages/ddtrace/tracer.py", line 251, in start_span
if self.priority_sampler.sample(span):
File "/opt/simon/releases/b038662ce7d140609b1e4883a3ea0edf77851537/venv/lib/python2.7/site-packages/ddtrace/tracer.py", line 376, in trace
span_type=span_type,
File "/opt/simon/releases/b038662ce7d140609b1e4883a3ea0edf77851537/venv/lib/python2.7/site-packages/ddtrace/contrib/django/cache.py", line 56, in wrapped
with tracer.trace('django.cache', span_type=TYPE, service=cache_service_name) as span:
```
I'm not sure how to troubleshoot from here. Have you seen this before, or have any suggestions on what I could look at? It's the only thread in the process, so nothing should be holding the GIL.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ddtrace/sampler.py`
Content:
```
1 """Samplers manage the client-side trace sampling
2
3 Any `sampled = False` trace won't be written, and can be ignored by the instrumentation.
4 """
5 from threading import Lock
6
7 from .compat import iteritems
8 from .internal.logger import get_logger
9
10 log = get_logger(__name__)
11
12 MAX_TRACE_ID = 2 ** 64
13
14 # Has to be the same factor and key as the Agent to allow chained sampling
15 KNUTH_FACTOR = 1111111111111111111
16
17
18 class AllSampler(object):
19 """Sampler sampling all the traces"""
20
21 def sample(self, span):
22 return True
23
24
25 class RateSampler(object):
26 """Sampler based on a rate
27
28 Keep (100 * `sample_rate`)% of the traces.
29 It samples randomly, its main purpose is to reduce the instrumentation footprint.
30 """
31
32 def __init__(self, sample_rate=1):
33 if sample_rate <= 0:
34 log.error('sample_rate is negative or null, disable the Sampler')
35 sample_rate = 1
36 elif sample_rate > 1:
37 sample_rate = 1
38
39 self.set_sample_rate(sample_rate)
40
41 log.debug('initialized RateSampler, sample %s%% of traces', 100 * sample_rate)
42
43 def set_sample_rate(self, sample_rate):
44 self.sample_rate = sample_rate
45 self.sampling_id_threshold = sample_rate * MAX_TRACE_ID
46
47 def sample(self, span):
48 sampled = ((span.trace_id * KNUTH_FACTOR) % MAX_TRACE_ID) <= self.sampling_id_threshold
49
50 return sampled
51
52
53 def _key(service=None, env=None):
54 service = service or ''
55 env = env or ''
56 return 'service:' + service + ',env:' + env
57
58
59 _default_key = _key()
60
61
62 class RateByServiceSampler(object):
63 """Sampler based on a rate, by service
64
65 Keep (100 * `sample_rate`)% of the traces.
66 The sample rate is kept independently for each service/env tuple.
67 """
68
69 def __init__(self, sample_rate=1):
70 self._lock = Lock()
71 self._by_service_samplers = {}
72 self._by_service_samplers[_default_key] = RateSampler(sample_rate)
73
74 def _set_sample_rate_by_key(self, sample_rate, key):
75 with self._lock:
76 if key in self._by_service_samplers:
77 self._by_service_samplers[key].set_sample_rate(sample_rate)
78 else:
79 self._by_service_samplers[key] = RateSampler(sample_rate)
80
81 def set_sample_rate(self, sample_rate, service='', env=''):
82 self._set_sample_rate_by_key(sample_rate, _key(service, env))
83
84 def sample(self, span):
85 tags = span.tracer().tags
86 env = tags['env'] if 'env' in tags else None
87 key = _key(span.service, env)
88 with self._lock:
89 if key in self._by_service_samplers:
90 return self._by_service_samplers[key].sample(span)
91 return self._by_service_samplers[_default_key].sample(span)
92
93 def set_sample_rate_by_service(self, rate_by_service):
94 for key, sample_rate in iteritems(rate_by_service):
95 self._set_sample_rate_by_key(sample_rate, key)
96 with self._lock:
97 for key in list(self._by_service_samplers):
98 if key not in rate_by_service and key != _default_key:
99 del self._by_service_samplers[key]
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ddtrace/sampler.py b/ddtrace/sampler.py
--- a/ddtrace/sampler.py
+++ b/ddtrace/sampler.py
@@ -2,8 +2,6 @@
Any `sampled = False` trace won't be written, and can be ignored by the instrumentation.
"""
-from threading import Lock
-
from .compat import iteritems
from .internal.logger import get_logger
@@ -50,15 +48,6 @@
return sampled
-def _key(service=None, env=None):
- service = service or ''
- env = env or ''
- return 'service:' + service + ',env:' + env
-
-
-_default_key = _key()
-
-
class RateByServiceSampler(object):
"""Sampler based on a rate, by service
@@ -66,34 +55,40 @@
The sample rate is kept independently for each service/env tuple.
"""
+ @staticmethod
+ def _key(service=None, env=None):
+ """Compute a key with the same format used by the Datadog agent API."""
+ service = service or ''
+ env = env or ''
+ return 'service:' + service + ',env:' + env
+
def __init__(self, sample_rate=1):
- self._lock = Lock()
- self._by_service_samplers = {}
- self._by_service_samplers[_default_key] = RateSampler(sample_rate)
+ self.sample_rate = sample_rate
+ self._by_service_samplers = self._get_new_by_service_sampler()
- def _set_sample_rate_by_key(self, sample_rate, key):
- with self._lock:
- if key in self._by_service_samplers:
- self._by_service_samplers[key].set_sample_rate(sample_rate)
- else:
- self._by_service_samplers[key] = RateSampler(sample_rate)
+ def _get_new_by_service_sampler(self):
+ return {
+ self._default_key: RateSampler(self.sample_rate)
+ }
def set_sample_rate(self, sample_rate, service='', env=''):
- self._set_sample_rate_by_key(sample_rate, _key(service, env))
+ self._by_service_samplers[self._key(service, env)] = RateSampler(sample_rate)
def sample(self, span):
tags = span.tracer().tags
env = tags['env'] if 'env' in tags else None
- key = _key(span.service, env)
- with self._lock:
- if key in self._by_service_samplers:
- return self._by_service_samplers[key].sample(span)
- return self._by_service_samplers[_default_key].sample(span)
+ key = self._key(span.service, env)
+ return self._by_service_samplers.get(
+ key, self._by_service_samplers[self._default_key]
+ ).sample(span)
def set_sample_rate_by_service(self, rate_by_service):
+ new_by_service_samplers = self._get_new_by_service_sampler()
for key, sample_rate in iteritems(rate_by_service):
- self._set_sample_rate_by_key(sample_rate, key)
- with self._lock:
- for key in list(self._by_service_samplers):
- if key not in rate_by_service and key != _default_key:
- del self._by_service_samplers[key]
+ new_by_service_samplers[key] = RateSampler(sample_rate)
+
+ self._by_service_samplers = new_by_service_samplers
+
+
+# Default key for service with no specific rate
+RateByServiceSampler._default_key = RateByServiceSampler._key()
| {"golden_diff": "diff --git a/ddtrace/sampler.py b/ddtrace/sampler.py\n--- a/ddtrace/sampler.py\n+++ b/ddtrace/sampler.py\n@@ -2,8 +2,6 @@\n \n Any `sampled = False` trace won't be written, and can be ignored by the instrumentation.\n \"\"\"\n-from threading import Lock\n-\n from .compat import iteritems\n from .internal.logger import get_logger\n \n@@ -50,15 +48,6 @@\n return sampled\n \n \n-def _key(service=None, env=None):\n- service = service or ''\n- env = env or ''\n- return 'service:' + service + ',env:' + env\n-\n-\n-_default_key = _key()\n-\n-\n class RateByServiceSampler(object):\n \"\"\"Sampler based on a rate, by service\n \n@@ -66,34 +55,40 @@\n The sample rate is kept independently for each service/env tuple.\n \"\"\"\n \n+ @staticmethod\n+ def _key(service=None, env=None):\n+ \"\"\"Compute a key with the same format used by the Datadog agent API.\"\"\"\n+ service = service or ''\n+ env = env or ''\n+ return 'service:' + service + ',env:' + env\n+\n def __init__(self, sample_rate=1):\n- self._lock = Lock()\n- self._by_service_samplers = {}\n- self._by_service_samplers[_default_key] = RateSampler(sample_rate)\n+ self.sample_rate = sample_rate\n+ self._by_service_samplers = self._get_new_by_service_sampler()\n \n- def _set_sample_rate_by_key(self, sample_rate, key):\n- with self._lock:\n- if key in self._by_service_samplers:\n- self._by_service_samplers[key].set_sample_rate(sample_rate)\n- else:\n- self._by_service_samplers[key] = RateSampler(sample_rate)\n+ def _get_new_by_service_sampler(self):\n+ return {\n+ self._default_key: RateSampler(self.sample_rate)\n+ }\n \n def set_sample_rate(self, sample_rate, service='', env=''):\n- self._set_sample_rate_by_key(sample_rate, _key(service, env))\n+ self._by_service_samplers[self._key(service, env)] = RateSampler(sample_rate)\n \n def sample(self, span):\n tags = span.tracer().tags\n env = tags['env'] if 'env' in tags else None\n- key = _key(span.service, env)\n- with self._lock:\n- if key in self._by_service_samplers:\n- return self._by_service_samplers[key].sample(span)\n- return self._by_service_samplers[_default_key].sample(span)\n+ key = self._key(span.service, env)\n+ return self._by_service_samplers.get(\n+ key, self._by_service_samplers[self._default_key]\n+ ).sample(span)\n \n def set_sample_rate_by_service(self, rate_by_service):\n+ new_by_service_samplers = self._get_new_by_service_sampler()\n for key, sample_rate in iteritems(rate_by_service):\n- self._set_sample_rate_by_key(sample_rate, key)\n- with self._lock:\n- for key in list(self._by_service_samplers):\n- if key not in rate_by_service and key != _default_key:\n- del self._by_service_samplers[key]\n+ new_by_service_samplers[key] = RateSampler(sample_rate)\n+\n+ self._by_service_samplers = new_by_service_samplers\n+\n+\n+# Default key for service with no specific rate\n+RateByServiceSampler._default_key = RateByServiceSampler._key()\n", "issue": "Stuck in \"Waiting for GIL\"\nHi,\r\n\r\nWe found a thread that seems blocked forever:\r\n\r\n```\r\ngdb python 30107\r\n```\r\n\r\nHere's the single thread:\r\n\r\n```\r\n(gdb) info threads\r\n Id Target Id Frame\r\n* 1 Thread 0x7f1dd852e4c0 (LWP 30107) \"/opt/simon/rele\" 0x00007f1dd7d2e146 in do_futex_wait.constprop () from /lib64/libpthread.so.0\r\n```\r\n\r\nHere's the backtrace:\r\n\r\n```\r\nraceback (most recent call first):\r\n Waiting for the GIL\r\n File \"/opt/simon/releases/b038662ce7d140609b1e4883a3ea0edf77851537/venv/lib/python2.7/site-packages/ddtrace/sampler.py\", line 88, in sample\r\n with 
self._lock:\r\n File \"/opt/simon/releases/b038662ce7d140609b1e4883a3ea0edf77851537/venv/lib/python2.7/site-packages/ddtrace/tracer.py\", line 251, in start_span\r\n if self.priority_sampler.sample(span):\r\n File \"/opt/simon/releases/b038662ce7d140609b1e4883a3ea0edf77851537/venv/lib/python2.7/site-packages/ddtrace/tracer.py\", line 376, in trace\r\n span_type=span_type,\r\n File \"/opt/simon/releases/b038662ce7d140609b1e4883a3ea0edf77851537/venv/lib/python2.7/site-packages/ddtrace/contrib/django/cache.py\", line 56, in wrapped\r\n with tracer.trace('django.cache', span_type=TYPE, service=cache_service_name) as span:\r\n```\r\n\r\nI'm not sure how to troubleshoot from here. Have you seen this before, or have any suggestions on what I could look at? It's the only thread in process so nothing should be holding the GIL.\n", "before_files": [{"content": "\"\"\"Samplers manage the client-side trace sampling\n\nAny `sampled = False` trace won't be written, and can be ignored by the instrumentation.\n\"\"\"\nfrom threading import Lock\n\nfrom .compat import iteritems\nfrom .internal.logger import get_logger\n\nlog = get_logger(__name__)\n\nMAX_TRACE_ID = 2 ** 64\n\n# Has to be the same factor and key as the Agent to allow chained sampling\nKNUTH_FACTOR = 1111111111111111111\n\n\nclass AllSampler(object):\n \"\"\"Sampler sampling all the traces\"\"\"\n\n def sample(self, span):\n return True\n\n\nclass RateSampler(object):\n \"\"\"Sampler based on a rate\n\n Keep (100 * `sample_rate`)% of the traces.\n It samples randomly, its main purpose is to reduce the instrumentation footprint.\n \"\"\"\n\n def __init__(self, sample_rate=1):\n if sample_rate <= 0:\n log.error('sample_rate is negative or null, disable the Sampler')\n sample_rate = 1\n elif sample_rate > 1:\n sample_rate = 1\n\n self.set_sample_rate(sample_rate)\n\n log.debug('initialized RateSampler, sample %s%% of traces', 100 * sample_rate)\n\n def set_sample_rate(self, sample_rate):\n self.sample_rate = sample_rate\n self.sampling_id_threshold = sample_rate * MAX_TRACE_ID\n\n def sample(self, span):\n sampled = ((span.trace_id * KNUTH_FACTOR) % MAX_TRACE_ID) <= self.sampling_id_threshold\n\n return sampled\n\n\ndef _key(service=None, env=None):\n service = service or ''\n env = env or ''\n return 'service:' + service + ',env:' + env\n\n\n_default_key = _key()\n\n\nclass RateByServiceSampler(object):\n \"\"\"Sampler based on a rate, by service\n\n Keep (100 * `sample_rate`)% of the traces.\n The sample rate is kept independently for each service/env tuple.\n \"\"\"\n\n def __init__(self, sample_rate=1):\n self._lock = Lock()\n self._by_service_samplers = {}\n self._by_service_samplers[_default_key] = RateSampler(sample_rate)\n\n def _set_sample_rate_by_key(self, sample_rate, key):\n with self._lock:\n if key in self._by_service_samplers:\n self._by_service_samplers[key].set_sample_rate(sample_rate)\n else:\n self._by_service_samplers[key] = RateSampler(sample_rate)\n\n def set_sample_rate(self, sample_rate, service='', env=''):\n self._set_sample_rate_by_key(sample_rate, _key(service, env))\n\n def sample(self, span):\n tags = span.tracer().tags\n env = tags['env'] if 'env' in tags else None\n key = _key(span.service, env)\n with self._lock:\n if key in self._by_service_samplers:\n return self._by_service_samplers[key].sample(span)\n return self._by_service_samplers[_default_key].sample(span)\n\n def set_sample_rate_by_service(self, rate_by_service):\n for key, sample_rate in iteritems(rate_by_service):\n 
self._set_sample_rate_by_key(sample_rate, key)\n with self._lock:\n for key in list(self._by_service_samplers):\n if key not in rate_by_service and key != _default_key:\n del self._by_service_samplers[key]\n", "path": "ddtrace/sampler.py"}], "after_files": [{"content": "\"\"\"Samplers manage the client-side trace sampling\n\nAny `sampled = False` trace won't be written, and can be ignored by the instrumentation.\n\"\"\"\nfrom .compat import iteritems\nfrom .internal.logger import get_logger\n\nlog = get_logger(__name__)\n\nMAX_TRACE_ID = 2 ** 64\n\n# Has to be the same factor and key as the Agent to allow chained sampling\nKNUTH_FACTOR = 1111111111111111111\n\n\nclass AllSampler(object):\n \"\"\"Sampler sampling all the traces\"\"\"\n\n def sample(self, span):\n return True\n\n\nclass RateSampler(object):\n \"\"\"Sampler based on a rate\n\n Keep (100 * `sample_rate`)% of the traces.\n It samples randomly, its main purpose is to reduce the instrumentation footprint.\n \"\"\"\n\n def __init__(self, sample_rate=1):\n if sample_rate <= 0:\n log.error('sample_rate is negative or null, disable the Sampler')\n sample_rate = 1\n elif sample_rate > 1:\n sample_rate = 1\n\n self.set_sample_rate(sample_rate)\n\n log.debug('initialized RateSampler, sample %s%% of traces', 100 * sample_rate)\n\n def set_sample_rate(self, sample_rate):\n self.sample_rate = sample_rate\n self.sampling_id_threshold = sample_rate * MAX_TRACE_ID\n\n def sample(self, span):\n sampled = ((span.trace_id * KNUTH_FACTOR) % MAX_TRACE_ID) <= self.sampling_id_threshold\n\n return sampled\n\n\nclass RateByServiceSampler(object):\n \"\"\"Sampler based on a rate, by service\n\n Keep (100 * `sample_rate`)% of the traces.\n The sample rate is kept independently for each service/env tuple.\n \"\"\"\n\n @staticmethod\n def _key(service=None, env=None):\n \"\"\"Compute a key with the same format used by the Datadog agent API.\"\"\"\n service = service or ''\n env = env or ''\n return 'service:' + service + ',env:' + env\n\n def __init__(self, sample_rate=1):\n self.sample_rate = sample_rate\n self._by_service_samplers = self._get_new_by_service_sampler()\n\n def _get_new_by_service_sampler(self):\n return {\n self._default_key: RateSampler(self.sample_rate)\n }\n\n def set_sample_rate(self, sample_rate, service='', env=''):\n self._by_service_samplers[self._key(service, env)] = RateSampler(sample_rate)\n\n def sample(self, span):\n tags = span.tracer().tags\n env = tags['env'] if 'env' in tags else None\n key = self._key(span.service, env)\n return self._by_service_samplers.get(\n key, self._by_service_samplers[self._default_key]\n ).sample(span)\n\n def set_sample_rate_by_service(self, rate_by_service):\n new_by_service_samplers = self._get_new_by_service_sampler()\n for key, sample_rate in iteritems(rate_by_service):\n new_by_service_samplers[key] = RateSampler(sample_rate)\n\n self._by_service_samplers = new_by_service_samplers\n\n\n# Default key for service with no specific rate\nRateByServiceSampler._default_key = RateByServiceSampler._key()\n", "path": "ddtrace/sampler.py"}]} | 1,751 | 815 |
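The core of the fix above is replacing lock-guarded mutation of a shared dict with build-then-swap: the new mapping is assembled privately and then bound in a single attribute assignment, which is atomic under the GIL, so no lock is needed. A minimal sketch of the same pattern, independent of the ddtrace classes:

```python
class RatesHolder:
    def __init__(self):
        self._rates = {"default": 1.0}

    def update(self, new_rates):
        # Build a fresh dict off to the side...
        rebuilt = {"default": 1.0}
        rebuilt.update(new_rates)
        # ...then publish it with one attribute assignment; readers see either
        # the old dict or the new one, never a half-updated state.
        self._rates = rebuilt

    def get(self, key):
        return self._rates.get(key, self._rates["default"])
```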
gh_patches_debug_49038 | rasdani/github-patches | git_diff | facebookresearch__hydra-907 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug] Hydra's override grammar allows quoted resolver args, but OmegaConf does not recognize them
# 🐛 Bug
## Description
Best explained with an example (assume a basic `my_app.py` that prints the resolved config):
```bash
python my_app.py +port='${env:PORT}' # works, may crash if PORT is not defined
python my_app.py +port='${env:PORT,80}' # crashes: not recognized by Hydra's grammar
python my_app.py +port='${env:PORT,"80"}' # accepted by Hydra but OmegaConf does not resolve it
```
This is because in the current version of OmegaConf, quotes are not allowed inside interpolations.
## Checklist
- [x] I checked on the latest version of Hydra
- [x] I created a minimal repro
## To reproduce
** Minimal Code/Config snippet to reproduce **
```python
from omegaconf import DictConfig, OmegaConf
import hydra
@hydra.main()
def my_app(cfg: DictConfig) -> None:
print(OmegaConf.to_yaml(cfg, resolve=True))
if __name__ == "__main__":
my_app()
```
Then try the examples above.
** Stack trace/error message **
Just showing the last example:
```
port: ${env:PORT,"80"}
```
## Expected Behavior
Hydra should allow the same characters as OmegaConf.
## System information
- **Hydra Version** : current master (7afee097)
- **Python version** : 3.8.3
- **Virtual environment type and version** : conda 4.8.3
- **Operating system** : MacOS
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hydra/core/override_parser/overrides_parser.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 import sys
3 from typing import Any, List, Optional
4
5 from antlr4.error.Errors import LexerNoViableAltException, RecognitionException
6
7 from hydra._internal.grammar import grammar_functions
8 from hydra._internal.grammar.functions import Functions
9 from hydra.core.config_loader import ConfigLoader
10 from hydra.core.override_parser.overrides_visitor import (
11 HydraErrorListener,
12 HydraOverrideVisitor,
13 )
14 from hydra.core.override_parser.types import Override
15 from hydra.errors import HydraException, OverrideParseException
16
17 try:
18 from hydra.grammar.gen.OverrideLexer import (
19 CommonTokenStream,
20 InputStream,
21 OverrideLexer,
22 )
23 from hydra.grammar.gen.OverrideParser import OverrideParser
24
25 except ModuleNotFoundError:
26 print(
27 "Error importing generated parsers, run `python setup.py antlr` to regenerate."
28 )
29 sys.exit(1)
30
31 # The set of parser rules that require the lexer to be in lexical mode `KEY`.
32 KEY_RULES = {"key", "override", "package", "packageOrGroup"}
33
34
35 class OverridesParser:
36 functions: Functions
37
38 @classmethod
39 def create(cls, config_loader: Optional[ConfigLoader] = None) -> "OverridesParser":
40 functions = create_functions()
41 return cls(functions=functions, config_loader=config_loader)
42
43 def __init__(
44 self, functions: Functions, config_loader: Optional[ConfigLoader] = None
45 ):
46 self.functions = functions
47 self.config_loader = config_loader
48
49 def parse_rule(self, s: str, rule_name: str) -> Any:
50 error_listener = HydraErrorListener()
51 istream = InputStream(s)
52 lexer = OverrideLexer(istream)
53 lexer.removeErrorListeners()
54 lexer.addErrorListener(error_listener)
55
56 # Set the lexer in the correct mode to parse the desired rule.
57 lexer_mode = "KEY" if rule_name in KEY_RULES else "VALUE"
58 lexer.mode(getattr(OverrideLexer, lexer_mode))
59
60 stream = CommonTokenStream(lexer)
61 parser = OverrideParser(stream)
62 parser.removeErrorListeners()
63 parser.addErrorListener(error_listener)
64 visitor = HydraOverrideVisitor(self.functions)
65 rule = getattr(parser, rule_name)
66 tree = rule()
67 ret = visitor.visit(tree)
68 if isinstance(ret, Override):
69 ret.input_line = s
70 return ret
71
72 def parse_override(self, s: str) -> Override:
73 ret = self.parse_rule(s, "override")
74 assert isinstance(ret, Override)
75 return ret
76
77 def parse_overrides(self, overrides: List[str]) -> List[Override]:
78 ret: List[Override] = []
79 for override in overrides:
80 try:
81 parsed = self.parse_rule(override, "override")
82 except HydraException as e:
83 cause = e.__cause__
84 if isinstance(cause, LexerNoViableAltException):
85 prefix = "LexerNoViableAltException: "
86 start = len(prefix) + cause.startIndex + 1
87 msg = f"{prefix}{override}" f"\n{'^'.rjust(start)}"
88 e.__cause__ = None
89 elif isinstance(cause, RecognitionException):
90 prefix = f"{e}"
91 msg = f"{prefix}"
92 e.__cause__ = None
93 else:
94 msg = f"Error parsing override '{override}'" f"\n{e}"
95 raise OverrideParseException(
96 override=override,
97 message=f"{msg}"
98 f"\nSee https://hydra.cc/docs/next/advanced/override_grammar/basic for details",
99 ) from e.__cause__
100 assert isinstance(parsed, Override)
101 parsed.config_loader = self.config_loader
102 ret.append(parsed)
103 return ret
104
105
106 def create_functions() -> Functions:
107 functions = Functions()
108 # casts
109 functions.register(name="int", func=grammar_functions.cast_int)
110 functions.register(name="str", func=grammar_functions.cast_str)
111 functions.register(name="bool", func=grammar_functions.cast_bool)
112 functions.register(name="float", func=grammar_functions.cast_float)
113 # sweeps
114 functions.register(name="choice", func=grammar_functions.choice)
115 functions.register(name="range", func=grammar_functions.range)
116 functions.register(name="interval", func=grammar_functions.interval)
117 # misc
118 functions.register(name="tag", func=grammar_functions.tag)
119 functions.register(name="sort", func=grammar_functions.sort)
120 functions.register(name="shuffle", func=grammar_functions.shuffle)
121 functions.register(name="glob", func=grammar_functions.glob)
122 return functions
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/hydra/core/override_parser/overrides_parser.py b/hydra/core/override_parser/overrides_parser.py
--- a/hydra/core/override_parser/overrides_parser.py
+++ b/hydra/core/override_parser/overrides_parser.py
@@ -54,8 +54,8 @@
lexer.addErrorListener(error_listener)
# Set the lexer in the correct mode to parse the desired rule.
- lexer_mode = "KEY" if rule_name in KEY_RULES else "VALUE"
- lexer.mode(getattr(OverrideLexer, lexer_mode))
+ if rule_name not in KEY_RULES:
+ lexer.mode(OverrideLexer.VALUE_MODE)
stream = CommonTokenStream(lexer)
parser = OverrideParser(stream)
| {"golden_diff": "diff --git a/hydra/core/override_parser/overrides_parser.py b/hydra/core/override_parser/overrides_parser.py\n--- a/hydra/core/override_parser/overrides_parser.py\n+++ b/hydra/core/override_parser/overrides_parser.py\n@@ -54,8 +54,8 @@\n lexer.addErrorListener(error_listener)\n \n # Set the lexer in the correct mode to parse the desired rule.\n- lexer_mode = \"KEY\" if rule_name in KEY_RULES else \"VALUE\"\n- lexer.mode(getattr(OverrideLexer, lexer_mode))\n+ if rule_name not in KEY_RULES:\n+ lexer.mode(OverrideLexer.VALUE_MODE)\n \n stream = CommonTokenStream(lexer)\n parser = OverrideParser(stream)\n", "issue": "[Bug] Hydra's override grammar allows quoted resolver args, but OmegaConf does not recognize them\n# \ud83d\udc1b Bug\r\n\r\n## Description\r\n\r\nBest explained with an example (assume a basic `my_app.py` that prints the resolved config):\r\n\r\n```bash\r\npython my_app.py +port='${env:PORT}' # works, may crash if PORT is not defined\r\npython my_app.py +port='${env:PORT,80}' # crashes: not recognized by Hydra's grammar\r\npython my_app.py +port='${env:PORT,\"80\"}' # accepted by Hydra but OmegaConf does not resolve it\r\n```\r\n\r\nThis is because in the current version of OmegaConf, quotes are not allowed inside interpolations.\r\n\r\n## Checklist\r\n- [x] I checked on the latest version of Hydra\r\n- [x] I created a minimal repro\r\n\r\n## To reproduce\r\n\r\n** Minimal Code/Config snippet to reproduce **\r\n\r\n```python\r\nfrom omegaconf import DictConfig, OmegaConf\r\nimport hydra\r\n\r\[email protected]()\r\ndef my_app(cfg: DictConfig) -> None:\r\n print(OmegaConf.to_yaml(cfg, resolve=True))\r\n\r\nif __name__ == \"__main__\":\r\n my_app()\r\n```\r\n\r\nThen try the examples above.\r\n\r\n** Stack trace/error message **\r\n\r\nJust showing the last example:\r\n```\r\nport: ${env:PORT,\"80\"}\r\n```\r\n\r\n## Expected Behavior\r\nHydra should allow the same characters as OmegaConf.\r\n\r\n## System information\r\n- **Hydra Version** : current master (7afee097)\r\n- **Python version** : 3.8.3\r\n- **Virtual environment type and version** : conda 4.8.3\r\n- **Operating system** : MacOS\r\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\nimport sys\nfrom typing import Any, List, Optional\n\nfrom antlr4.error.Errors import LexerNoViableAltException, RecognitionException\n\nfrom hydra._internal.grammar import grammar_functions\nfrom hydra._internal.grammar.functions import Functions\nfrom hydra.core.config_loader import ConfigLoader\nfrom hydra.core.override_parser.overrides_visitor import (\n HydraErrorListener,\n HydraOverrideVisitor,\n)\nfrom hydra.core.override_parser.types import Override\nfrom hydra.errors import HydraException, OverrideParseException\n\ntry:\n from hydra.grammar.gen.OverrideLexer import (\n CommonTokenStream,\n InputStream,\n OverrideLexer,\n )\n from hydra.grammar.gen.OverrideParser import OverrideParser\n\nexcept ModuleNotFoundError:\n print(\n \"Error importing generated parsers, run `python setup.py antlr` to regenerate.\"\n )\n sys.exit(1)\n\n# The set of parser rules that require the lexer to be in lexical mode `KEY`.\nKEY_RULES = {\"key\", \"override\", \"package\", \"packageOrGroup\"}\n\n\nclass OverridesParser:\n functions: Functions\n\n @classmethod\n def create(cls, config_loader: Optional[ConfigLoader] = None) -> \"OverridesParser\":\n functions = create_functions()\n return cls(functions=functions, config_loader=config_loader)\n\n def __init__(\n self, functions: Functions, config_loader: Optional[ConfigLoader] = None\n ):\n self.functions = functions\n self.config_loader = config_loader\n\n def parse_rule(self, s: str, rule_name: str) -> Any:\n error_listener = HydraErrorListener()\n istream = InputStream(s)\n lexer = OverrideLexer(istream)\n lexer.removeErrorListeners()\n lexer.addErrorListener(error_listener)\n\n # Set the lexer in the correct mode to parse the desired rule.\n lexer_mode = \"KEY\" if rule_name in KEY_RULES else \"VALUE\"\n lexer.mode(getattr(OverrideLexer, lexer_mode))\n\n stream = CommonTokenStream(lexer)\n parser = OverrideParser(stream)\n parser.removeErrorListeners()\n parser.addErrorListener(error_listener)\n visitor = HydraOverrideVisitor(self.functions)\n rule = getattr(parser, rule_name)\n tree = rule()\n ret = visitor.visit(tree)\n if isinstance(ret, Override):\n ret.input_line = s\n return ret\n\n def parse_override(self, s: str) -> Override:\n ret = self.parse_rule(s, \"override\")\n assert isinstance(ret, Override)\n return ret\n\n def parse_overrides(self, overrides: List[str]) -> List[Override]:\n ret: List[Override] = []\n for override in overrides:\n try:\n parsed = self.parse_rule(override, \"override\")\n except HydraException as e:\n cause = e.__cause__\n if isinstance(cause, LexerNoViableAltException):\n prefix = \"LexerNoViableAltException: \"\n start = len(prefix) + cause.startIndex + 1\n msg = f\"{prefix}{override}\" f\"\\n{'^'.rjust(start)}\"\n e.__cause__ = None\n elif isinstance(cause, RecognitionException):\n prefix = f\"{e}\"\n msg = f\"{prefix}\"\n e.__cause__ = None\n else:\n msg = f\"Error parsing override '{override}'\" f\"\\n{e}\"\n raise OverrideParseException(\n override=override,\n message=f\"{msg}\"\n f\"\\nSee https://hydra.cc/docs/next/advanced/override_grammar/basic for details\",\n ) from e.__cause__\n assert isinstance(parsed, Override)\n parsed.config_loader = self.config_loader\n ret.append(parsed)\n return ret\n\n\ndef create_functions() -> Functions:\n functions = Functions()\n # casts\n functions.register(name=\"int\", func=grammar_functions.cast_int)\n functions.register(name=\"str\", func=grammar_functions.cast_str)\n functions.register(name=\"bool\", func=grammar_functions.cast_bool)\n 
functions.register(name=\"float\", func=grammar_functions.cast_float)\n # sweeps\n functions.register(name=\"choice\", func=grammar_functions.choice)\n functions.register(name=\"range\", func=grammar_functions.range)\n functions.register(name=\"interval\", func=grammar_functions.interval)\n # misc\n functions.register(name=\"tag\", func=grammar_functions.tag)\n functions.register(name=\"sort\", func=grammar_functions.sort)\n functions.register(name=\"shuffle\", func=grammar_functions.shuffle)\n functions.register(name=\"glob\", func=grammar_functions.glob)\n return functions\n", "path": "hydra/core/override_parser/overrides_parser.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport sys\nfrom typing import Any, List, Optional\n\nfrom antlr4.error.Errors import LexerNoViableAltException, RecognitionException\n\nfrom hydra._internal.grammar import grammar_functions\nfrom hydra._internal.grammar.functions import Functions\nfrom hydra.core.config_loader import ConfigLoader\nfrom hydra.core.override_parser.overrides_visitor import (\n HydraErrorListener,\n HydraOverrideVisitor,\n)\nfrom hydra.core.override_parser.types import Override\nfrom hydra.errors import HydraException, OverrideParseException\n\ntry:\n from hydra.grammar.gen.OverrideLexer import (\n CommonTokenStream,\n InputStream,\n OverrideLexer,\n )\n from hydra.grammar.gen.OverrideParser import OverrideParser\n\nexcept ModuleNotFoundError:\n print(\n \"Error importing generated parsers, run `python setup.py antlr` to regenerate.\"\n )\n sys.exit(1)\n\n# The set of parser rules that require the lexer to be in lexical mode `KEY`.\nKEY_RULES = {\"key\", \"override\", \"package\", \"packageOrGroup\"}\n\n\nclass OverridesParser:\n functions: Functions\n\n @classmethod\n def create(cls, config_loader: Optional[ConfigLoader] = None) -> \"OverridesParser\":\n functions = create_functions()\n return cls(functions=functions, config_loader=config_loader)\n\n def __init__(\n self, functions: Functions, config_loader: Optional[ConfigLoader] = None\n ):\n self.functions = functions\n self.config_loader = config_loader\n\n def parse_rule(self, s: str, rule_name: str) -> Any:\n error_listener = HydraErrorListener()\n istream = InputStream(s)\n lexer = OverrideLexer(istream)\n lexer.removeErrorListeners()\n lexer.addErrorListener(error_listener)\n\n # Set the lexer in the correct mode to parse the desired rule.\n if rule_name not in KEY_RULES:\n lexer.mode(OverrideLexer.VALUE_MODE)\n\n stream = CommonTokenStream(lexer)\n parser = OverrideParser(stream)\n parser.removeErrorListeners()\n parser.addErrorListener(error_listener)\n visitor = HydraOverrideVisitor(self.functions)\n rule = getattr(parser, rule_name)\n tree = rule()\n ret = visitor.visit(tree)\n if isinstance(ret, Override):\n ret.input_line = s\n return ret\n\n def parse_override(self, s: str) -> Override:\n ret = self.parse_rule(s, \"override\")\n assert isinstance(ret, Override)\n return ret\n\n def parse_overrides(self, overrides: List[str]) -> List[Override]:\n ret: List[Override] = []\n for override in overrides:\n try:\n parsed = self.parse_rule(override, \"override\")\n except HydraException as e:\n cause = e.__cause__\n if isinstance(cause, LexerNoViableAltException):\n prefix = \"LexerNoViableAltException: \"\n start = len(prefix) + cause.startIndex + 1\n msg = f\"{prefix}{override}\" f\"\\n{'^'.rjust(start)}\"\n e.__cause__ = None\n elif isinstance(cause, RecognitionException):\n prefix = f\"{e}\"\n msg = f\"{prefix}\"\n 
e.__cause__ = None\n else:\n msg = f\"Error parsing override '{override}'\" f\"\\n{e}\"\n raise OverrideParseException(\n override=override,\n message=f\"{msg}\"\n f\"\\nSee https://hydra.cc/docs/next/advanced/override_grammar/basic for details\",\n ) from e.__cause__\n assert isinstance(parsed, Override)\n parsed.config_loader = self.config_loader\n ret.append(parsed)\n return ret\n\n\ndef create_functions() -> Functions:\n functions = Functions()\n # casts\n functions.register(name=\"int\", func=grammar_functions.cast_int)\n functions.register(name=\"str\", func=grammar_functions.cast_str)\n functions.register(name=\"bool\", func=grammar_functions.cast_bool)\n functions.register(name=\"float\", func=grammar_functions.cast_float)\n # sweeps\n functions.register(name=\"choice\", func=grammar_functions.choice)\n functions.register(name=\"range\", func=grammar_functions.range)\n functions.register(name=\"interval\", func=grammar_functions.interval)\n # misc\n functions.register(name=\"tag\", func=grammar_functions.tag)\n functions.register(name=\"sort\", func=grammar_functions.sort)\n functions.register(name=\"shuffle\", func=grammar_functions.shuffle)\n functions.register(name=\"glob\", func=grammar_functions.glob)\n return functions\n", "path": "hydra/core/override_parser/overrides_parser.py"}]} | 1,857 | 162 |
gh_patches_debug_38850 | rasdani/github-patches | git_diff | sanic-org__sanic-2170 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
deprecate CompositionView ?
Currently sanic offers a class called `CompositionView`
I really am struggling to find any utility in this class, since
```python
from sanic.views import CompositionView
def get_handler(request):
return text("I am a get method")
view = CompositionView()
view.add(["GET"], get_handler)
view.add(["POST", "PUT"], lambda request: text("I am a post/put method"))
# Use the new view to handle requests to the base URL
app.add_route(view, "/")
```
Seems much more confusing to me than
```python
def get_handler(request):
return text("I am a get method")
app.route("/", methods=["GET"])(get_handler)
app.route("/", methods=["POST", "PUT"])(lambda request: text("I am a post/put method"))
```
Can anyone offer a compelling use case for CompositionView?
If not, I would suggest deprecating it
https://github.com/sanic-org/sanic/blob/master/sanic/views.py
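If deprecation is the route taken, a minimal sketch (illustrative only, not an actual patch) would be to emit a `DeprecationWarning` from the constructor:

```python
import warnings

class CompositionView:
    def __init__(self):
        # sketch only: the rest of the class would stay unchanged
        warnings.warn(
            "CompositionView is deprecated; use HTTPMethodView or app.route() instead.",
            DeprecationWarning,
        )
        self.handlers = {}
```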
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sanic/views.py`
Content:
```
1 from typing import Any, Callable, List
2
3 from sanic.constants import HTTP_METHODS
4 from sanic.exceptions import InvalidUsage
5
6
7 class HTTPMethodView:
8 """Simple class based implementation of view for the sanic.
9 You should implement methods (get, post, put, patch, delete) for the class
10 to every HTTP method you want to support.
11
12 For example:
13
14 .. code-block:: python
15
16 class DummyView(HTTPMethodView):
17 def get(self, request, *args, **kwargs):
18 return text('I am get method')
19 def put(self, request, *args, **kwargs):
20 return text('I am put method')
21
22 If someone tries to use a non-implemented method, there will be a
23 405 response.
24
25 If you need any url params just mention them in method definition:
26
27 .. code-block:: python
28
29 class DummyView(HTTPMethodView):
30 def get(self, request, my_param_here, *args, **kwargs):
31 return text('I am get method with %s' % my_param_here)
32
33 To add the view into the routing you could use
34
35 1) ``app.add_route(DummyView.as_view(), '/')``, OR
36 2) ``app.route('/')(DummyView.as_view())``
37
38 To add any decorator you could set it into decorators variable
39 """
40
41 decorators: List[Callable[[Callable[..., Any]], Callable[..., Any]]] = []
42
43 def dispatch_request(self, request, *args, **kwargs):
44 handler = getattr(self, request.method.lower(), None)
45 return handler(request, *args, **kwargs)
46
47 @classmethod
48 def as_view(cls, *class_args, **class_kwargs):
49 """Return view function for use with the routing system, that
50 dispatches request to appropriate handler method.
51 """
52
53 def view(*args, **kwargs):
54 self = view.view_class(*class_args, **class_kwargs)
55 return self.dispatch_request(*args, **kwargs)
56
57 if cls.decorators:
58 view.__module__ = cls.__module__
59 for decorator in cls.decorators:
60 view = decorator(view)
61
62 view.view_class = cls
63 view.__doc__ = cls.__doc__
64 view.__module__ = cls.__module__
65 view.__name__ = cls.__name__
66 return view
67
68
69 def stream(func):
70 func.is_stream = True
71 return func
72
73
74 class CompositionView:
75 """Simple method-function mapped view for the sanic.
76 You can add handler functions to methods (get, post, put, patch, delete)
77 for every HTTP method you want to support.
78
79 For example:
80
81 .. code-block:: python
82
83 view = CompositionView()
84 view.add(['GET'], lambda request: text('I am get method'))
85 view.add(['POST', 'PUT'], lambda request: text('I am post/put method'))
86
87 If someone tries to use a non-implemented method, there will be a
88 405 response.
89 """
90
91 def __init__(self):
92 self.handlers = {}
93 self.name = self.__class__.__name__
94
95 def __name__(self):
96 return self.name
97
98 def add(self, methods, handler, stream=False):
99 if stream:
100 handler.is_stream = stream
101 for method in methods:
102 if method not in HTTP_METHODS:
103 raise InvalidUsage(f"{method} is not a valid HTTP method.")
104
105 if method in self.handlers:
106 raise InvalidUsage(f"Method {method} is already registered.")
107 self.handlers[method] = handler
108
109 def __call__(self, request, *args, **kwargs):
110 handler = self.handlers[request.method.upper()]
111 return handler(request, *args, **kwargs)
112
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sanic/views.py b/sanic/views.py
--- a/sanic/views.py
+++ b/sanic/views.py
@@ -1,9 +1,25 @@
-from typing import Any, Callable, List
+from __future__ import annotations
+
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Callable,
+ Iterable,
+ List,
+ Optional,
+ Union,
+)
+from warnings import warn
from sanic.constants import HTTP_METHODS
from sanic.exceptions import InvalidUsage
+if TYPE_CHECKING:
+ from sanic import Sanic
+ from sanic.blueprints import Blueprint
+
+
class HTTPMethodView:
"""Simple class based implementation of view for the sanic.
You should implement methods (get, post, put, patch, delete) for the class
@@ -40,6 +56,31 @@
decorators: List[Callable[[Callable[..., Any]], Callable[..., Any]]] = []
+ def __init_subclass__(
+ cls,
+ attach: Optional[Union[Sanic, Blueprint]] = None,
+ uri: str = "",
+ methods: Iterable[str] = frozenset({"GET"}),
+ host: Optional[str] = None,
+ strict_slashes: Optional[bool] = None,
+ version: Optional[int] = None,
+ name: Optional[str] = None,
+ stream: bool = False,
+ version_prefix: str = "/v",
+ ) -> None:
+ if attach:
+ cls.attach(
+ attach,
+ uri=uri,
+ methods=methods,
+ host=host,
+ strict_slashes=strict_slashes,
+ version=version,
+ name=name,
+ stream=stream,
+ version_prefix=version_prefix,
+ )
+
def dispatch_request(self, request, *args, **kwargs):
handler = getattr(self, request.method.lower(), None)
return handler(request, *args, **kwargs)
@@ -65,6 +106,31 @@
view.__name__ = cls.__name__
return view
+ @classmethod
+ def attach(
+ cls,
+ to: Union[Sanic, Blueprint],
+ uri: str,
+ methods: Iterable[str] = frozenset({"GET"}),
+ host: Optional[str] = None,
+ strict_slashes: Optional[bool] = None,
+ version: Optional[int] = None,
+ name: Optional[str] = None,
+ stream: bool = False,
+ version_prefix: str = "/v",
+ ) -> None:
+ to.add_route(
+ cls.as_view(),
+ uri=uri,
+ methods=methods,
+ host=host,
+ strict_slashes=strict_slashes,
+ version=version,
+ name=name,
+ stream=stream,
+ version_prefix=version_prefix,
+ )
+
def stream(func):
func.is_stream = True
@@ -91,6 +157,11 @@
def __init__(self):
self.handlers = {}
self.name = self.__class__.__name__
+ warn(
+ "CompositionView has been deprecated and will be removed in "
+ "v21.12. Please update your view to HTTPMethodView.",
+ DeprecationWarning,
+ )
def __name__(self):
return self.name
| {"golden_diff": "diff --git a/sanic/views.py b/sanic/views.py\n--- a/sanic/views.py\n+++ b/sanic/views.py\n@@ -1,9 +1,25 @@\n-from typing import Any, Callable, List\n+from __future__ import annotations\n+\n+from typing import (\n+ TYPE_CHECKING,\n+ Any,\n+ Callable,\n+ Iterable,\n+ List,\n+ Optional,\n+ Union,\n+)\n+from warnings import warn\n \n from sanic.constants import HTTP_METHODS\n from sanic.exceptions import InvalidUsage\n \n \n+if TYPE_CHECKING:\n+ from sanic import Sanic\n+ from sanic.blueprints import Blueprint\n+\n+\n class HTTPMethodView:\n \"\"\"Simple class based implementation of view for the sanic.\n You should implement methods (get, post, put, patch, delete) for the class\n@@ -40,6 +56,31 @@\n \n decorators: List[Callable[[Callable[..., Any]], Callable[..., Any]]] = []\n \n+ def __init_subclass__(\n+ cls,\n+ attach: Optional[Union[Sanic, Blueprint]] = None,\n+ uri: str = \"\",\n+ methods: Iterable[str] = frozenset({\"GET\"}),\n+ host: Optional[str] = None,\n+ strict_slashes: Optional[bool] = None,\n+ version: Optional[int] = None,\n+ name: Optional[str] = None,\n+ stream: bool = False,\n+ version_prefix: str = \"/v\",\n+ ) -> None:\n+ if attach:\n+ cls.attach(\n+ attach,\n+ uri=uri,\n+ methods=methods,\n+ host=host,\n+ strict_slashes=strict_slashes,\n+ version=version,\n+ name=name,\n+ stream=stream,\n+ version_prefix=version_prefix,\n+ )\n+\n def dispatch_request(self, request, *args, **kwargs):\n handler = getattr(self, request.method.lower(), None)\n return handler(request, *args, **kwargs)\n@@ -65,6 +106,31 @@\n view.__name__ = cls.__name__\n return view\n \n+ @classmethod\n+ def attach(\n+ cls,\n+ to: Union[Sanic, Blueprint],\n+ uri: str,\n+ methods: Iterable[str] = frozenset({\"GET\"}),\n+ host: Optional[str] = None,\n+ strict_slashes: Optional[bool] = None,\n+ version: Optional[int] = None,\n+ name: Optional[str] = None,\n+ stream: bool = False,\n+ version_prefix: str = \"/v\",\n+ ) -> None:\n+ to.add_route(\n+ cls.as_view(),\n+ uri=uri,\n+ methods=methods,\n+ host=host,\n+ strict_slashes=strict_slashes,\n+ version=version,\n+ name=name,\n+ stream=stream,\n+ version_prefix=version_prefix,\n+ )\n+\n \n def stream(func):\n func.is_stream = True\n@@ -91,6 +157,11 @@\n def __init__(self):\n self.handlers = {}\n self.name = self.__class__.__name__\n+ warn(\n+ \"CompositionView has been deprecated and will be removed in \"\n+ \"v21.12. Please update your view to HTTPMethodView.\",\n+ DeprecationWarning,\n+ )\n \n def __name__(self):\n return self.name\n", "issue": "deprecate CompositionView ? 
\nCurrently sanic offers a class called `CompositionView`\r\n\r\nI really am struggling to find any utility in this class, since \r\n\r\n```python\r\nfrom sanic.views import CompositionView\r\n\r\ndef get_handler(request):\r\n return text(\"I am a get method\")\r\n\r\nview = CompositionView()\r\nview.add([\"GET\"], get_handler)\r\nview.add([\"POST\", \"PUT\"], lambda request: text(\"I am a post/put method\"))\r\n\r\n# Use the new view to handle requests to the base URL\r\napp.add_route(view, \"/\")\r\n```\r\n\r\n\r\nSeems much more confusing to me than\r\n\r\n```python\r\ndef get_handler(request):\r\n return text(\"I am a get method\")\r\n\r\napp.route(\"/\", methods=[\"GET\"])(get_handler)\r\napp.route(\"/\", methods=[\"POST\", \"PUT\"])(lambda request: text(\"I am a post/put method\"))\r\n```\r\n\r\nCan anyone offer a compelling use case for CompositionView?\r\n\r\nIf not, I would suggest to deprecate it \r\n\r\n\r\nhttps://github.com/sanic-org/sanic/blob/master/sanic/views.py\n", "before_files": [{"content": "from typing import Any, Callable, List\n\nfrom sanic.constants import HTTP_METHODS\nfrom sanic.exceptions import InvalidUsage\n\n\nclass HTTPMethodView:\n \"\"\"Simple class based implementation of view for the sanic.\n You should implement methods (get, post, put, patch, delete) for the class\n to every HTTP method you want to support.\n\n For example:\n\n .. code-block:: python\n\n class DummyView(HTTPMethodView):\n def get(self, request, *args, **kwargs):\n return text('I am get method')\n def put(self, request, *args, **kwargs):\n return text('I am put method')\n\n If someone tries to use a non-implemented method, there will be a\n 405 response.\n\n If you need any url params just mention them in method definition:\n\n .. code-block:: python\n\n class DummyView(HTTPMethodView):\n def get(self, request, my_param_here, *args, **kwargs):\n return text('I am get method with %s' % my_param_here)\n\n To add the view into the routing you could use\n\n 1) ``app.add_route(DummyView.as_view(), '/')``, OR\n 2) ``app.route('/')(DummyView.as_view())``\n\n To add any decorator you could set it into decorators variable\n \"\"\"\n\n decorators: List[Callable[[Callable[..., Any]], Callable[..., Any]]] = []\n\n def dispatch_request(self, request, *args, **kwargs):\n handler = getattr(self, request.method.lower(), None)\n return handler(request, *args, **kwargs)\n\n @classmethod\n def as_view(cls, *class_args, **class_kwargs):\n \"\"\"Return view function for use with the routing system, that\n dispatches request to appropriate handler method.\n \"\"\"\n\n def view(*args, **kwargs):\n self = view.view_class(*class_args, **class_kwargs)\n return self.dispatch_request(*args, **kwargs)\n\n if cls.decorators:\n view.__module__ = cls.__module__\n for decorator in cls.decorators:\n view = decorator(view)\n\n view.view_class = cls\n view.__doc__ = cls.__doc__\n view.__module__ = cls.__module__\n view.__name__ = cls.__name__\n return view\n\n\ndef stream(func):\n func.is_stream = True\n return func\n\n\nclass CompositionView:\n \"\"\"Simple method-function mapped view for the sanic.\n You can add handler functions to methods (get, post, put, patch, delete)\n for every HTTP method you want to support.\n\n For example:\n\n .. 
code-block:: python\n\n view = CompositionView()\n view.add(['GET'], lambda request: text('I am get method'))\n view.add(['POST', 'PUT'], lambda request: text('I am post/put method'))\n\n If someone tries to use a non-implemented method, there will be a\n 405 response.\n \"\"\"\n\n def __init__(self):\n self.handlers = {}\n self.name = self.__class__.__name__\n\n def __name__(self):\n return self.name\n\n def add(self, methods, handler, stream=False):\n if stream:\n handler.is_stream = stream\n for method in methods:\n if method not in HTTP_METHODS:\n raise InvalidUsage(f\"{method} is not a valid HTTP method.\")\n\n if method in self.handlers:\n raise InvalidUsage(f\"Method {method} is already registered.\")\n self.handlers[method] = handler\n\n def __call__(self, request, *args, **kwargs):\n handler = self.handlers[request.method.upper()]\n return handler(request, *args, **kwargs)\n", "path": "sanic/views.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Iterable,\n List,\n Optional,\n Union,\n)\nfrom warnings import warn\n\nfrom sanic.constants import HTTP_METHODS\nfrom sanic.exceptions import InvalidUsage\n\n\nif TYPE_CHECKING:\n from sanic import Sanic\n from sanic.blueprints import Blueprint\n\n\nclass HTTPMethodView:\n \"\"\"Simple class based implementation of view for the sanic.\n You should implement methods (get, post, put, patch, delete) for the class\n to every HTTP method you want to support.\n\n For example:\n\n .. code-block:: python\n\n class DummyView(HTTPMethodView):\n def get(self, request, *args, **kwargs):\n return text('I am get method')\n def put(self, request, *args, **kwargs):\n return text('I am put method')\n\n If someone tries to use a non-implemented method, there will be a\n 405 response.\n\n If you need any url params just mention them in method definition:\n\n .. 
code-block:: python\n\n class DummyView(HTTPMethodView):\n def get(self, request, my_param_here, *args, **kwargs):\n return text('I am get method with %s' % my_param_here)\n\n To add the view into the routing you could use\n\n 1) ``app.add_route(DummyView.as_view(), '/')``, OR\n 2) ``app.route('/')(DummyView.as_view())``\n\n To add any decorator you could set it into decorators variable\n \"\"\"\n\n decorators: List[Callable[[Callable[..., Any]], Callable[..., Any]]] = []\n\n def __init_subclass__(\n cls,\n attach: Optional[Union[Sanic, Blueprint]] = None,\n uri: str = \"\",\n methods: Iterable[str] = frozenset({\"GET\"}),\n host: Optional[str] = None,\n strict_slashes: Optional[bool] = None,\n version: Optional[int] = None,\n name: Optional[str] = None,\n stream: bool = False,\n version_prefix: str = \"/v\",\n ) -> None:\n if attach:\n cls.attach(\n attach,\n uri=uri,\n methods=methods,\n host=host,\n strict_slashes=strict_slashes,\n version=version,\n name=name,\n stream=stream,\n version_prefix=version_prefix,\n )\n\n def dispatch_request(self, request, *args, **kwargs):\n handler = getattr(self, request.method.lower(), None)\n return handler(request, *args, **kwargs)\n\n @classmethod\n def as_view(cls, *class_args, **class_kwargs):\n \"\"\"Return view function for use with the routing system, that\n dispatches request to appropriate handler method.\n \"\"\"\n\n def view(*args, **kwargs):\n self = view.view_class(*class_args, **class_kwargs)\n return self.dispatch_request(*args, **kwargs)\n\n if cls.decorators:\n view.__module__ = cls.__module__\n for decorator in cls.decorators:\n view = decorator(view)\n\n view.view_class = cls\n view.__doc__ = cls.__doc__\n view.__module__ = cls.__module__\n view.__name__ = cls.__name__\n return view\n\n @classmethod\n def attach(\n cls,\n to: Union[Sanic, Blueprint],\n uri: str,\n methods: Iterable[str] = frozenset({\"GET\"}),\n host: Optional[str] = None,\n strict_slashes: Optional[bool] = None,\n version: Optional[int] = None,\n name: Optional[str] = None,\n stream: bool = False,\n version_prefix: str = \"/v\",\n ) -> None:\n to.add_route(\n cls.as_view(),\n uri=uri,\n methods=methods,\n host=host,\n strict_slashes=strict_slashes,\n version=version,\n name=name,\n stream=stream,\n version_prefix=version_prefix,\n )\n\n\ndef stream(func):\n func.is_stream = True\n return func\n\n\nclass CompositionView:\n \"\"\"Simple method-function mapped view for the sanic.\n You can add handler functions to methods (get, post, put, patch, delete)\n for every HTTP method you want to support.\n\n For example:\n\n .. code-block:: python\n\n view = CompositionView()\n view.add(['GET'], lambda request: text('I am get method'))\n view.add(['POST', 'PUT'], lambda request: text('I am post/put method'))\n\n If someone tries to use a non-implemented method, there will be a\n 405 response.\n \"\"\"\n\n def __init__(self):\n self.handlers = {}\n self.name = self.__class__.__name__\n warn(\n \"CompositionView has been deprecated and will be removed in \"\n \"v21.12. 
Please update your view to HTTPMethodView.\",\n DeprecationWarning,\n )\n\n def __name__(self):\n return self.name\n\n def add(self, methods, handler, stream=False):\n if stream:\n handler.is_stream = stream\n for method in methods:\n if method not in HTTP_METHODS:\n raise InvalidUsage(f\"{method} is not a valid HTTP method.\")\n\n if method in self.handlers:\n raise InvalidUsage(f\"Method {method} is already registered.\")\n self.handlers[method] = handler\n\n def __call__(self, request, *args, **kwargs):\n handler = self.handlers[request.method.upper()]\n return handler(request, *args, **kwargs)\n", "path": "sanic/views.py"}]} | 1,516 | 757 |
gh_patches_debug_12631 | rasdani/github-patches | git_diff | cupy__cupy-6118 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Indexing with assignment between broadcastable arrays is inconsistent with NumPy
When performing `A[idx, ...] = B` with `B` broadcastable over `A[idx, ...]` (so no storage expansion for `A[idx, ...]` is necessary) and with `B.ndim > A.ndim`, CuPy throws a shape mismatch error while NumPy handles this case.
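Equivalently (an illustrative sketch, not a description of NumPy internals), NumPy behaves as if the leading length-1 axes of `B` were squeezed away before the assignment:

```python
import numpy as np

A = np.zeros((3, 3, 3))
B = np.ones((1, 3, 3))

A[0, ...] = B             # accepted by NumPy
A[0, ...] = B.squeeze(0)  # effectively the same assignment
```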
* Code to reproduce
```python
In [1]: import numpy
In [2]: import cupy
In [3]: def test(module):
...: x = module.zeros((3, 3, 3))
...: y = module.ones((1, 3, 3))
...: x[0, ...] = y
...: return x
...:
...:
In [4]: test(numpy)
Out[4]:
array([[[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]],
[[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]],
[[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]]])
In [5]: test(cupy)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-5-3f849ce2914e> in <module>()
----> 1 test(cupy)
<ipython-input-3-450cff366473> in test(module)
2 x = module.zeros((3, 3, 3))
3 y = module.ones((1, 3, 3))
----> 4 x[0, ...] = y
5 return x
cupy/_core/core.pyx in cupy._core.core.ndarray.__setitem__()
cupy/_core/_routines_indexing.pyx in cupy._core._routines_indexing._ndarray_setitem()
cupy/_core/_routines_indexing.pyx in cupy._core._routines_indexing._scatter_op()
cupy/_core/_kernel.pyx in cupy._core._kernel.ufunc.__call__()
cupy/_core/_kernel.pyx in cupy._core._kernel._get_out_args()
ValueError: Out shape is mismatched
```
* Conditions
```
OS : Linux-5.4.0-81-generic-x86_64-with-debian-bullseye-sid
Python Version : 3.6.7
CuPy Version : 9.5.0
CuPy Platform : NVIDIA CUDA
NumPy Version : 1.19.5
SciPy Version : None
Cython Build Version : 0.29.24
Cython Runtime Version : None
CUDA Root : /home/nik/.conda/envs/pytorch-cuda-dev
nvcc PATH : /home/nik/.conda/envs/pytorch-cuda-dev/bin/nvcc
CUDA Build Version : 11020
CUDA Driver Version : 11030
CUDA Runtime Version : 11020
cuBLAS Version : (available)
cuFFT Version : 10401
cuRAND Version : 10203
cuSOLVER Version : (11, 1, 0)
cuSPARSE Version : (available)
NVRTC Version : (11, 2)
Thrust Version : 101000
CUB Build Version : 101000
Jitify Build Version : <unknown>
cuDNN Build Version : 8201
cuDNN Version : 8004
NCCL Build Version : None
NCCL Runtime Version : None
cuTENSOR Version : None
cuSPARSELt Build Version : None
Device 0 Name : NVIDIA GeForce RTX 2060
Device 0 Compute Capability : 75
Device 0 PCI Bus ID : 0000:01:00.0
Device 1 Name : NVIDIA GeForce RTX 2060
Device 1 Compute Capability : 75
Device 1 PCI Bus ID : 0000:21:00.0
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cupy/_manipulation/basic.py`
Content:
```
1 import numpy
2
3 from cupy import _core
4 from cupy._core import _fusion_interface
5 from cupy._core import fusion
6 from cupy._sorting import search
7 from cupy_backends.cuda.api import runtime
8
9
10 def copyto(dst, src, casting='same_kind', where=None):
11 """Copies values from one array to another with broadcasting.
12
13 This function can be called for arrays on different devices. In this case,
14 casting, ``where``, and broadcasting is not supported, and an exception is
15 raised if these are used.
16
17 Args:
18 dst (cupy.ndarray): Target array.
19 src (cupy.ndarray): Source array.
20 casting (str): Casting rule. See :func:`numpy.can_cast` for detail.
21 where (cupy.ndarray of bool): If specified, this array acts as a mask,
22 and an element is copied only if the corresponding element of
23 ``where`` is True.
24
25 .. seealso:: :func:`numpy.copyto`
26
27 """
28
29 src_type = type(src)
30 src_is_python_scalar = src_type in (
31 int, bool, float, complex,
32 fusion._FusionVarScalar, _fusion_interface._ScalarProxy)
33 if src_is_python_scalar:
34 src_dtype = numpy.dtype(type(src))
35 can_cast = numpy.can_cast(src, dst.dtype, casting)
36 else:
37 src_dtype = src.dtype
38 can_cast = numpy.can_cast(src_dtype, dst.dtype, casting)
39
40 if not can_cast:
41 raise TypeError('Cannot cast %s to %s in %s casting mode' %
42 (src_dtype, dst.dtype, casting))
43 if fusion._is_fusing():
44 if where is None:
45 _core.elementwise_copy(src, dst)
46 else:
47 fusion._call_ufunc(search._where_ufunc, where, src, dst, dst)
48 return
49
50 if where is not None:
51 _core.elementwise_copy(src, dst, _where=where)
52 return
53
54 if dst.size == 0:
55 return
56
57 if src_is_python_scalar:
58 dst.fill(src)
59 return
60
61 if _can_memcpy(dst, src):
62 dst.data.copy_from_async(src.data, src.nbytes)
63 return
64
65 device = dst.device
66 prev_device = runtime.getDevice()
67 try:
68 runtime.setDevice(device.id)
69 if src.device != device:
70 src = src.copy()
71 _core.elementwise_copy(src, dst)
72 finally:
73 runtime.setDevice(prev_device)
74
75
76 def _can_memcpy(dst, src):
77 c_contiguous = dst.flags.c_contiguous and src.flags.c_contiguous
78 f_contiguous = dst.flags.f_contiguous and src.flags.f_contiguous
79 return (c_contiguous or f_contiguous) and dst.dtype == src.dtype and \
80 dst.size == src.size
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cupy/_manipulation/basic.py b/cupy/_manipulation/basic.py
--- a/cupy/_manipulation/basic.py
+++ b/cupy/_manipulation/basic.py
@@ -40,6 +40,16 @@
if not can_cast:
raise TypeError('Cannot cast %s to %s in %s casting mode' %
(src_dtype, dst.dtype, casting))
+
+ if not src_is_python_scalar and src.ndim > dst.ndim:
+ # NumPy allows stripping leading unit dimensions.
+ try:
+ src = src.squeeze(tuple(range(src.ndim - dst.ndim)))
+ except ValueError:
+ # "cannot select an axis to squeeze out
+ # which has size not equal to one"
+ pass # raise an error later
+
if fusion._is_fusing():
if where is None:
_core.elementwise_copy(src, dst)
| {"golden_diff": "diff --git a/cupy/_manipulation/basic.py b/cupy/_manipulation/basic.py\n--- a/cupy/_manipulation/basic.py\n+++ b/cupy/_manipulation/basic.py\n@@ -40,6 +40,16 @@\n if not can_cast:\n raise TypeError('Cannot cast %s to %s in %s casting mode' %\n (src_dtype, dst.dtype, casting))\n+\n+ if not src_is_python_scalar and src.ndim > dst.ndim:\n+ # NumPy allows stripping leading unit dimensions.\n+ try:\n+ src = src.squeeze(tuple(range(src.ndim - dst.ndim)))\n+ except ValueError:\n+ # \"cannot select an axis to squeeze out\n+ # which has size not equal to one\"\n+ pass # raise an error later\n+\n if fusion._is_fusing():\n if where is None:\n _core.elementwise_copy(src, dst)\n", "issue": "Indexing with assignment between broadcastable arrays is inconsistent with NumPy\nWhen performing `A[idx, ...] = B` with `B` broadcastable over `A[idx, ...]` (so no storage expansion for `A[idx, ...]` is necessary) with `B.ndim > A.ndim` CuPy throws a shape mismatch error while NumPy handles this case.\r\n\r\n* Code to reproduce\r\n```python\r\nIn [1]: import numpy\r\n\r\nIn [2]: import cupy\r\n\r\nIn [3]: def test(module):\r\n ...: x = module.zeros((3, 3, 3))\r\n ...: y = module.ones((1, 3, 3))\r\n ...: x[0, ...] = y\r\n ...: return x\r\n ...: \r\n ...: \r\n\r\nIn [4]: test(numpy)\r\nOut[4]: \r\narray([[[1., 1., 1.],\r\n [1., 1., 1.],\r\n [1., 1., 1.]],\r\n\r\n [[0., 0., 0.],\r\n [0., 0., 0.],\r\n [0., 0., 0.]],\r\n\r\n [[0., 0., 0.],\r\n [0., 0., 0.],\r\n [0., 0., 0.]]])\r\n\r\nIn [5]: test(cupy)\r\n---------------------------------------------------------------------------\r\nValueError Traceback (most recent call last)\r\n<ipython-input-5-3f849ce2914e> in <module>()\r\n----> 1 test(cupy)\r\n\r\n<ipython-input-3-450cff366473> in test(module)\r\n 2 x = module.zeros((3, 3, 3))\r\n 3 y = module.ones((1, 3, 3))\r\n----> 4 x[0, ...] 
= y\r\n 5 return x\r\n\r\ncupy/_core/core.pyx in cupy._core.core.ndarray.__setitem__()\r\n\r\ncupy/_core/_routines_indexing.pyx in cupy._core._routines_indexing._ndarray_setitem()\r\n\r\ncupy/_core/_routines_indexing.pyx in cupy._core._routines_indexing._scatter_op()\r\n\r\ncupy/_core/_kernel.pyx in cupy._core._kernel.ufunc.__call__()\r\n\r\ncupy/_core/_kernel.pyx in cupy._core._kernel._get_out_args()\r\n\r\nValueError: Out shape is mismatched\r\n\r\n```\r\n\r\n* Conditions\r\n```\r\nOS : Linux-5.4.0-81-generic-x86_64-with-debian-bullseye-sid\r\nPython Version : 3.6.7\r\nCuPy Version : 9.5.0\r\nCuPy Platform : NVIDIA CUDA\r\nNumPy Version : 1.19.5\r\nSciPy Version : None\r\nCython Build Version : 0.29.24\r\nCython Runtime Version : None\r\nCUDA Root : /home/nik/.conda/envs/pytorch-cuda-dev\r\nnvcc PATH : /home/nik/.conda/envs/pytorch-cuda-dev/bin/nvcc\r\nCUDA Build Version : 11020\r\nCUDA Driver Version : 11030\r\nCUDA Runtime Version : 11020\r\ncuBLAS Version : (available)\r\ncuFFT Version : 10401\r\ncuRAND Version : 10203\r\ncuSOLVER Version : (11, 1, 0)\r\ncuSPARSE Version : (available)\r\nNVRTC Version : (11, 2)\r\nThrust Version : 101000\r\nCUB Build Version : 101000\r\nJitify Build Version : <unknown>\r\ncuDNN Build Version : 8201\r\ncuDNN Version : 8004\r\nNCCL Build Version : None\r\nNCCL Runtime Version : None\r\ncuTENSOR Version : None\r\ncuSPARSELt Build Version : None\r\nDevice 0 Name : NVIDIA GeForce RTX 2060\r\nDevice 0 Compute Capability : 75\r\nDevice 0 PCI Bus ID : 0000:01:00.0\r\nDevice 1 Name : NVIDIA GeForce RTX 2060\r\nDevice 1 Compute Capability : 75\r\nDevice 1 PCI Bus ID : 0000:21:00.0\r\n```\r\n\n", "before_files": [{"content": "import numpy\n\nfrom cupy import _core\nfrom cupy._core import _fusion_interface\nfrom cupy._core import fusion\nfrom cupy._sorting import search\nfrom cupy_backends.cuda.api import runtime\n\n\ndef copyto(dst, src, casting='same_kind', where=None):\n \"\"\"Copies values from one array to another with broadcasting.\n\n This function can be called for arrays on different devices. In this case,\n casting, ``where``, and broadcasting is not supported, and an exception is\n raised if these are used.\n\n Args:\n dst (cupy.ndarray): Target array.\n src (cupy.ndarray): Source array.\n casting (str): Casting rule. See :func:`numpy.can_cast` for detail.\n where (cupy.ndarray of bool): If specified, this array acts as a mask,\n and an element is copied only if the corresponding element of\n ``where`` is True.\n\n .. 
seealso:: :func:`numpy.copyto`\n\n \"\"\"\n\n src_type = type(src)\n src_is_python_scalar = src_type in (\n int, bool, float, complex,\n fusion._FusionVarScalar, _fusion_interface._ScalarProxy)\n if src_is_python_scalar:\n src_dtype = numpy.dtype(type(src))\n can_cast = numpy.can_cast(src, dst.dtype, casting)\n else:\n src_dtype = src.dtype\n can_cast = numpy.can_cast(src_dtype, dst.dtype, casting)\n\n if not can_cast:\n raise TypeError('Cannot cast %s to %s in %s casting mode' %\n (src_dtype, dst.dtype, casting))\n if fusion._is_fusing():\n if where is None:\n _core.elementwise_copy(src, dst)\n else:\n fusion._call_ufunc(search._where_ufunc, where, src, dst, dst)\n return\n\n if where is not None:\n _core.elementwise_copy(src, dst, _where=where)\n return\n\n if dst.size == 0:\n return\n\n if src_is_python_scalar:\n dst.fill(src)\n return\n\n if _can_memcpy(dst, src):\n dst.data.copy_from_async(src.data, src.nbytes)\n return\n\n device = dst.device\n prev_device = runtime.getDevice()\n try:\n runtime.setDevice(device.id)\n if src.device != device:\n src = src.copy()\n _core.elementwise_copy(src, dst)\n finally:\n runtime.setDevice(prev_device)\n\n\ndef _can_memcpy(dst, src):\n c_contiguous = dst.flags.c_contiguous and src.flags.c_contiguous\n f_contiguous = dst.flags.f_contiguous and src.flags.f_contiguous\n return (c_contiguous or f_contiguous) and dst.dtype == src.dtype and \\\n dst.size == src.size\n", "path": "cupy/_manipulation/basic.py"}], "after_files": [{"content": "import numpy\n\nfrom cupy import _core\nfrom cupy._core import _fusion_interface\nfrom cupy._core import fusion\nfrom cupy._sorting import search\nfrom cupy_backends.cuda.api import runtime\n\n\ndef copyto(dst, src, casting='same_kind', where=None):\n \"\"\"Copies values from one array to another with broadcasting.\n\n This function can be called for arrays on different devices. In this case,\n casting, ``where``, and broadcasting is not supported, and an exception is\n raised if these are used.\n\n Args:\n dst (cupy.ndarray): Target array.\n src (cupy.ndarray): Source array.\n casting (str): Casting rule. See :func:`numpy.can_cast` for detail.\n where (cupy.ndarray of bool): If specified, this array acts as a mask,\n and an element is copied only if the corresponding element of\n ``where`` is True.\n\n .. 
seealso:: :func:`numpy.copyto`\n\n \"\"\"\n\n src_type = type(src)\n src_is_python_scalar = src_type in (\n int, bool, float, complex,\n fusion._FusionVarScalar, _fusion_interface._ScalarProxy)\n if src_is_python_scalar:\n src_dtype = numpy.dtype(type(src))\n can_cast = numpy.can_cast(src, dst.dtype, casting)\n else:\n src_dtype = src.dtype\n can_cast = numpy.can_cast(src_dtype, dst.dtype, casting)\n\n if not can_cast:\n raise TypeError('Cannot cast %s to %s in %s casting mode' %\n (src_dtype, dst.dtype, casting))\n\n if not src_is_python_scalar and src.ndim > dst.ndim:\n # NumPy allows stripping leading unit dimensions.\n try:\n src = src.squeeze(tuple(range(src.ndim - dst.ndim)))\n except ValueError:\n # \"cannot select an axis to squeeze out\n # which has size not equal to one\"\n pass # raise an error later\n\n if fusion._is_fusing():\n if where is None:\n _core.elementwise_copy(src, dst)\n else:\n fusion._call_ufunc(search._where_ufunc, where, src, dst, dst)\n return\n\n if where is not None:\n _core.elementwise_copy(src, dst, _where=where)\n return\n\n if dst.size == 0:\n return\n\n if src_is_python_scalar:\n dst.fill(src)\n return\n\n if _can_memcpy(dst, src):\n dst.data.copy_from_async(src.data, src.nbytes)\n return\n\n device = dst.device\n prev_device = runtime.getDevice()\n try:\n runtime.setDevice(device.id)\n if src.device != device:\n src = src.copy()\n _core.elementwise_copy(src, dst)\n finally:\n runtime.setDevice(prev_device)\n\n\ndef _can_memcpy(dst, src):\n c_contiguous = dst.flags.c_contiguous and src.flags.c_contiguous\n f_contiguous = dst.flags.f_contiguous and src.flags.f_contiguous\n return (c_contiguous or f_contiguous) and dst.dtype == src.dtype and \\\n dst.size == src.size\n", "path": "cupy/_manipulation/basic.py"}]} | 2,017 | 199 |
gh_patches_debug_43430 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3346 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider jackinthebox is broken
During the global build at 2021-06-23-14-42-18, spider **jackinthebox** failed with **0 features** and **1 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-06-23-14-42-18/logs/jackinthebox.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-23-14-42-18/output/jackinthebox.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-23-14-42-18/output/jackinthebox.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/jackinthebox.py`
Content:
```
1 import json
2 import re
3 import scrapy
4 from locations.items import GeojsonPointItem
5
6 class JackInTheBoxSpider(scrapy.Spider):
7 name = "jackinthebox"
8 item_attributes = { 'brand': "Jack In The Box" }
9 allowed_domains = ["jackinthebox.com"]
10 start_urls = (
11 "https://www.jackinthebox.com/api/locations",
12 )
13 dayMap = {
14 'monday': 'Mo',
15 'tuesday': 'Tu',
16 'wednesday': 'We',
17 'thursday': 'Th',
18 'friday': 'Fr',
19 'saturday': 'Sa',
20 'sunday': 'Su'
21 }
22 def opening_hours(self, days_hours):
23 day_groups = []
24 this_day_group = None
25 for day_hours in days_hours:
26 day = day_hours[0]
27 hours = day_hours[1]
28 match = re.search(r'^(\d{1,2}):(\d{2})\w*(a|p)m-(\d{1,2}):(\d{2})\w*(a|p)m?$', hours)
29 (f_hr, f_min, f_ampm, t_hr, t_min, t_ampm) = match.groups()
30
31 f_hr = int(f_hr)
32 if f_ampm == 'p':
33 f_hr += 12
34 elif f_ampm == 'a' and f_hr == 12:
35 f_hr = 0
36 t_hr = int(t_hr)
37 if t_ampm == 'p':
38 t_hr += 12
39 elif t_ampm == 'a' and t_hr == 12:
40 t_hr = 0
41
42 hours = '{:02d}:{}-{:02d}:{}'.format(
43 f_hr,
44 f_min,
45 t_hr,
46 t_min,
47 )
48
49 if not this_day_group:
50 this_day_group = {
51 'from_day': day,
52 'to_day': day,
53 'hours': hours
54 }
55 elif this_day_group['hours'] != hours:
56 day_groups.append(this_day_group)
57 this_day_group = {
58 'from_day': day,
59 'to_day': day,
60 'hours': hours
61 }
62 elif this_day_group['hours'] == hours:
63 this_day_group['to_day'] = day
64
65 day_groups.append(this_day_group)
66
67 opening_hours = ""
68 if len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59', '00:00-00:00'):
69 opening_hours = '24/7'
70 else:
71 for day_group in day_groups:
72 if day_group['from_day'] == day_group['to_day']:
73 opening_hours += '{from_day} {hours}; '.format(**day_group)
74 elif day_group['from_day'] == 'Su' and day_group['to_day'] == 'Sa':
75 opening_hours += '{hours}; '.format(**day_group)
76 else:
77 opening_hours += '{from_day}-{to_day} {hours}; '.format(**day_group)
78 opening_hours = opening_hours[:-2]
79
80 return opening_hours
81
82 def parse(self, response):
83 stores = json.loads(response.body_as_unicode())
84 for store in stores:
85 properties = {
86 'ref': store['id'],
87 'addr_full': store['address'],
88 'city': store['city'],
89 'state': store['state'],
90 'postcode': store['postal'],
91 'lat': store['lat'],
92 'lon': store['lng'],
93 'phone': store['phone'],
94 }
95
96 if store['twentyfourhours']:
97 properties['opening_hours'] = '24/7'
98 elif 'hours' in store:
99 hours = store['hours']
100 if not all(hours[d] == '' for d in hours):
101 days_hours = []
102 for day in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']:
103 days_hours.append([
104 self.dayMap[day],
105 hours[day].lower().replace(' ', '')
106 ])
107 properties['opening_hours'] = self.opening_hours(days_hours)
108
109 yield GeojsonPointItem(**properties)
110
111
112
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/jackinthebox.py b/locations/spiders/jackinthebox.py
--- a/locations/spiders/jackinthebox.py
+++ b/locations/spiders/jackinthebox.py
@@ -11,13 +11,13 @@
"https://www.jackinthebox.com/api/locations",
)
dayMap = {
- 'monday': 'Mo',
- 'tuesday': 'Tu',
- 'wednesday': 'We',
- 'thursday': 'Th',
- 'friday': 'Fr',
- 'saturday': 'Sa',
- 'sunday': 'Su'
+ 'Monday': 'Mo',
+ 'Tuesday': 'Tu',
+ 'Wednesday': 'We',
+ 'Thursday': 'Th',
+ 'Friday': 'Fr',
+ 'Saturday': 'Sa',
+ 'Sunday': 'Su'
}
def opening_hours(self, days_hours):
day_groups = []
@@ -25,6 +25,9 @@
for day_hours in days_hours:
day = day_hours[0]
hours = day_hours[1]
+ if not hours:
+ continue
+
match = re.search(r'^(\d{1,2}):(\d{2})\w*(a|p)m-(\d{1,2}):(\d{2})\w*(a|p)m?$', hours)
(f_hr, f_min, f_ampm, t_hr, t_min, t_ampm) = match.groups()
@@ -62,7 +65,8 @@
elif this_day_group['hours'] == hours:
this_day_group['to_day'] = day
- day_groups.append(this_day_group)
+ if this_day_group:
+ day_groups.append(this_day_group)
opening_hours = ""
if len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59', '00:00-00:00'):
@@ -80,31 +84,32 @@
return opening_hours
def parse(self, response):
- stores = json.loads(response.body_as_unicode())
- for store in stores:
+ stores = json.loads(response.body_as_unicode())['Locations']
+ for store in stores:
+ address = store['Address']
properties = {
- 'ref': store['id'],
- 'addr_full': store['address'],
- 'city': store['city'],
- 'state': store['state'],
- 'postcode': store['postal'],
- 'lat': store['lat'],
- 'lon': store['lng'],
- 'phone': store['phone'],
+ 'ref': store['LocationId'],
+ 'addr_full': ", ".join([address['StreetLine1'], address['StreetLine2']]),
+ 'city': address['City'],
+ 'state': address['State'],
+ 'postcode': address['Zipcode'],
+ 'lat': store['Coordinates']['Lat'],
+ 'lon': store['Coordinates']['Lon'],
+ 'phone': store['OperationsData']['BusinessPhoneNumber'],
}
- if store['twentyfourhours']:
+ hours = store['OperatingHours']
+ if all (hours['DineInAllDay'][day] == True for day in hours['DineInAllDay']):
properties['opening_hours'] = '24/7'
- elif 'hours' in store:
- hours = store['hours']
- if not all(hours[d] == '' for d in hours):
- days_hours = []
- for day in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']:
- days_hours.append([
- self.dayMap[day],
- hours[day].lower().replace(' ', '')
- ])
- properties['opening_hours'] = self.opening_hours(days_hours)
+
+ else:
+ days_hours = []
+ for day in ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']:
+ days_hours.append([
+ self.dayMap[day],
+ hours['DineIn'][day].lower().replace(' ', '')
+ ])
+ properties['opening_hours'] = self.opening_hours(days_hours)
yield GeojsonPointItem(**properties)
| {"golden_diff": "diff --git a/locations/spiders/jackinthebox.py b/locations/spiders/jackinthebox.py\n--- a/locations/spiders/jackinthebox.py\n+++ b/locations/spiders/jackinthebox.py\n@@ -11,13 +11,13 @@\n \"https://www.jackinthebox.com/api/locations\",\n )\n dayMap = {\n- 'monday': 'Mo',\n- 'tuesday': 'Tu',\n- 'wednesday': 'We',\n- 'thursday': 'Th',\n- 'friday': 'Fr',\n- 'saturday': 'Sa',\n- 'sunday': 'Su'\n+ 'Monday': 'Mo',\n+ 'Tuesday': 'Tu',\n+ 'Wednesday': 'We',\n+ 'Thursday': 'Th',\n+ 'Friday': 'Fr',\n+ 'Saturday': 'Sa',\n+ 'Sunday': 'Su'\n }\n def opening_hours(self, days_hours):\n day_groups = []\n@@ -25,6 +25,9 @@\n for day_hours in days_hours:\n day = day_hours[0]\n hours = day_hours[1]\n+ if not hours:\n+ continue\n+\n match = re.search(r'^(\\d{1,2}):(\\d{2})\\w*(a|p)m-(\\d{1,2}):(\\d{2})\\w*(a|p)m?$', hours)\n (f_hr, f_min, f_ampm, t_hr, t_min, t_ampm) = match.groups()\n \n@@ -62,7 +65,8 @@\n elif this_day_group['hours'] == hours:\n this_day_group['to_day'] = day\n \n- day_groups.append(this_day_group)\n+ if this_day_group:\n+ day_groups.append(this_day_group)\n \n opening_hours = \"\"\n if len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59', '00:00-00:00'):\n@@ -80,31 +84,32 @@\n return opening_hours\n \n def parse(self, response):\n- stores = json.loads(response.body_as_unicode())\n- for store in stores: \n+ stores = json.loads(response.body_as_unicode())['Locations']\n+ for store in stores:\n+ address = store['Address']\n properties = { \n- 'ref': store['id'], \n- 'addr_full': store['address'],\n- 'city': store['city'], \n- 'state': store['state'], \n- 'postcode': store['postal'], \n- 'lat': store['lat'], \n- 'lon': store['lng'], \n- 'phone': store['phone'],\n+ 'ref': store['LocationId'],\n+ 'addr_full': \", \".join([address['StreetLine1'], address['StreetLine2']]),\n+ 'city': address['City'],\n+ 'state': address['State'],\n+ 'postcode': address['Zipcode'],\n+ 'lat': store['Coordinates']['Lat'],\n+ 'lon': store['Coordinates']['Lon'],\n+ 'phone': store['OperationsData']['BusinessPhoneNumber'],\n } \n \n- if store['twentyfourhours']:\n+ hours = store['OperatingHours']\n+ if all (hours['DineInAllDay'][day] == True for day in hours['DineInAllDay']):\n properties['opening_hours'] = '24/7'\n- elif 'hours' in store:\n- hours = store['hours']\n- if not all(hours[d] == '' for d in hours):\n- days_hours = []\n- for day in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']:\n- days_hours.append([\n- self.dayMap[day],\n- hours[day].lower().replace(' ', '')\n- ])\n- properties['opening_hours'] = self.opening_hours(days_hours)\n+\n+ else:\n+ days_hours = []\n+ for day in ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']:\n+ days_hours.append([\n+ self.dayMap[day],\n+ hours['DineIn'][day].lower().replace(' ', '')\n+ ])\n+ properties['opening_hours'] = self.opening_hours(days_hours)\n \n yield GeojsonPointItem(**properties)\n", "issue": "Spider jackinthebox is broken\nDuring the global build at 2021-06-23-14-42-18, spider **jackinthebox** failed with **0 features** and **1 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-06-23-14-42-18/logs/jackinthebox.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-23-14-42-18/output/jackinthebox.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-23-14-42-18/output/jackinthebox.geojson))\n", "before_files": [{"content": "import json\nimport re\nimport scrapy\nfrom locations.items import 
GeojsonPointItem\n\nclass JackInTheBoxSpider(scrapy.Spider):\n name = \"jackinthebox\"\n item_attributes = { 'brand': \"Jack In The Box\" }\n allowed_domains = [\"jackinthebox.com\"]\n start_urls = (\n \"https://www.jackinthebox.com/api/locations\",\n )\n dayMap = {\n 'monday': 'Mo',\n 'tuesday': 'Tu',\n 'wednesday': 'We',\n 'thursday': 'Th',\n 'friday': 'Fr',\n 'saturday': 'Sa',\n 'sunday': 'Su'\n }\n def opening_hours(self, days_hours):\n day_groups = []\n this_day_group = None\n for day_hours in days_hours:\n day = day_hours[0]\n hours = day_hours[1]\n match = re.search(r'^(\\d{1,2}):(\\d{2})\\w*(a|p)m-(\\d{1,2}):(\\d{2})\\w*(a|p)m?$', hours)\n (f_hr, f_min, f_ampm, t_hr, t_min, t_ampm) = match.groups()\n\n f_hr = int(f_hr)\n if f_ampm == 'p':\n f_hr += 12\n elif f_ampm == 'a' and f_hr == 12:\n f_hr = 0\n t_hr = int(t_hr)\n if t_ampm == 'p':\n t_hr += 12\n elif t_ampm == 'a' and t_hr == 12:\n t_hr = 0\n\n hours = '{:02d}:{}-{:02d}:{}'.format(\n f_hr,\n f_min,\n t_hr,\n t_min,\n )\n\n if not this_day_group:\n this_day_group = {\n 'from_day': day,\n 'to_day': day,\n 'hours': hours\n }\n elif this_day_group['hours'] != hours:\n day_groups.append(this_day_group)\n this_day_group = {\n 'from_day': day,\n 'to_day': day,\n 'hours': hours\n }\n elif this_day_group['hours'] == hours:\n this_day_group['to_day'] = day\n\n day_groups.append(this_day_group)\n\n opening_hours = \"\"\n if len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59', '00:00-00:00'):\n opening_hours = '24/7'\n else:\n for day_group in day_groups:\n if day_group['from_day'] == day_group['to_day']:\n opening_hours += '{from_day} {hours}; '.format(**day_group)\n elif day_group['from_day'] == 'Su' and day_group['to_day'] == 'Sa':\n opening_hours += '{hours}; '.format(**day_group)\n else:\n opening_hours += '{from_day}-{to_day} {hours}; '.format(**day_group)\n opening_hours = opening_hours[:-2]\n\n return opening_hours\n\n def parse(self, response):\n stores = json.loads(response.body_as_unicode())\n for store in stores: \n properties = { \n 'ref': store['id'], \n 'addr_full': store['address'],\n 'city': store['city'], \n 'state': store['state'], \n 'postcode': store['postal'], \n 'lat': store['lat'], \n 'lon': store['lng'], \n 'phone': store['phone'],\n } \n \n if store['twentyfourhours']:\n properties['opening_hours'] = '24/7'\n elif 'hours' in store:\n hours = store['hours']\n if not all(hours[d] == '' for d in hours):\n days_hours = []\n for day in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']:\n days_hours.append([\n self.dayMap[day],\n hours[day].lower().replace(' ', '')\n ])\n properties['opening_hours'] = self.opening_hours(days_hours)\n \n yield GeojsonPointItem(**properties) \n\n\n", "path": "locations/spiders/jackinthebox.py"}], "after_files": [{"content": "import json\nimport re\nimport scrapy\nfrom locations.items import GeojsonPointItem\n\nclass JackInTheBoxSpider(scrapy.Spider):\n name = \"jackinthebox\"\n item_attributes = { 'brand': \"Jack In The Box\" }\n allowed_domains = [\"jackinthebox.com\"]\n start_urls = (\n \"https://www.jackinthebox.com/api/locations\",\n )\n dayMap = {\n 'Monday': 'Mo',\n 'Tuesday': 'Tu',\n 'Wednesday': 'We',\n 'Thursday': 'Th',\n 'Friday': 'Fr',\n 'Saturday': 'Sa',\n 'Sunday': 'Su'\n }\n def opening_hours(self, days_hours):\n day_groups = []\n this_day_group = None\n for day_hours in days_hours:\n day = day_hours[0]\n hours = day_hours[1]\n if not hours:\n continue\n\n match = 
re.search(r'^(\\d{1,2}):(\\d{2})\\w*(a|p)m-(\\d{1,2}):(\\d{2})\\w*(a|p)m?$', hours)\n (f_hr, f_min, f_ampm, t_hr, t_min, t_ampm) = match.groups()\n\n f_hr = int(f_hr)\n if f_ampm == 'p':\n f_hr += 12\n elif f_ampm == 'a' and f_hr == 12:\n f_hr = 0\n t_hr = int(t_hr)\n if t_ampm == 'p':\n t_hr += 12\n elif t_ampm == 'a' and t_hr == 12:\n t_hr = 0\n\n hours = '{:02d}:{}-{:02d}:{}'.format(\n f_hr,\n f_min,\n t_hr,\n t_min,\n )\n\n if not this_day_group:\n this_day_group = {\n 'from_day': day,\n 'to_day': day,\n 'hours': hours\n }\n elif this_day_group['hours'] != hours:\n day_groups.append(this_day_group)\n this_day_group = {\n 'from_day': day,\n 'to_day': day,\n 'hours': hours\n }\n elif this_day_group['hours'] == hours:\n this_day_group['to_day'] = day\n\n if this_day_group:\n day_groups.append(this_day_group)\n\n opening_hours = \"\"\n if len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59', '00:00-00:00'):\n opening_hours = '24/7'\n else:\n for day_group in day_groups:\n if day_group['from_day'] == day_group['to_day']:\n opening_hours += '{from_day} {hours}; '.format(**day_group)\n elif day_group['from_day'] == 'Su' and day_group['to_day'] == 'Sa':\n opening_hours += '{hours}; '.format(**day_group)\n else:\n opening_hours += '{from_day}-{to_day} {hours}; '.format(**day_group)\n opening_hours = opening_hours[:-2]\n\n return opening_hours\n\n def parse(self, response):\n stores = json.loads(response.body_as_unicode())['Locations']\n for store in stores:\n address = store['Address']\n properties = { \n 'ref': store['LocationId'],\n 'addr_full': \", \".join([address['StreetLine1'], address['StreetLine2']]),\n 'city': address['City'],\n 'state': address['State'],\n 'postcode': address['Zipcode'],\n 'lat': store['Coordinates']['Lat'],\n 'lon': store['Coordinates']['Lon'],\n 'phone': store['OperationsData']['BusinessPhoneNumber'],\n } \n \n hours = store['OperatingHours']\n if all (hours['DineInAllDay'][day] == True for day in hours['DineInAllDay']):\n properties['opening_hours'] = '24/7'\n\n else:\n days_hours = []\n for day in ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']:\n days_hours.append([\n self.dayMap[day],\n hours['DineIn'][day].lower().replace(' ', '')\n ])\n properties['opening_hours'] = self.opening_hours(days_hours)\n \n yield GeojsonPointItem(**properties) \n\n\n", "path": "locations/spiders/jackinthebox.py"}]} | 1,629 | 986 |
gh_patches_debug_5360 | rasdani/github-patches | git_diff | ibis-project__ibis-2884 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BUG: File pseudo-backends failing for missing pandas option
The following code is failing in master since #2833:
```python
>>> import ibis
>>> con = ibis.csv.connect('/home/mgarcia/src/ibis/ci/ibis-testing-data/')
>>> expr = con.table('functional_alltypes')['double_col'] * 2
>>> print(expr.execute())
OptionError: "No such keys(s): 'pandas.enable_trace'"
```
The problem happens when the `csv` backend (or another file backend) is loaded, but the pandas backend is not. This is because `ibis.pandas` loads the pandas options, which appear to be needed by the file pseudo-backends.
The CI is not failing, I guess because the pandas and file backends are tested together, so pandas is already loaded when the file backends are tested.
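A possible workaround until this is fixed (untested sketch; it assumes that accessing `ibis.pandas` is what registers the missing options, as described above) is to load the pandas backend explicitly before executing:

```python
import ibis

ibis.pandas  # assumption: touching the pandas backend registers its options

con = ibis.csv.connect('/home/mgarcia/src/ibis/ci/ibis-testing-data/')
expr = con.table('functional_alltypes')['double_col'] * 2
print(expr.execute())
```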
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ibis/backends/base/file/__init__.py`
Content:
```
1 from pathlib import Path
2
3 import ibis.expr.types as ir
4 from ibis.backends.base import BaseBackend, Client, Database
5 from ibis.backends.pandas.core import execute_and_reset
6
7
8 class FileClient(Client):
9 def __init__(self, backend, root):
10 self.backend = backend
11 self.extension = backend.extension
12 self.table_class = backend.table_class
13 self.root = Path(str(root))
14 self.dictionary = {}
15
16 def insert(self, path, expr, **kwargs):
17 raise NotImplementedError
18
19 def table(self, name, path):
20 raise NotImplementedError
21
22 def database(self, name=None, path=None):
23 if name is None:
24 return FileDatabase('root', self, path=path)
25
26 if name not in self.list_databases(path):
27 raise AttributeError(name)
28 if path is None:
29 path = self.root
30
31 new_name = "{}.{}".format(name, self.extension)
32 if (self.root / name).is_dir():
33 path /= name
34 elif not str(path).endswith(new_name):
35 path /= new_name
36
37 return FileDatabase(name, self, path=path)
38
39 def execute(self, expr, params=None, **kwargs): # noqa
40 assert isinstance(expr, ir.Expr)
41 return execute_and_reset(expr, params=params, **kwargs)
42
43 def list_tables(self, path=None):
44 raise NotImplementedError
45
46 def _list_tables_files(self, path=None):
47 # tables are files in a dir
48 if path is None:
49 path = self.root
50
51 tables = []
52 if path.is_dir():
53 for d in path.iterdir():
54 if d.is_file():
55 if str(d).endswith(self.extension):
56 tables.append(d.stem)
57 elif path.is_file():
58 if str(path).endswith(self.extension):
59 tables.append(path.stem)
60 return tables
61
62 def list_databases(self, path=None):
63 raise NotImplementedError
64
65 def _list_databases_dirs(self, path=None):
66 # databases are dir
67 if path is None:
68 path = self.root
69
70 tables = []
71 if path.is_dir():
72 for d in path.iterdir():
73 if d.is_dir():
74 tables.append(d.name)
75 return tables
76
77 def _list_databases_dirs_or_files(self, path=None):
78 # databases are dir & file
79 if path is None:
80 path = self.root
81
82 tables = []
83 if path.is_dir():
84 for d in path.iterdir():
85 if d.is_dir():
86 tables.append(d.name)
87 elif d.is_file():
88 if str(d).endswith(self.extension):
89 tables.append(d.stem)
90 elif path.is_file():
91 # by definition we are at the db level at this point
92 pass
93
94 return tables
95
96
97 class FileDatabase(Database):
98 def __init__(self, name, client, path=None):
99 super().__init__(name, client)
100 self.path = path
101
102 def __str__(self):
103 return '{0.__class__.__name__}({0.name})'.format(self)
104
105 def __dir__(self):
106 dbs = self.list_databases(path=self.path)
107 tables = self.list_tables(path=self.path)
108 return sorted(set(dbs).union(set(tables)))
109
110 def __getattr__(self, name):
111 try:
112 return self.table(name, path=self.path)
113 except AttributeError:
114 return self.database(name, path=self.path)
115
116 def table(self, name, path):
117 return self.client.table(name, path=path)
118
119 def database(self, name=None, path=None):
120 return self.client.database(name=name, path=path)
121
122 def list_databases(self, path=None):
123 if path is None:
124 path = self.path
125 return sorted(self.client.list_databases(path=path))
126
127 def list_tables(self, path=None):
128 if path is None:
129 path = self.path
130 return sorted(self.client.list_tables(path=path))
131
132
133 class BaseFileBackend(BaseBackend):
134 """
135 Base backend class for pandas pseudo-backends for file formats.
136 """
137
138 def connect(self, path):
139 """Create a Client for use with Ibis
140
141 Parameters
142 ----------
143 path : str or pathlib.Path
144
145 Returns
146 -------
147 Client
148 """
149 return self.client_class(backend=self, root=path)
150
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ibis/backends/base/file/__init__.py b/ibis/backends/base/file/__init__.py
--- a/ibis/backends/base/file/__init__.py
+++ b/ibis/backends/base/file/__init__.py
@@ -1,9 +1,13 @@
from pathlib import Path
+import ibis
import ibis.expr.types as ir
from ibis.backends.base import BaseBackend, Client, Database
from ibis.backends.pandas.core import execute_and_reset
+# Load options of pandas backend
+ibis.pandas
+
class FileClient(Client):
def __init__(self, backend, root):
| {"golden_diff": "diff --git a/ibis/backends/base/file/__init__.py b/ibis/backends/base/file/__init__.py\n--- a/ibis/backends/base/file/__init__.py\n+++ b/ibis/backends/base/file/__init__.py\n@@ -1,9 +1,13 @@\n from pathlib import Path\n \n+import ibis\n import ibis.expr.types as ir\n from ibis.backends.base import BaseBackend, Client, Database\n from ibis.backends.pandas.core import execute_and_reset\n \n+# Load options of pandas backend\n+ibis.pandas\n+\n \n class FileClient(Client):\n def __init__(self, backend, root):\n", "issue": "BUG: File pseudo-backends failing for missing pandas option\nThe next code is failing in master since #2833:\r\n\r\n```python\r\n>>> import ibis\r\n>>> con = ibis.csv.connect('/home/mgarcia/src/ibis/ci/ibis-testing-data/')\r\n>>> expr = con.table('functional_alltypes')['double_col'] * 2\r\n>>> print(expr.execute())\r\nOptionError: \"No such keys(s): 'pandas.enable_trace'\"\r\n```\r\n\r\nThe problem is when the `csv` backend (or other file backends) are loaded, but the pandas backend is not. This is because `ibis.pandas` loads the pandas options, which looks like they are needed by the file pseudo-backends.\r\n\r\nThe CI is not failing, I guess because we test pandas and the file backends are tested together, and pandas is loaded when the file backends are tested.\n", "before_files": [{"content": "from pathlib import Path\n\nimport ibis.expr.types as ir\nfrom ibis.backends.base import BaseBackend, Client, Database\nfrom ibis.backends.pandas.core import execute_and_reset\n\n\nclass FileClient(Client):\n def __init__(self, backend, root):\n self.backend = backend\n self.extension = backend.extension\n self.table_class = backend.table_class\n self.root = Path(str(root))\n self.dictionary = {}\n\n def insert(self, path, expr, **kwargs):\n raise NotImplementedError\n\n def table(self, name, path):\n raise NotImplementedError\n\n def database(self, name=None, path=None):\n if name is None:\n return FileDatabase('root', self, path=path)\n\n if name not in self.list_databases(path):\n raise AttributeError(name)\n if path is None:\n path = self.root\n\n new_name = \"{}.{}\".format(name, self.extension)\n if (self.root / name).is_dir():\n path /= name\n elif not str(path).endswith(new_name):\n path /= new_name\n\n return FileDatabase(name, self, path=path)\n\n def execute(self, expr, params=None, **kwargs): # noqa\n assert isinstance(expr, ir.Expr)\n return execute_and_reset(expr, params=params, **kwargs)\n\n def list_tables(self, path=None):\n raise NotImplementedError\n\n def _list_tables_files(self, path=None):\n # tables are files in a dir\n if path is None:\n path = self.root\n\n tables = []\n if path.is_dir():\n for d in path.iterdir():\n if d.is_file():\n if str(d).endswith(self.extension):\n tables.append(d.stem)\n elif path.is_file():\n if str(path).endswith(self.extension):\n tables.append(path.stem)\n return tables\n\n def list_databases(self, path=None):\n raise NotImplementedError\n\n def _list_databases_dirs(self, path=None):\n # databases are dir\n if path is None:\n path = self.root\n\n tables = []\n if path.is_dir():\n for d in path.iterdir():\n if d.is_dir():\n tables.append(d.name)\n return tables\n\n def _list_databases_dirs_or_files(self, path=None):\n # databases are dir & file\n if path is None:\n path = self.root\n\n tables = []\n if path.is_dir():\n for d in path.iterdir():\n if d.is_dir():\n tables.append(d.name)\n elif d.is_file():\n if str(d).endswith(self.extension):\n tables.append(d.stem)\n elif path.is_file():\n # by definition we are 
at the db level at this point\n pass\n\n return tables\n\n\nclass FileDatabase(Database):\n def __init__(self, name, client, path=None):\n super().__init__(name, client)\n self.path = path\n\n def __str__(self):\n return '{0.__class__.__name__}({0.name})'.format(self)\n\n def __dir__(self):\n dbs = self.list_databases(path=self.path)\n tables = self.list_tables(path=self.path)\n return sorted(set(dbs).union(set(tables)))\n\n def __getattr__(self, name):\n try:\n return self.table(name, path=self.path)\n except AttributeError:\n return self.database(name, path=self.path)\n\n def table(self, name, path):\n return self.client.table(name, path=path)\n\n def database(self, name=None, path=None):\n return self.client.database(name=name, path=path)\n\n def list_databases(self, path=None):\n if path is None:\n path = self.path\n return sorted(self.client.list_databases(path=path))\n\n def list_tables(self, path=None):\n if path is None:\n path = self.path\n return sorted(self.client.list_tables(path=path))\n\n\nclass BaseFileBackend(BaseBackend):\n \"\"\"\n Base backend class for pandas pseudo-backends for file formats.\n \"\"\"\n\n def connect(self, path):\n \"\"\"Create a Client for use with Ibis\n\n Parameters\n ----------\n path : str or pathlib.Path\n\n Returns\n -------\n Client\n \"\"\"\n return self.client_class(backend=self, root=path)\n", "path": "ibis/backends/base/file/__init__.py"}], "after_files": [{"content": "from pathlib import Path\n\nimport ibis\nimport ibis.expr.types as ir\nfrom ibis.backends.base import BaseBackend, Client, Database\nfrom ibis.backends.pandas.core import execute_and_reset\n\n# Load options of pandas backend\nibis.pandas\n\n\nclass FileClient(Client):\n def __init__(self, backend, root):\n self.backend = backend\n self.extension = backend.extension\n self.table_class = backend.table_class\n self.root = Path(str(root))\n self.dictionary = {}\n\n def insert(self, path, expr, **kwargs):\n raise NotImplementedError\n\n def table(self, name, path):\n raise NotImplementedError\n\n def database(self, name=None, path=None):\n if name is None:\n return FileDatabase('root', self, path=path)\n\n if name not in self.list_databases(path):\n raise AttributeError(name)\n if path is None:\n path = self.root\n\n new_name = \"{}.{}\".format(name, self.extension)\n if (self.root / name).is_dir():\n path /= name\n elif not str(path).endswith(new_name):\n path /= new_name\n\n return FileDatabase(name, self, path=path)\n\n def execute(self, expr, params=None, **kwargs): # noqa\n assert isinstance(expr, ir.Expr)\n return execute_and_reset(expr, params=params, **kwargs)\n\n def list_tables(self, path=None):\n raise NotImplementedError\n\n def _list_tables_files(self, path=None):\n # tables are files in a dir\n if path is None:\n path = self.root\n\n tables = []\n if path.is_dir():\n for d in path.iterdir():\n if d.is_file():\n if str(d).endswith(self.extension):\n tables.append(d.stem)\n elif path.is_file():\n if str(path).endswith(self.extension):\n tables.append(path.stem)\n return tables\n\n def list_databases(self, path=None):\n raise NotImplementedError\n\n def _list_databases_dirs(self, path=None):\n # databases are dir\n if path is None:\n path = self.root\n\n tables = []\n if path.is_dir():\n for d in path.iterdir():\n if d.is_dir():\n tables.append(d.name)\n return tables\n\n def _list_databases_dirs_or_files(self, path=None):\n # databases are dir & file\n if path is None:\n path = self.root\n\n tables = []\n if path.is_dir():\n for d in path.iterdir():\n if d.is_dir():\n 
tables.append(d.name)\n elif d.is_file():\n if str(d).endswith(self.extension):\n tables.append(d.stem)\n elif path.is_file():\n # by definition we are at the db level at this point\n pass\n\n return tables\n\n\nclass FileDatabase(Database):\n def __init__(self, name, client, path=None):\n super().__init__(name, client)\n self.path = path\n\n def __str__(self):\n return '{0.__class__.__name__}({0.name})'.format(self)\n\n def __dir__(self):\n dbs = self.list_databases(path=self.path)\n tables = self.list_tables(path=self.path)\n return sorted(set(dbs).union(set(tables)))\n\n def __getattr__(self, name):\n try:\n return self.table(name, path=self.path)\n except AttributeError:\n return self.database(name, path=self.path)\n\n def table(self, name, path):\n return self.client.table(name, path=path)\n\n def database(self, name=None, path=None):\n return self.client.database(name=name, path=path)\n\n def list_databases(self, path=None):\n if path is None:\n path = self.path\n return sorted(self.client.list_databases(path=path))\n\n def list_tables(self, path=None):\n if path is None:\n path = self.path\n return sorted(self.client.list_tables(path=path))\n\n\nclass BaseFileBackend(BaseBackend):\n \"\"\"\n Base backend class for pandas pseudo-backends for file formats.\n \"\"\"\n\n def connect(self, path):\n \"\"\"Create a Client for use with Ibis\n\n Parameters\n ----------\n path : str or pathlib.Path\n\n Returns\n -------\n Client\n \"\"\"\n return self.client_class(backend=self, root=path)\n", "path": "ibis/backends/base/file/__init__.py"}]} | 1,715 | 141 |
gh_patches_debug_35728 | rasdani/github-patches | git_diff | mindsdb__lightwood-518 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Lightwood.api.ensemble is not necessary
This script is deprecated, as the ensemble module has moved to `lw.ensemble` with a base abstraction. A quick inspection of the code (e.g. grepping for this call) turns up no references. Please double-check whether this file is still required, as I think it should be removed.
The culprit link is [here](https://github.com/mindsdb/lightwood/blob/0372d292796a6d1f91ac9df9b8658ad2f128b7c9/lightwood/api/ensemble.py)
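For reference, a rough way to double-check for references without grep (a sketch, assuming it is run from the repository root; the needle string is the deprecated module path mentioned above):

```python
from pathlib import Path

needle = "lightwood.api.ensemble"  # deprecated module path to look for
hits = [str(p) for p in Path(".").rglob("*.py") if needle in p.read_text(errors="ignore")]
print(hits or "no references found")
```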
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lightwood/api/ensemble.py`
Content:
```
1 from lightwood import Predictor
2 from lightwood.constants.lightwood import ColumnDataTypes
3 from collections import Counter
4 import numpy as np
5 import pickle
6 import os
7
8
9 class LightwoodEnsemble:
10 def __init__(self, predictors=None, load_from_path=None):
11 self.path_list = None
12 if load_from_path is not None:
13 with open(os.path.join(load_from_path, 'lightwood_data'), 'rb') as pickle_in:
14 obj = pickle.load(pickle_in)
15 self.path = load_from_path
16 self.path_list = obj.path_list
17 self.ensemble = [Predictor(load_from_path=path) for path in self.path_list]
18 elif isinstance(predictors, Predictor):
19 self.ensemble = [predictors]
20 elif isinstance(predictors, list):
21 self.ensemble = predictors
22
23 def append(self, predictor):
24 if isinstance(self.ensemble, list):
25 self.ensemble.append(predictor)
26 else:
27 self.ensemble = [predictor]
28
29 def __iter__(self):
30 yield self.ensemble
31
32 def predict(self, when_data):
33 predictions = [p.predict(when_data=when_data) for p in self.ensemble]
34 formatted_predictions = {}
35 for target in self.ensemble[0].config['output_features']:
36 target_name = target['name']
37 formatted_predictions[target_name] = {}
38 pred_arr = np.array([p[target_name]['predictions'] for p in predictions])
39 if target['type'] == ColumnDataTypes.NUMERIC:
40 final_preds = np.mean(pred_arr, axis=0).tolist()
41 elif target['type'] == ColumnDataTypes.CATEGORICAL:
42 final_preds = [max(Counter(pred_arr[:, idx])) for idx in range(pred_arr.shape[1])]
43
44 # @TODO: implement class distribution for ensembles
45 # NOTE: label set *could* grow when adding predictors, which complicates belief score computation
46 formatted_predictions[target_name]['class_distribution'] = np.ones(shape=(len(final_preds), 1))
47 else:
48 raise Exception('Only numeric and categorical datatypes are supported for ensembles')
49
50 formatted_predictions[target_name]['predictions'] = final_preds
51
52 return formatted_predictions
53
54 def save(self, path_to):
55 # TODO: potentially save predictors inside ensemble pickle, though there's the issue of nonpersistent stuff with torch.save() # noqa
56 path_list = []
57 for i, model in enumerate(self.ensemble):
58 path = os.path.join(path_to, f'lightwood_predictor_{i}')
59 path_list.append(path)
60 model.save(path_to=path)
61
62 self.path_list = path_list
63
64 # TODO: in the future, save preds inside this data struct
65 self.ensemble = None # we deref predictors for now
66 with open(os.path.join(path_to, 'lightwood_data'), 'wb') as file:
67 pickle.dump(self, file, pickle.HIGHEST_PROTOCOL)
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lightwood/api/ensemble.py b/lightwood/api/ensemble.py
deleted file mode 100644
--- a/lightwood/api/ensemble.py
+++ /dev/null
@@ -1,67 +0,0 @@
-from lightwood import Predictor
-from lightwood.constants.lightwood import ColumnDataTypes
-from collections import Counter
-import numpy as np
-import pickle
-import os
-
-
-class LightwoodEnsemble:
- def __init__(self, predictors=None, load_from_path=None):
- self.path_list = None
- if load_from_path is not None:
- with open(os.path.join(load_from_path, 'lightwood_data'), 'rb') as pickle_in:
- obj = pickle.load(pickle_in)
- self.path = load_from_path
- self.path_list = obj.path_list
- self.ensemble = [Predictor(load_from_path=path) for path in self.path_list]
- elif isinstance(predictors, Predictor):
- self.ensemble = [predictors]
- elif isinstance(predictors, list):
- self.ensemble = predictors
-
- def append(self, predictor):
- if isinstance(self.ensemble, list):
- self.ensemble.append(predictor)
- else:
- self.ensemble = [predictor]
-
- def __iter__(self):
- yield self.ensemble
-
- def predict(self, when_data):
- predictions = [p.predict(when_data=when_data) for p in self.ensemble]
- formatted_predictions = {}
- for target in self.ensemble[0].config['output_features']:
- target_name = target['name']
- formatted_predictions[target_name] = {}
- pred_arr = np.array([p[target_name]['predictions'] for p in predictions])
- if target['type'] == ColumnDataTypes.NUMERIC:
- final_preds = np.mean(pred_arr, axis=0).tolist()
- elif target['type'] == ColumnDataTypes.CATEGORICAL:
- final_preds = [max(Counter(pred_arr[:, idx])) for idx in range(pred_arr.shape[1])]
-
- # @TODO: implement class distribution for ensembles
- # NOTE: label set *could* grow when adding predictors, which complicates belief score computation
- formatted_predictions[target_name]['class_distribution'] = np.ones(shape=(len(final_preds), 1))
- else:
- raise Exception('Only numeric and categorical datatypes are supported for ensembles')
-
- formatted_predictions[target_name]['predictions'] = final_preds
-
- return formatted_predictions
-
- def save(self, path_to):
- # TODO: potentially save predictors inside ensemble pickle, though there's the issue of nonpersistent stuff with torch.save() # noqa
- path_list = []
- for i, model in enumerate(self.ensemble):
- path = os.path.join(path_to, f'lightwood_predictor_{i}')
- path_list.append(path)
- model.save(path_to=path)
-
- self.path_list = path_list
-
- # TODO: in the future, save preds inside this data struct
- self.ensemble = None # we deref predictors for now
- with open(os.path.join(path_to, 'lightwood_data'), 'wb') as file:
- pickle.dump(self, file, pickle.HIGHEST_PROTOCOL)
| {"golden_diff": "diff --git a/lightwood/api/ensemble.py b/lightwood/api/ensemble.py\ndeleted file mode 100644\n--- a/lightwood/api/ensemble.py\n+++ /dev/null\n@@ -1,67 +0,0 @@\n-from lightwood import Predictor\n-from lightwood.constants.lightwood import ColumnDataTypes\n-from collections import Counter\n-import numpy as np\n-import pickle\n-import os\n-\n-\n-class LightwoodEnsemble:\n- def __init__(self, predictors=None, load_from_path=None):\n- self.path_list = None\n- if load_from_path is not None:\n- with open(os.path.join(load_from_path, 'lightwood_data'), 'rb') as pickle_in:\n- obj = pickle.load(pickle_in)\n- self.path = load_from_path\n- self.path_list = obj.path_list\n- self.ensemble = [Predictor(load_from_path=path) for path in self.path_list]\n- elif isinstance(predictors, Predictor):\n- self.ensemble = [predictors]\n- elif isinstance(predictors, list):\n- self.ensemble = predictors\n-\n- def append(self, predictor):\n- if isinstance(self.ensemble, list):\n- self.ensemble.append(predictor)\n- else:\n- self.ensemble = [predictor]\n-\n- def __iter__(self):\n- yield self.ensemble\n-\n- def predict(self, when_data):\n- predictions = [p.predict(when_data=when_data) for p in self.ensemble]\n- formatted_predictions = {}\n- for target in self.ensemble[0].config['output_features']:\n- target_name = target['name']\n- formatted_predictions[target_name] = {}\n- pred_arr = np.array([p[target_name]['predictions'] for p in predictions])\n- if target['type'] == ColumnDataTypes.NUMERIC:\n- final_preds = np.mean(pred_arr, axis=0).tolist()\n- elif target['type'] == ColumnDataTypes.CATEGORICAL:\n- final_preds = [max(Counter(pred_arr[:, idx])) for idx in range(pred_arr.shape[1])]\n-\n- # @TODO: implement class distribution for ensembles\n- # NOTE: label set *could* grow when adding predictors, which complicates belief score computation\n- formatted_predictions[target_name]['class_distribution'] = np.ones(shape=(len(final_preds), 1))\n- else:\n- raise Exception('Only numeric and categorical datatypes are supported for ensembles')\n-\n- formatted_predictions[target_name]['predictions'] = final_preds\n-\n- return formatted_predictions\n-\n- def save(self, path_to):\n- # TODO: potentially save predictors inside ensemble pickle, though there's the issue of nonpersistent stuff with torch.save() # noqa\n- path_list = []\n- for i, model in enumerate(self.ensemble):\n- path = os.path.join(path_to, f'lightwood_predictor_{i}')\n- path_list.append(path)\n- model.save(path_to=path)\n-\n- self.path_list = path_list\n-\n- # TODO: in the future, save preds inside this data struct\n- self.ensemble = None # we deref predictors for now\n- with open(os.path.join(path_to, 'lightwood_data'), 'wb') as file:\n- pickle.dump(self, file, pickle.HIGHEST_PROTOCOL)\n", "issue": "Lightwood.api.ensemble is not necessary\nThis script is deprecated, as the ensemble module has moved to `lw.ensemble` with a base abstraction. A quick inspection of the code (ex: grep for this call) and I don't see any references. 
Please double check if this file is required, as I think it should be removed.\r\n\r\nThe culprit link is [here](https://github.com/mindsdb/lightwood/blob/0372d292796a6d1f91ac9df9b8658ad2f128b7c9/lightwood/api/ensemble.py)\n", "before_files": [{"content": "from lightwood import Predictor\nfrom lightwood.constants.lightwood import ColumnDataTypes\nfrom collections import Counter\nimport numpy as np\nimport pickle\nimport os\n\n\nclass LightwoodEnsemble:\n def __init__(self, predictors=None, load_from_path=None):\n self.path_list = None\n if load_from_path is not None:\n with open(os.path.join(load_from_path, 'lightwood_data'), 'rb') as pickle_in:\n obj = pickle.load(pickle_in)\n self.path = load_from_path\n self.path_list = obj.path_list\n self.ensemble = [Predictor(load_from_path=path) for path in self.path_list]\n elif isinstance(predictors, Predictor):\n self.ensemble = [predictors]\n elif isinstance(predictors, list):\n self.ensemble = predictors\n\n def append(self, predictor):\n if isinstance(self.ensemble, list):\n self.ensemble.append(predictor)\n else:\n self.ensemble = [predictor]\n\n def __iter__(self):\n yield self.ensemble\n\n def predict(self, when_data):\n predictions = [p.predict(when_data=when_data) for p in self.ensemble]\n formatted_predictions = {}\n for target in self.ensemble[0].config['output_features']:\n target_name = target['name']\n formatted_predictions[target_name] = {}\n pred_arr = np.array([p[target_name]['predictions'] for p in predictions])\n if target['type'] == ColumnDataTypes.NUMERIC:\n final_preds = np.mean(pred_arr, axis=0).tolist()\n elif target['type'] == ColumnDataTypes.CATEGORICAL:\n final_preds = [max(Counter(pred_arr[:, idx])) for idx in range(pred_arr.shape[1])]\n\n # @TODO: implement class distribution for ensembles\n # NOTE: label set *could* grow when adding predictors, which complicates belief score computation\n formatted_predictions[target_name]['class_distribution'] = np.ones(shape=(len(final_preds), 1))\n else:\n raise Exception('Only numeric and categorical datatypes are supported for ensembles')\n\n formatted_predictions[target_name]['predictions'] = final_preds\n\n return formatted_predictions\n\n def save(self, path_to):\n # TODO: potentially save predictors inside ensemble pickle, though there's the issue of nonpersistent stuff with torch.save() # noqa\n path_list = []\n for i, model in enumerate(self.ensemble):\n path = os.path.join(path_to, f'lightwood_predictor_{i}')\n path_list.append(path)\n model.save(path_to=path)\n\n self.path_list = path_list\n\n # TODO: in the future, save preds inside this data struct\n self.ensemble = None # we deref predictors for now\n with open(os.path.join(path_to, 'lightwood_data'), 'wb') as file:\n pickle.dump(self, file, pickle.HIGHEST_PROTOCOL)\n", "path": "lightwood/api/ensemble.py"}], "after_files": [{"content": null, "path": "lightwood/api/ensemble.py"}]} | 1,140 | 724 |
gh_patches_debug_1352 | rasdani/github-patches | git_diff | pwr-Solaar__Solaar-1826 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 1.1.7
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python3
2
3 from glob import glob as _glob
4
5 try:
6 from setuptools import setup
7 except ImportError:
8 from distutils.core import setup
9
10 # from solaar import NAME, __version__
11 __version__ = '1.1.7'
12 NAME = 'Solaar'
13
14
15 def _data_files():
16 from os.path import dirname as _dirname
17
18 yield 'share/solaar/icons', _glob('share/solaar/icons/solaar*.svg')
19 yield 'share/solaar/icons', _glob('share/solaar/icons/light_*.png')
20 yield 'share/icons/hicolor/scalable/apps', ['share/solaar/icons/solaar.svg']
21
22 for mo in _glob('share/locale/*/LC_MESSAGES/solaar.mo'):
23 yield _dirname(mo), [mo]
24
25 yield 'share/applications', ['share/applications/solaar.desktop']
26 yield 'share/solaar/udev-rules.d', ['rules.d/42-logitech-unify-permissions.rules']
27 yield 'share/metainfo', ['share/solaar/io.github.pwr_solaar.solaar.metainfo.xml']
28
29 del _dirname
30
31
32 setup(
33 name=NAME.lower(),
34 version=__version__,
35 description='Linux device manager for Logitech receivers, keyboards, mice, and tablets.',
36 long_description='''
37 Solaar is a Linux device manager for many Logitech peripherals that connect through
38 Unifying and other receivers or via USB or Bluetooth.
39 Solaar is able to pair/unpair devices with receivers and show and modify some of the
40 modifiable features of devices.
41 For instructions on installing Solaar see https://pwr-solaar.github.io/Solaar/installation'''.strip(),
42 author='Daniel Pavel',
43 license='GPLv2',
44 url='http://pwr-solaar.github.io/Solaar/',
45 classifiers=[
46 'Development Status :: 4 - Beta',
47 'Environment :: X11 Applications :: GTK',
48 'Environment :: Console',
49 'Intended Audience :: End Users/Desktop',
50 'License :: DFSG approved',
51 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
52 'Natural Language :: English',
53 'Programming Language :: Python :: 3 :: Only',
54 'Operating System :: POSIX :: Linux',
55 'Topic :: Utilities',
56 ],
57 platforms=['linux'],
58
59 # sudo apt install python-gi python3-gi \
60 # gir1.2-gtk-3.0 gir1.2-notify-0.7 gir1.2-ayatanaappindicator3-0.1
61 # os_requires=['gi.repository.GObject (>= 2.0)', 'gi.repository.Gtk (>= 3.0)'],
62 python_requires='>=3.7',
63 install_requires=[
64 'evdev (>= 1.1.2)',
65 'pyudev (>= 0.13)',
66 'PyYAML (>= 3.12)',
67 'python-xlib (>= 0.27)',
68 'psutil (>= 5.4.3)',
69 'typing_extensions (>=4.0.0)',
70 ],
71 extras_require={
72 'report-descriptor': ['hid-parser'],
73 'desktop-notifications': ['Notify (>= 0.7)'],
74 },
75 package_dir={'': 'lib'},
76 packages=['keysyms', 'hidapi', 'logitech_receiver', 'solaar', 'solaar.ui', 'solaar.cli'],
77 data_files=list(_data_files()),
78 scripts=_glob('bin/*'),
79 )
80
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -66,7 +66,6 @@
'PyYAML (>= 3.12)',
'python-xlib (>= 0.27)',
'psutil (>= 5.4.3)',
- 'typing_extensions (>=4.0.0)',
],
extras_require={
'report-descriptor': ['hid-parser'],
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -66,7 +66,6 @@\n 'PyYAML (>= 3.12)',\n 'python-xlib (>= 0.27)',\n 'psutil (>= 5.4.3)',\n- 'typing_extensions (>=4.0.0)',\n ],\n extras_require={\n 'report-descriptor': ['hid-parser'],\n", "issue": "Release 1.1.7\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nfrom glob import glob as _glob\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\n# from solaar import NAME, __version__\n__version__ = '1.1.7'\nNAME = 'Solaar'\n\n\ndef _data_files():\n from os.path import dirname as _dirname\n\n yield 'share/solaar/icons', _glob('share/solaar/icons/solaar*.svg')\n yield 'share/solaar/icons', _glob('share/solaar/icons/light_*.png')\n yield 'share/icons/hicolor/scalable/apps', ['share/solaar/icons/solaar.svg']\n\n for mo in _glob('share/locale/*/LC_MESSAGES/solaar.mo'):\n yield _dirname(mo), [mo]\n\n yield 'share/applications', ['share/applications/solaar.desktop']\n yield 'share/solaar/udev-rules.d', ['rules.d/42-logitech-unify-permissions.rules']\n yield 'share/metainfo', ['share/solaar/io.github.pwr_solaar.solaar.metainfo.xml']\n\n del _dirname\n\n\nsetup(\n name=NAME.lower(),\n version=__version__,\n description='Linux device manager for Logitech receivers, keyboards, mice, and tablets.',\n long_description='''\nSolaar is a Linux device manager for many Logitech peripherals that connect through\nUnifying and other receivers or via USB or Bluetooth.\nSolaar is able to pair/unpair devices with receivers and show and modify some of the\nmodifiable features of devices.\nFor instructions on installing Solaar see https://pwr-solaar.github.io/Solaar/installation'''.strip(),\n author='Daniel Pavel',\n license='GPLv2',\n url='http://pwr-solaar.github.io/Solaar/',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: X11 Applications :: GTK',\n 'Environment :: Console',\n 'Intended Audience :: End Users/Desktop',\n 'License :: DFSG approved',\n 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3 :: Only',\n 'Operating System :: POSIX :: Linux',\n 'Topic :: Utilities',\n ],\n platforms=['linux'],\n\n # sudo apt install python-gi python3-gi \\\n # gir1.2-gtk-3.0 gir1.2-notify-0.7 gir1.2-ayatanaappindicator3-0.1\n # os_requires=['gi.repository.GObject (>= 2.0)', 'gi.repository.Gtk (>= 3.0)'],\n python_requires='>=3.7',\n install_requires=[\n 'evdev (>= 1.1.2)',\n 'pyudev (>= 0.13)',\n 'PyYAML (>= 3.12)',\n 'python-xlib (>= 0.27)',\n 'psutil (>= 5.4.3)',\n 'typing_extensions (>=4.0.0)',\n ],\n extras_require={\n 'report-descriptor': ['hid-parser'],\n 'desktop-notifications': ['Notify (>= 0.7)'],\n },\n package_dir={'': 'lib'},\n packages=['keysyms', 'hidapi', 'logitech_receiver', 'solaar', 'solaar.ui', 'solaar.cli'],\n data_files=list(_data_files()),\n scripts=_glob('bin/*'),\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\nfrom glob import glob as _glob\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\n# from solaar import NAME, __version__\n__version__ = '1.1.7'\nNAME = 'Solaar'\n\n\ndef _data_files():\n from os.path import dirname as _dirname\n\n yield 'share/solaar/icons', _glob('share/solaar/icons/solaar*.svg')\n yield 'share/solaar/icons', _glob('share/solaar/icons/light_*.png')\n yield 'share/icons/hicolor/scalable/apps', ['share/solaar/icons/solaar.svg']\n\n for mo in 
_glob('share/locale/*/LC_MESSAGES/solaar.mo'):\n yield _dirname(mo), [mo]\n\n yield 'share/applications', ['share/applications/solaar.desktop']\n yield 'share/solaar/udev-rules.d', ['rules.d/42-logitech-unify-permissions.rules']\n yield 'share/metainfo', ['share/solaar/io.github.pwr_solaar.solaar.metainfo.xml']\n\n del _dirname\n\n\nsetup(\n name=NAME.lower(),\n version=__version__,\n description='Linux device manager for Logitech receivers, keyboards, mice, and tablets.',\n long_description='''\nSolaar is a Linux device manager for many Logitech peripherals that connect through\nUnifying and other receivers or via USB or Bluetooth.\nSolaar is able to pair/unpair devices with receivers and show and modify some of the\nmodifiable features of devices.\nFor instructions on installing Solaar see https://pwr-solaar.github.io/Solaar/installation'''.strip(),\n author='Daniel Pavel',\n license='GPLv2',\n url='http://pwr-solaar.github.io/Solaar/',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: X11 Applications :: GTK',\n 'Environment :: Console',\n 'Intended Audience :: End Users/Desktop',\n 'License :: DFSG approved',\n 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3 :: Only',\n 'Operating System :: POSIX :: Linux',\n 'Topic :: Utilities',\n ],\n platforms=['linux'],\n\n # sudo apt install python-gi python3-gi \\\n # gir1.2-gtk-3.0 gir1.2-notify-0.7 gir1.2-ayatanaappindicator3-0.1\n # os_requires=['gi.repository.GObject (>= 2.0)', 'gi.repository.Gtk (>= 3.0)'],\n python_requires='>=3.7',\n install_requires=[\n 'evdev (>= 1.1.2)',\n 'pyudev (>= 0.13)',\n 'PyYAML (>= 3.12)',\n 'python-xlib (>= 0.27)',\n 'psutil (>= 5.4.3)',\n ],\n extras_require={\n 'report-descriptor': ['hid-parser'],\n 'desktop-notifications': ['Notify (>= 0.7)'],\n },\n package_dir={'': 'lib'},\n packages=['keysyms', 'hidapi', 'logitech_receiver', 'solaar', 'solaar.ui', 'solaar.cli'],\n data_files=list(_data_files()),\n scripts=_glob('bin/*'),\n)\n", "path": "setup.py"}]} | 1,187 | 100 |
gh_patches_debug_6252 | rasdani/github-patches | git_diff | google__turbinia-809 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
GrepTask issue
```
2021-04-28 17:13:25 [ERROR] GrepTask Task failed with exception: [a bytes-like object is required, not 'str']
2021-04-28 17:13:25 [ERROR] Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/turbinia-20210330-py3.6.egg/turbinia/workers/__init__.py", line 893, in run_wrapper
self.result = self.run(evidence, self.result)
File "/usr/local/lib/python3.6/dist-packages/turbinia-20210330-py3.6.egg/turbinia/workers/grep.py", line 49, in run
fh.write('\n'.join(patterns))
File "/usr/lib/python3.6/tempfile.py", line 624, in func_wrapper
return func(*args, **kwargs)
TypeError: a bytes-like object is required, not 'str'
2021-04-28 17:13:26 [ERROR] GrepTask Task failed with exception: [a bytes-like object is required, not 'str']
2021-04-28 17:13:26 [INFO] Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/turbinia-20210330-py3.6.egg/turbinia/workers/__init__.py", line 893, in run_wrapper
self.result = self.run(evidence, self.result)
File "/usr/local/lib/python3.6/dist-packages/turbinia-20210330-py3.6.egg/turbinia/workers/grep.py", line 49, in run
fh.write('\n'.join(patterns))
File "/usr/lib/python3.6/tempfile.py", line 624, in func_wrapper
return func(*args, **kwargs)
TypeError: a bytes-like object is required, not 'str'
```
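A minimal standalone sketch of the failure and one possible remedy (not the actual task code): `NamedTemporaryFile` opens in binary mode (`w+b`) by default, so writing a `str` raises exactly this `TypeError`; encoding the joined patterns to bytes (or opening the file in text mode) avoids it.

```python
from tempfile import NamedTemporaryFile

patterns = ["foo", "bar"]  # placeholder filter patterns

with NamedTemporaryFile(delete=False) as fh:
    # fh.write('\n'.join(patterns))                # TypeError: a bytes-like object is required, not 'str'
    fh.write('\n'.join(patterns).encode('utf-8'))  # bytes are accepted in the default 'w+b' mode
```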
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `turbinia/workers/grep.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright 2015 Google Inc.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """Task to filter a text file using extended regular expression patterns."""
16
17 from __future__ import unicode_literals
18
19 import os
20 from tempfile import NamedTemporaryFile
21
22 from turbinia.evidence import FilteredTextFile
23 from turbinia.workers import TurbiniaTask
24
25
26 class GrepTask(TurbiniaTask):
27 """Filter input based on extended regular expression patterns."""
28
29 def run(self, evidence, result):
30 """Run grep binary.
31
32 Args:
33 evidence (Evidence object): The evidence we will process
34 result (TurbiniaTaskResult): The object to place task results into.
35
36 Returns:
37 TurbiniaTaskResult object.
38 """
39
40 patterns = evidence.config.get('filter_patterns')
41 if not patterns:
42 result.close(self, success=True, status='No patterns supplied, exit task')
43 return result
44
45 # Create temporary file to write patterns to.
46 # Used as input to grep (-f).
47 with NamedTemporaryFile(dir=self.output_dir, delete=False) as fh:
48 patterns_file_path = fh.name
49 fh.write('\n'.join(patterns))
50
51 # Create a path that we can write the new file to.
52 base_name = os.path.basename(evidence.local_path)
53 output_file_path = os.path.join(
54 self.output_dir, '{0:s}.filtered'.format(base_name))
55
56 output_evidence = FilteredTextFile(source_path=output_file_path)
57 cmd = 'grep -E -b -n -f {0:s} {1:s} > {2:s}'.format(
58 patterns_file_path, evidence.local_path, output_file_path)
59
60 result.log('Running [{0:s}]'.format(cmd))
61 ret, result = self.execute(
62 cmd, result, new_evidence=[output_evidence], shell=True,
63 success_codes=[0, 1])
64
65 # Grep returns 0 on success and 1 if no results are found.
66 if ret == 0:
67 status = 'Grep Task found results in {0:s}'.format(evidence.name)
68 result.close(self, success=True, status=status)
69 elif ret == 1:
70 status = 'Grep Task did not find any results in {0:s}'.format(
71 evidence.name)
72 result.close(self, success=True, status=status)
73 else:
74 result.close(self, success=False)
75
76 return result
77
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/turbinia/workers/grep.py b/turbinia/workers/grep.py
--- a/turbinia/workers/grep.py
+++ b/turbinia/workers/grep.py
@@ -46,7 +46,7 @@
# Used as input to grep (-f).
with NamedTemporaryFile(dir=self.output_dir, delete=False) as fh:
patterns_file_path = fh.name
- fh.write('\n'.join(patterns))
+ fh.write('\n'.join(patterns.encode('utf-8')))
# Create a path that we can write the new file to.
base_name = os.path.basename(evidence.local_path)
| {"golden_diff": "diff --git a/turbinia/workers/grep.py b/turbinia/workers/grep.py\n--- a/turbinia/workers/grep.py\n+++ b/turbinia/workers/grep.py\n@@ -46,7 +46,7 @@\n # Used as input to grep (-f).\n with NamedTemporaryFile(dir=self.output_dir, delete=False) as fh:\n patterns_file_path = fh.name\n- fh.write('\\n'.join(patterns))\n+ fh.write('\\n'.join(patterns.encode('utf-8')))\n \n # Create a path that we can write the new file to.\n base_name = os.path.basename(evidence.local_path)\n", "issue": "GrepTask issue\n```\r\n2021-04-28 17:13:25 [ERROR] GrepTask Task failed with exception: [a bytes-like object is required, not 'str']\r\n2021-04-28 17:13:25 [ERROR] Traceback (most recent call last):\r\n File \"/usr/local/lib/python3.6/dist-packages/turbinia-20210330-py3.6.egg/turbinia/workers/__init__.py\", line 893, in run_wrapper\r\n self.result = self.run(evidence, self.result)\r\n File \"/usr/local/lib/python3.6/dist-packages/turbinia-20210330-py3.6.egg/turbinia/workers/grep.py\", line 49, in run\r\n fh.write('\\n'.join(patterns))\r\n File \"/usr/lib/python3.6/tempfile.py\", line 624, in func_wrapper\r\n return func(*args, **kwargs)\r\nTypeError: a bytes-like object is required, not 'str'\r\n\r\n2021-04-28 17:13:26 [ERROR] GrepTask Task failed with exception: [a bytes-like object is required, not 'str']\r\n2021-04-28 17:13:26 [INFO] Traceback (most recent call last):\r\n File \"/usr/local/lib/python3.6/dist-packages/turbinia-20210330-py3.6.egg/turbinia/workers/__init__.py\", line 893, in run_wrapper\r\n self.result = self.run(evidence, self.result)\r\n File \"/usr/local/lib/python3.6/dist-packages/turbinia-20210330-py3.6.egg/turbinia/workers/grep.py\", line 49, in run\r\n fh.write('\\n'.join(patterns))\r\n File \"/usr/lib/python3.6/tempfile.py\", line 624, in func_wrapper\r\n return func(*args, **kwargs)\r\nTypeError: a bytes-like object is required, not 'str'\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Task to filter a text file using extended regular expression patterns.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport os\nfrom tempfile import NamedTemporaryFile\n\nfrom turbinia.evidence import FilteredTextFile\nfrom turbinia.workers import TurbiniaTask\n\n\nclass GrepTask(TurbiniaTask):\n \"\"\"Filter input based on extended regular expression patterns.\"\"\"\n\n def run(self, evidence, result):\n \"\"\"Run grep binary.\n\n Args:\n evidence (Evidence object): The evidence we will process\n result (TurbiniaTaskResult): The object to place task results into.\n\n Returns:\n TurbiniaTaskResult object.\n \"\"\"\n\n patterns = evidence.config.get('filter_patterns')\n if not patterns:\n result.close(self, success=True, status='No patterns supplied, exit task')\n return result\n\n # Create temporary file to write patterns to.\n # Used as input to grep (-f).\n with NamedTemporaryFile(dir=self.output_dir, delete=False) as fh:\n patterns_file_path = fh.name\n 
fh.write('\\n'.join(patterns))\n\n # Create a path that we can write the new file to.\n base_name = os.path.basename(evidence.local_path)\n output_file_path = os.path.join(\n self.output_dir, '{0:s}.filtered'.format(base_name))\n\n output_evidence = FilteredTextFile(source_path=output_file_path)\n cmd = 'grep -E -b -n -f {0:s} {1:s} > {2:s}'.format(\n patterns_file_path, evidence.local_path, output_file_path)\n\n result.log('Running [{0:s}]'.format(cmd))\n ret, result = self.execute(\n cmd, result, new_evidence=[output_evidence], shell=True,\n success_codes=[0, 1])\n\n # Grep returns 0 on success and 1 if no results are found.\n if ret == 0:\n status = 'Grep Task found results in {0:s}'.format(evidence.name)\n result.close(self, success=True, status=status)\n elif ret == 1:\n status = 'Grep Task did not find any results in {0:s}'.format(\n evidence.name)\n result.close(self, success=True, status=status)\n else:\n result.close(self, success=False)\n\n return result\n", "path": "turbinia/workers/grep.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Task to filter a text file using extended regular expression patterns.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport os\nfrom tempfile import NamedTemporaryFile\n\nfrom turbinia.evidence import FilteredTextFile\nfrom turbinia.workers import TurbiniaTask\n\n\nclass GrepTask(TurbiniaTask):\n \"\"\"Filter input based on extended regular expression patterns.\"\"\"\n\n def run(self, evidence, result):\n \"\"\"Run grep binary.\n\n Args:\n evidence (Evidence object): The evidence we will process\n result (TurbiniaTaskResult): The object to place task results into.\n\n Returns:\n TurbiniaTaskResult object.\n \"\"\"\n\n patterns = evidence.config.get('filter_patterns')\n if not patterns:\n result.close(self, success=True, status='No patterns supplied, exit task')\n return result\n\n # Create temporary file to write patterns to.\n # Used as input to grep (-f).\n with NamedTemporaryFile(dir=self.output_dir, delete=False) as fh:\n patterns_file_path = fh.name\n fh.write('\\n'.join(patterns.encode('utf-8')))\n\n # Create a path that we can write the new file to.\n base_name = os.path.basename(evidence.local_path)\n output_file_path = os.path.join(\n self.output_dir, '{0:s}.filtered'.format(base_name))\n\n output_evidence = FilteredTextFile(source_path=output_file_path)\n cmd = 'grep -E -b -n -f {0:s} {1:s} > {2:s}'.format(\n patterns_file_path, evidence.local_path, output_file_path)\n\n result.log('Running [{0:s}]'.format(cmd))\n ret, result = self.execute(\n cmd, result, new_evidence=[output_evidence], shell=True,\n success_codes=[0, 1])\n\n # Grep returns 0 on success and 1 if no results are found.\n if ret == 0:\n status = 'Grep Task found results in {0:s}'.format(evidence.name)\n result.close(self, success=True, status=status)\n elif ret == 1:\n status = 'Grep Task did not find any results in {0:s}'.format(\n evidence.name)\n result.close(self, 
success=True, status=status)\n else:\n result.close(self, success=False)\n\n return result\n", "path": "turbinia/workers/grep.py"}]} | 1,543 | 148 |
gh_patches_debug_37596 | rasdani/github-patches | git_diff | streamlink__streamlink-4550 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
plugins.useetv: log if no link has been found
<!--
Thanks for opening a pull request!
Before you continue, please make sure that you have read and understood the contribution guidelines, otherwise your changes may be rejected:
https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink
If possible, run the tests, perform code linting and build the documentation locally on your system first to avoid unnecessary build failures:
https://streamlink.github.io/latest/developing.html#validating-changes
Also don't forget to add a meaningful description of your changes, so that the reviewing process is as simple as possible for the maintainers.
Thank you very much!
-->
**Why this PR ?**
This PR has been made to check for the case where no link has been found. USeeTV doesn't provide all of its channels worldwide: some channels are restricted to Indonesian users only, and others need a subscription to work (see beIN Asia as an example). Some channels like SeaToday work, but a channel like this one:

will only show a geo-restriction message above the player, telling the end user that they have no access to the stream.
This is also reflected inside the player, meaning no link can be scraped.
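A rough sketch of the idea, not the actual plugin code (the function name is made up; the notice strings match those checked in the accompanying patch): when the page only carries a geo-restriction or subscription notice, log that instead of silently returning no streams.

```python
import logging

log = logging.getLogger(__name__)

NOTICES = {
    "This service is not available in your Country": "The content is not available in your region",
    "Silahkan login Menggunakan akun MyIndihome dan berlangganan minipack": "The content is not available without a subscription",
}

def report_unavailable(page_html: str) -> bool:
    """Log a clear error if the page shows an availability notice."""
    for needle, message in NOTICES.items():
        if needle in page_html:
            log.error(message)
            return True
    return False
```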
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/useetv.py`
Content:
```
1 """
2 $description Live TV channels and video on-demand service from UseeTV, owned by Telkom Indonesia.
3 $url useetv.com
4 $type live, vod
5 """
6
7 import re
8
9 from streamlink.plugin import Plugin, pluginmatcher
10 from streamlink.plugin.api import validate
11 from streamlink.stream.dash import DASHStream
12 from streamlink.stream.hls import HLSStream
13
14
15 @pluginmatcher(re.compile(r"https?://(?:www\.)?useetv\.com/"))
16 class UseeTV(Plugin):
17 def find_url(self):
18 url_re = re.compile(r"""['"](https://.*?/(?:[Pp]laylist\.m3u8|manifest\.mpd)[^'"]+)['"]""")
19
20 return self.session.http.get(self.url, schema=validate.Schema(
21 validate.parse_html(),
22 validate.any(
23 validate.all(
24 validate.xml_xpath_string("""
25 .//script[contains(text(), 'laylist.m3u8') or contains(text(), 'manifest.mpd')][1]/text()
26 """),
27 str,
28 validate.transform(url_re.search),
29 validate.any(None, validate.all(validate.get(1), validate.url())),
30 ),
31 validate.all(
32 validate.xml_xpath_string(".//video[@id='video-player']/source/@src"),
33 validate.any(None, validate.url()),
34 ),
35 ),
36 ))
37
38 def _get_streams(self):
39 url = self.find_url()
40
41 if url and ".m3u8" in url:
42 return HLSStream.parse_variant_playlist(self.session, url)
43 elif url and ".mpd" in url:
44 return DASHStream.parse_manifest(self.session, url)
45
46
47 __plugin__ = UseeTV
48
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/streamlink/plugins/useetv.py b/src/streamlink/plugins/useetv.py
--- a/src/streamlink/plugins/useetv.py
+++ b/src/streamlink/plugins/useetv.py
@@ -4,6 +4,7 @@
$type live, vod
"""
+import logging
import re
from streamlink.plugin import Plugin, pluginmatcher
@@ -11,32 +12,46 @@
from streamlink.stream.dash import DASHStream
from streamlink.stream.hls import HLSStream
+log = logging.getLogger(__name__)
+
@pluginmatcher(re.compile(r"https?://(?:www\.)?useetv\.com/"))
class UseeTV(Plugin):
- def find_url(self):
- url_re = re.compile(r"""['"](https://.*?/(?:[Pp]laylist\.m3u8|manifest\.mpd)[^'"]+)['"]""")
+ def _get_streams(self):
+ root = self.session.http.get(self.url, schema=validate.Schema(validate.parse_html()))
+
+ for needle, errormsg in (
+ (
+ "This service is not available in your Country",
+ "The content is not available in your region",
+ ),
+ (
+ "Silahkan login Menggunakan akun MyIndihome dan berlangganan minipack",
+ "The content is not available without a subscription",
+ ),
+ ):
+ if validate.Schema(validate.xml_xpath(f""".//script[contains(text(), '"{needle}"')]""")).validate(root):
+ log.error(errormsg)
+ return
- return self.session.http.get(self.url, schema=validate.Schema(
- validate.parse_html(),
+ url = validate.Schema(
validate.any(
validate.all(
validate.xml_xpath_string("""
.//script[contains(text(), 'laylist.m3u8') or contains(text(), 'manifest.mpd')][1]/text()
"""),
str,
- validate.transform(url_re.search),
- validate.any(None, validate.all(validate.get(1), validate.url())),
+ validate.transform(
+ re.compile(r"""(?P<q>['"])(?P<url>https://.*?/(?:[Pp]laylist\.m3u8|manifest\.mpd).+?)(?P=q)""").search
+ ),
+ validate.any(None, validate.all(validate.get("url"), validate.url())),
),
validate.all(
validate.xml_xpath_string(".//video[@id='video-player']/source/@src"),
validate.any(None, validate.url()),
),
- ),
- ))
-
- def _get_streams(self):
- url = self.find_url()
+ )
+ ).validate(root)
if url and ".m3u8" in url:
return HLSStream.parse_variant_playlist(self.session, url)
| {"golden_diff": "diff --git a/src/streamlink/plugins/useetv.py b/src/streamlink/plugins/useetv.py\n--- a/src/streamlink/plugins/useetv.py\n+++ b/src/streamlink/plugins/useetv.py\n@@ -4,6 +4,7 @@\n $type live, vod\n \"\"\"\n \n+import logging\n import re\n \n from streamlink.plugin import Plugin, pluginmatcher\n@@ -11,32 +12,46 @@\n from streamlink.stream.dash import DASHStream\n from streamlink.stream.hls import HLSStream\n \n+log = logging.getLogger(__name__)\n+\n \n @pluginmatcher(re.compile(r\"https?://(?:www\\.)?useetv\\.com/\"))\n class UseeTV(Plugin):\n- def find_url(self):\n- url_re = re.compile(r\"\"\"['\"](https://.*?/(?:[Pp]laylist\\.m3u8|manifest\\.mpd)[^'\"]+)['\"]\"\"\")\n+ def _get_streams(self):\n+ root = self.session.http.get(self.url, schema=validate.Schema(validate.parse_html()))\n+\n+ for needle, errormsg in (\n+ (\n+ \"This service is not available in your Country\",\n+ \"The content is not available in your region\",\n+ ),\n+ (\n+ \"Silahkan login Menggunakan akun MyIndihome dan berlangganan minipack\",\n+ \"The content is not available without a subscription\",\n+ ),\n+ ):\n+ if validate.Schema(validate.xml_xpath(f\"\"\".//script[contains(text(), '\"{needle}\"')]\"\"\")).validate(root):\n+ log.error(errormsg)\n+ return\n \n- return self.session.http.get(self.url, schema=validate.Schema(\n- validate.parse_html(),\n+ url = validate.Schema(\n validate.any(\n validate.all(\n validate.xml_xpath_string(\"\"\"\n .//script[contains(text(), 'laylist.m3u8') or contains(text(), 'manifest.mpd')][1]/text()\n \"\"\"),\n str,\n- validate.transform(url_re.search),\n- validate.any(None, validate.all(validate.get(1), validate.url())),\n+ validate.transform(\n+ re.compile(r\"\"\"(?P<q>['\"])(?P<url>https://.*?/(?:[Pp]laylist\\.m3u8|manifest\\.mpd).+?)(?P=q)\"\"\").search\n+ ),\n+ validate.any(None, validate.all(validate.get(\"url\"), validate.url())),\n ),\n validate.all(\n validate.xml_xpath_string(\".//video[@id='video-player']/source/@src\"),\n validate.any(None, validate.url()),\n ),\n- ),\n- ))\n-\n- def _get_streams(self):\n- url = self.find_url()\n+ )\n+ ).validate(root)\n \n if url and \".m3u8\" in url:\n return HLSStream.parse_variant_playlist(self.session, url)\n", "issue": "plugins.useetv: log if no link has been found\n<!--\r\nThanks for opening a pull request!\r\n\r\nBefore you continue, please make sure that you have read and understood the contribution guidelines, otherwise your changes may be rejected:\r\nhttps://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink\r\n\r\nIf possible, run the tests, perform code linting and build the documentation locally on your system first to avoid unnecessary build failures:\r\nhttps://streamlink.github.io/latest/developing.html#validating-changes\r\n\r\nAlso don't forget to add a meaningful description of your changes, so that the reviewing process is as simple as possible for the maintainers.\r\n\r\nThank you very much!\r\n-->\r\n\r\n**Why this PR ?**\r\n\r\nThis PR has been made to verify if no link has been found. Indeed, USeeTV doesn't provide all his channels worldwide. Some channels are blocked for Indonesian people only, and some others need a subscription to work (see beIN Asia as an example). Some channels like SeaToday would work, but channels like this one : \r\n\r\nwill only show a Geo-restriction message above the player, telling the end-user he has no access to the stream. 
\r\n\r\nThis also reflects inside the player, meaning no link can be scraped.\r\n\n", "before_files": [{"content": "\"\"\"\n$description Live TV channels and video on-demand service from UseeTV, owned by Telkom Indonesia.\n$url useetv.com\n$type live, vod\n\"\"\"\n\nimport re\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.dash import DASHStream\nfrom streamlink.stream.hls import HLSStream\n\n\n@pluginmatcher(re.compile(r\"https?://(?:www\\.)?useetv\\.com/\"))\nclass UseeTV(Plugin):\n def find_url(self):\n url_re = re.compile(r\"\"\"['\"](https://.*?/(?:[Pp]laylist\\.m3u8|manifest\\.mpd)[^'\"]+)['\"]\"\"\")\n\n return self.session.http.get(self.url, schema=validate.Schema(\n validate.parse_html(),\n validate.any(\n validate.all(\n validate.xml_xpath_string(\"\"\"\n .//script[contains(text(), 'laylist.m3u8') or contains(text(), 'manifest.mpd')][1]/text()\n \"\"\"),\n str,\n validate.transform(url_re.search),\n validate.any(None, validate.all(validate.get(1), validate.url())),\n ),\n validate.all(\n validate.xml_xpath_string(\".//video[@id='video-player']/source/@src\"),\n validate.any(None, validate.url()),\n ),\n ),\n ))\n\n def _get_streams(self):\n url = self.find_url()\n\n if url and \".m3u8\" in url:\n return HLSStream.parse_variant_playlist(self.session, url)\n elif url and \".mpd\" in url:\n return DASHStream.parse_manifest(self.session, url)\n\n\n__plugin__ = UseeTV\n", "path": "src/streamlink/plugins/useetv.py"}], "after_files": [{"content": "\"\"\"\n$description Live TV channels and video on-demand service from UseeTV, owned by Telkom Indonesia.\n$url useetv.com\n$type live, vod\n\"\"\"\n\nimport logging\nimport re\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.dash import DASHStream\nfrom streamlink.stream.hls import HLSStream\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(r\"https?://(?:www\\.)?useetv\\.com/\"))\nclass UseeTV(Plugin):\n def _get_streams(self):\n root = self.session.http.get(self.url, schema=validate.Schema(validate.parse_html()))\n\n for needle, errormsg in (\n (\n \"This service is not available in your Country\",\n \"The content is not available in your region\",\n ),\n (\n \"Silahkan login Menggunakan akun MyIndihome dan berlangganan minipack\",\n \"The content is not available without a subscription\",\n ),\n ):\n if validate.Schema(validate.xml_xpath(f\"\"\".//script[contains(text(), '\"{needle}\"')]\"\"\")).validate(root):\n log.error(errormsg)\n return\n\n url = validate.Schema(\n validate.any(\n validate.all(\n validate.xml_xpath_string(\"\"\"\n .//script[contains(text(), 'laylist.m3u8') or contains(text(), 'manifest.mpd')][1]/text()\n \"\"\"),\n str,\n validate.transform(\n re.compile(r\"\"\"(?P<q>['\"])(?P<url>https://.*?/(?:[Pp]laylist\\.m3u8|manifest\\.mpd).+?)(?P=q)\"\"\").search\n ),\n validate.any(None, validate.all(validate.get(\"url\"), validate.url())),\n ),\n validate.all(\n validate.xml_xpath_string(\".//video[@id='video-player']/source/@src\"),\n validate.any(None, validate.url()),\n ),\n )\n ).validate(root)\n\n if url and \".m3u8\" in url:\n return HLSStream.parse_variant_playlist(self.session, url)\n elif url and \".mpd\" in url:\n return DASHStream.parse_manifest(self.session, url)\n\n\n__plugin__ = UseeTV\n", "path": "src/streamlink/plugins/useetv.py"}]} | 1,037 | 618 |
gh_patches_debug_12345 | rasdani/github-patches | git_diff | meltano__meltano-7636 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bug: When meltano.yml is empty, no error message is printed. Rather, it just suggests reaching out to the community
### Meltano Version
2.19.0
### Python Version
3.9
### Bug scope
CLI (options, error messages, logging, etc.)
### Operating System
Windows - WSL(Ubuntu)
### Description
When `meltano.yml` is empty, the CLI (`meltano.cli.__init__.py: 105`) raises an `EmptyMeltanoFileException` whenever we try to run any command such as `meltano add` or `meltano ui`. But since there is no exception message, it just prints the troubleshooting message and blank lines as follows
```
Need help fixing this problem? Visit http://melta.no/ for troubleshooting steps, or to
join our friendly Slack community.
```
### Code
_No response_
--- END ISSUE ---
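For context, a minimal sketch of why the output ends up blank. The class body mirrors the snippet above, but the surrounding wiring is hypothetical and is not Meltano's actual CLI code.

```python
# Hypothetical reproduction: an exception that carries no message renders as
# an empty string, so there is nothing to print above the troubleshooting footer.
class EmptyMeltanoFileException(Exception):
    """Exception for empty meltano.yml file."""


try:
    raise EmptyMeltanoFileException()
except Exception as err:
    print(str(err))  # prints an empty line, hence the blank output shown above
    print("Need help fixing this problem? Visit http://melta.no/ for troubleshooting steps.")
```

Giving the exception a reason and an instruction, as the patch below does by subclassing `MeltanoError`, makes `str(err)` carry a useful message.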
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/meltano/core/error.py`
Content:
```
1 """Base Error classes."""
2
3 from __future__ import annotations
4
5 import typing as t
6 from asyncio.streams import StreamReader
7 from asyncio.subprocess import Process
8 from enum import Enum
9
10 if t.TYPE_CHECKING:
11 from meltano.core.project import Project
12
13
14 class ExitCode(int, Enum): # noqa: D101
15 OK = 0
16 FAIL = 1
17 NO_RETRY = 2
18
19
20 class MeltanoError(Exception):
21 """Base class for all user-facing errors."""
22
23 def __init__(
24 self,
25 reason: str,
26 instruction: str | None = None,
27 *args: t.Any,
28 **kwargs: t.Any,
29 ) -> None:
30 """Initialize a MeltanoError.
31
32 Args:
33 reason: A short explanation of the error.
34 instruction: A short instruction on how to fix the error.
35 args: Additional arguments to pass to the base exception class.
36 kwargs: Keyword arguments to pass to the base exception class.
37 """
38 self.reason = reason
39 self.instruction = instruction
40 super().__init__(reason, instruction, *args, **kwargs)
41
42 def __str__(self) -> str:
43 """Return a string representation of the error.
44
45 Returns:
46 A string representation of the error.
47 """
48 return (
49 f"{self.reason}. {self.instruction}."
50 if self.instruction
51 else f"{self.reason}."
52 )
53
54
55 class Error(Exception):
56 """Base exception for ELT errors."""
57
58 def exit_code(self): # noqa: D102
59 return ExitCode.FAIL
60
61
62 class ExtractError(Error):
63 """Error in the extraction process, like API errors."""
64
65 def exit_code(self): # noqa: D102
66 return ExitCode.NO_RETRY
67
68
69 class AsyncSubprocessError(Exception):
70 """Happens when an async subprocess exits with a resultcode != 0."""
71
72 def __init__(
73 self,
74 message: str,
75 process: Process,
76 stderr: str | None = None,
77 ): # noqa: DAR101
78 """Initialize AsyncSubprocessError."""
79 self.process = process
80 self._stderr: str | StreamReader | None = stderr or process.stderr
81 super().__init__(message)
82
83 @property
84 async def stderr(self) -> str | None:
85 """Return the output of the process to stderr."""
86 if not self._stderr: # noqa: DAR201
87 return None
88 elif not isinstance(self._stderr, str):
89 stream = await self._stderr.read()
90 self._stderr = stream.decode("utf-8")
91
92 return self._stderr
93
94
95 class PluginInstallError(Exception):
96 """Exception for when a plugin fails to install."""
97
98
99 class PluginInstallWarning(Exception):
100 """Exception for when a plugin optional optional step fails to install."""
101
102
103 class EmptyMeltanoFileException(Exception):
104 """Exception for empty meltano.yml file."""
105
106
107 class MeltanoConfigurationError(MeltanoError):
108 """Exception for when Meltano is inproperly configured."""
109
110
111 class ProjectNotFound(Error):
112 """A Project is instantiated outside of a meltano project structure."""
113
114 def __init__(self, project: Project):
115 """Instantiate the error.
116
117 Args:
118 project: the name of the project which cannot be found
119 """
120 super().__init__(
121 f"Cannot find `{project.meltanofile}`. Are you in a meltano project?",
122 )
123
124
125 class ProjectReadonly(Error):
126 """Attempting to update a readonly project."""
127
128 def __init__(self):
129 """Instantiate the error."""
130 super().__init__("This Meltano project is deployed as read-only")
131
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/meltano/core/error.py b/src/meltano/core/error.py
--- a/src/meltano/core/error.py
+++ b/src/meltano/core/error.py
@@ -100,9 +100,15 @@
"""Exception for when a plugin optional optional step fails to install."""
-class EmptyMeltanoFileException(Exception):
+class EmptyMeltanoFileException(MeltanoError):
"""Exception for empty meltano.yml file."""
+ def __init__(self) -> None:
+ """Instantiate the error."""
+ reason = "Your meltano.yml file is empty"
+ instruction = "Please update your meltano file with a valid configuration"
+ super().__init__(reason, instruction)
+
class MeltanoConfigurationError(MeltanoError):
"""Exception for when Meltano is inproperly configured."""
| {"golden_diff": "diff --git a/src/meltano/core/error.py b/src/meltano/core/error.py\n--- a/src/meltano/core/error.py\n+++ b/src/meltano/core/error.py\n@@ -100,9 +100,15 @@\n \"\"\"Exception for when a plugin optional optional step fails to install.\"\"\"\n \n \n-class EmptyMeltanoFileException(Exception):\n+class EmptyMeltanoFileException(MeltanoError):\n \"\"\"Exception for empty meltano.yml file.\"\"\"\n \n+ def __init__(self) -> None:\n+ \"\"\"Instantiate the error.\"\"\"\n+ reason = \"Your meltano.yml file is empty\"\n+ instruction = \"Please update your meltano file with a valid configuration\"\n+ super().__init__(reason, instruction)\n+\n \n class MeltanoConfigurationError(MeltanoError):\n \"\"\"Exception for when Meltano is inproperly configured.\"\"\"\n", "issue": "bug: When meltano.yml is empty, no error message is printed. Rather, it just mentions to reach out to community\n### Meltano Version\r\n\r\n2.19.0\r\n\r\n### Python Version\r\n\r\n3.9\r\n\r\n### Bug scope\r\n\r\nCLI (options, error messages, logging, etc.)\r\n\r\n### Operating System\r\n\r\nWindows - WSL(Ubuntu)\r\n\r\n### Description\r\n\r\nwhen `meltano.yml` is empty, `cli`(`meltano.cli.__init__.py: 105`) raises `EmptyMeltanoFileException` exception whenever we try to run any command such as `meltano add` or `meltano ui`. But, since there's no exception message, it just prints the troubleshooting message and blank lines as follows\r\n\r\n```\r\nNeed help fixing this problem? Visit http://melta.no/ for troubleshooting steps, or to\r\njoin our friendly Slack community.\r\n\r\n```\r\n\r\n\r\n### Code\r\n\r\n_No response_\n", "before_files": [{"content": "\"\"\"Base Error classes.\"\"\"\n\nfrom __future__ import annotations\n\nimport typing as t\nfrom asyncio.streams import StreamReader\nfrom asyncio.subprocess import Process\nfrom enum import Enum\n\nif t.TYPE_CHECKING:\n from meltano.core.project import Project\n\n\nclass ExitCode(int, Enum): # noqa: D101\n OK = 0\n FAIL = 1\n NO_RETRY = 2\n\n\nclass MeltanoError(Exception):\n \"\"\"Base class for all user-facing errors.\"\"\"\n\n def __init__(\n self,\n reason: str,\n instruction: str | None = None,\n *args: t.Any,\n **kwargs: t.Any,\n ) -> None:\n \"\"\"Initialize a MeltanoError.\n\n Args:\n reason: A short explanation of the error.\n instruction: A short instruction on how to fix the error.\n args: Additional arguments to pass to the base exception class.\n kwargs: Keyword arguments to pass to the base exception class.\n \"\"\"\n self.reason = reason\n self.instruction = instruction\n super().__init__(reason, instruction, *args, **kwargs)\n\n def __str__(self) -> str:\n \"\"\"Return a string representation of the error.\n\n Returns:\n A string representation of the error.\n \"\"\"\n return (\n f\"{self.reason}. 
{self.instruction}.\"\n if self.instruction\n else f\"{self.reason}.\"\n )\n\n\nclass Error(Exception):\n \"\"\"Base exception for ELT errors.\"\"\"\n\n def exit_code(self): # noqa: D102\n return ExitCode.FAIL\n\n\nclass ExtractError(Error):\n \"\"\"Error in the extraction process, like API errors.\"\"\"\n\n def exit_code(self): # noqa: D102\n return ExitCode.NO_RETRY\n\n\nclass AsyncSubprocessError(Exception):\n \"\"\"Happens when an async subprocess exits with a resultcode != 0.\"\"\"\n\n def __init__(\n self,\n message: str,\n process: Process,\n stderr: str | None = None,\n ): # noqa: DAR101\n \"\"\"Initialize AsyncSubprocessError.\"\"\"\n self.process = process\n self._stderr: str | StreamReader | None = stderr or process.stderr\n super().__init__(message)\n\n @property\n async def stderr(self) -> str | None:\n \"\"\"Return the output of the process to stderr.\"\"\"\n if not self._stderr: # noqa: DAR201\n return None\n elif not isinstance(self._stderr, str):\n stream = await self._stderr.read()\n self._stderr = stream.decode(\"utf-8\")\n\n return self._stderr\n\n\nclass PluginInstallError(Exception):\n \"\"\"Exception for when a plugin fails to install.\"\"\"\n\n\nclass PluginInstallWarning(Exception):\n \"\"\"Exception for when a plugin optional optional step fails to install.\"\"\"\n\n\nclass EmptyMeltanoFileException(Exception):\n \"\"\"Exception for empty meltano.yml file.\"\"\"\n\n\nclass MeltanoConfigurationError(MeltanoError):\n \"\"\"Exception for when Meltano is inproperly configured.\"\"\"\n\n\nclass ProjectNotFound(Error):\n \"\"\"A Project is instantiated outside of a meltano project structure.\"\"\"\n\n def __init__(self, project: Project):\n \"\"\"Instantiate the error.\n\n Args:\n project: the name of the project which cannot be found\n \"\"\"\n super().__init__(\n f\"Cannot find `{project.meltanofile}`. Are you in a meltano project?\",\n )\n\n\nclass ProjectReadonly(Error):\n \"\"\"Attempting to update a readonly project.\"\"\"\n\n def __init__(self):\n \"\"\"Instantiate the error.\"\"\"\n super().__init__(\"This Meltano project is deployed as read-only\")\n", "path": "src/meltano/core/error.py"}], "after_files": [{"content": "\"\"\"Base Error classes.\"\"\"\n\nfrom __future__ import annotations\n\nimport typing as t\nfrom asyncio.streams import StreamReader\nfrom asyncio.subprocess import Process\nfrom enum import Enum\n\nif t.TYPE_CHECKING:\n from meltano.core.project import Project\n\n\nclass ExitCode(int, Enum): # noqa: D101\n OK = 0\n FAIL = 1\n NO_RETRY = 2\n\n\nclass MeltanoError(Exception):\n \"\"\"Base class for all user-facing errors.\"\"\"\n\n def __init__(\n self,\n reason: str,\n instruction: str | None = None,\n *args: t.Any,\n **kwargs: t.Any,\n ) -> None:\n \"\"\"Initialize a MeltanoError.\n\n Args:\n reason: A short explanation of the error.\n instruction: A short instruction on how to fix the error.\n args: Additional arguments to pass to the base exception class.\n kwargs: Keyword arguments to pass to the base exception class.\n \"\"\"\n self.reason = reason\n self.instruction = instruction\n super().__init__(reason, instruction, *args, **kwargs)\n\n def __str__(self) -> str:\n \"\"\"Return a string representation of the error.\n\n Returns:\n A string representation of the error.\n \"\"\"\n return (\n f\"{self.reason}. 
{self.instruction}.\"\n if self.instruction\n else f\"{self.reason}.\"\n )\n\n\nclass Error(Exception):\n \"\"\"Base exception for ELT errors.\"\"\"\n\n def exit_code(self): # noqa: D102\n return ExitCode.FAIL\n\n\nclass ExtractError(Error):\n \"\"\"Error in the extraction process, like API errors.\"\"\"\n\n def exit_code(self): # noqa: D102\n return ExitCode.NO_RETRY\n\n\nclass AsyncSubprocessError(Exception):\n \"\"\"Happens when an async subprocess exits with a resultcode != 0.\"\"\"\n\n def __init__(\n self,\n message: str,\n process: Process,\n stderr: str | None = None,\n ): # noqa: DAR101\n \"\"\"Initialize AsyncSubprocessError.\"\"\"\n self.process = process\n self._stderr: str | StreamReader | None = stderr or process.stderr\n super().__init__(message)\n\n @property\n async def stderr(self) -> str | None:\n \"\"\"Return the output of the process to stderr.\"\"\"\n if not self._stderr: # noqa: DAR201\n return None\n elif not isinstance(self._stderr, str):\n stream = await self._stderr.read()\n self._stderr = stream.decode(\"utf-8\")\n\n return self._stderr\n\n\nclass PluginInstallError(Exception):\n \"\"\"Exception for when a plugin fails to install.\"\"\"\n\n\nclass PluginInstallWarning(Exception):\n \"\"\"Exception for when a plugin optional optional step fails to install.\"\"\"\n\n\nclass EmptyMeltanoFileException(MeltanoError):\n \"\"\"Exception for empty meltano.yml file.\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Instantiate the error.\"\"\"\n reason = \"Your meltano.yml file is empty\"\n instruction = \"Please update your meltano file with a valid configuration\"\n super().__init__(reason, instruction)\n\n\nclass MeltanoConfigurationError(MeltanoError):\n \"\"\"Exception for when Meltano is inproperly configured.\"\"\"\n\n\nclass ProjectNotFound(Error):\n \"\"\"A Project is instantiated outside of a meltano project structure.\"\"\"\n\n def __init__(self, project: Project):\n \"\"\"Instantiate the error.\n\n Args:\n project: the name of the project which cannot be found\n \"\"\"\n super().__init__(\n f\"Cannot find `{project.meltanofile}`. Are you in a meltano project?\",\n )\n\n\nclass ProjectReadonly(Error):\n \"\"\"Attempting to update a readonly project.\"\"\"\n\n def __init__(self):\n \"\"\"Instantiate the error.\"\"\"\n super().__init__(\"This Meltano project is deployed as read-only\")\n", "path": "src/meltano/core/error.py"}]} | 1,540 | 187 |
gh_patches_debug_19026 | rasdani/github-patches | git_diff | Kinto__kinto-135 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Missing CORS header on /v1/buckets/default/collections/tasks/records
> 09:19:55,733 Cross-Origin Request Blocked: the Same Origin policy does not allow reading the remote resource at http://0.0.0.0:8888/v1/buckets/default/collections/tasks/records?_since=1436512795672. Reason: the CORS header "Access-Control-Allow-Origin" is missing. <unknown>
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/views/buckets.py`
Content:
```
1 from six import text_type
2 from uuid import UUID
3
4 from pyramid.httpexceptions import HTTPForbidden, HTTPPreconditionFailed
5 from pyramid.security import NO_PERMISSION_REQUIRED
6 from pyramid.view import view_config
7
8 from cliquet import resource
9 from cliquet.utils import hmac_digest, build_request
10
11 from kinto.views import NameGenerator
12
13
14 def create_bucket(request, bucket_id):
15 """Create a bucket if it doesn't exists."""
16 bucket_put = (request.method.lower() == 'put' and
17 request.path.endswith('buckets/default'))
18
19 if not bucket_put:
20 subrequest = build_request(request, {
21 'method': 'PUT',
22 'path': '/buckets/%s' % bucket_id,
23 'body': {"data": {}},
24 'headers': {'If-None-Match': '*'.encode('utf-8')}
25 })
26
27 try:
28 request.invoke_subrequest(subrequest)
29 except HTTPPreconditionFailed:
30 # The bucket already exists
31 pass
32
33
34 def create_collection(request, bucket_id):
35 subpath = request.matchdict['subpath']
36 if subpath.startswith('/collections/'):
37 collection_id = subpath.split('/')[2]
38 collection_put = (request.method.lower() == 'put' and
39 request.path.endswith(collection_id))
40 if not collection_put:
41 subrequest = build_request(request, {
42 'method': 'PUT',
43 'path': '/buckets/%s/collections/%s' % (
44 bucket_id, collection_id),
45 'body': {"data": {}},
46 'headers': {'If-None-Match': '*'.encode('utf-8')}
47 })
48 try:
49 request.invoke_subrequest(subrequest)
50 except HTTPPreconditionFailed:
51 # The collection already exists
52 pass
53
54
55 @view_config(route_name='default_bucket', permission=NO_PERMISSION_REQUIRED)
56 def default_bucket(request):
57 if request.method.lower() == 'options':
58 path = request.path.replace('default', 'unknown')
59 subrequest = build_request(request, {
60 'method': 'OPTIONS',
61 'path': path
62 })
63 return request.invoke_subrequest(subrequest)
64
65 if getattr(request, 'prefixed_userid', None) is None:
66 raise HTTPForbidden # Pass through the forbidden_view_config
67
68 settings = request.registry.settings
69 hmac_secret = settings['cliquet.userid_hmac_secret']
70 # Build the user unguessable bucket_id UUID from its user_id
71 digest = hmac_digest(hmac_secret, request.prefixed_userid)
72 bucket_id = text_type(UUID(digest[:32]))
73 path = request.path.replace('default', bucket_id)
74 querystring = request.url[(request.url.index(request.path) +
75 len(request.path)):]
76
77 # Make sure bucket exists
78 create_bucket(request, bucket_id)
79
80 # Make sure the collection exists
81 create_collection(request, bucket_id)
82
83 subrequest = build_request(request, {
84 'method': request.method,
85 'path': path + querystring,
86 'body': request.body
87 })
88
89 return request.invoke_subrequest(subrequest)
90
91
92 @resource.register(name='bucket',
93 collection_methods=('GET',),
94 collection_path='/buckets',
95 record_path='/buckets/{{id}}')
96 class Bucket(resource.ProtectedResource):
97 permissions = ('read', 'write', 'collection:create', 'group:create')
98
99 def __init__(self, *args, **kwargs):
100 super(Bucket, self).__init__(*args, **kwargs)
101 self.collection.id_generator = NameGenerator()
102
103 def get_parent_id(self, request):
104 # Buckets are not isolated by user, unlike Cliquet resources.
105 return ''
106
107 def delete(self):
108 result = super(Bucket, self).delete()
109
110 # Delete groups.
111 storage = self.collection.storage
112 parent_id = '/buckets/%s' % self.record_id
113 storage.delete_all(collection_id='group', parent_id=parent_id)
114
115 # Delete collections.
116 deleted = storage.delete_all(collection_id='collection',
117 parent_id=parent_id)
118
119 # Delete records.
120 id_field = self.collection.id_field
121 for collection in deleted:
122 parent_id = '/buckets/%s/collections/%s' % (self.record_id,
123 collection[id_field])
124 storage.delete_all(collection_id='record', parent_id=parent_id)
125
126 return result
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kinto/views/buckets.py b/kinto/views/buckets.py
--- a/kinto/views/buckets.py
+++ b/kinto/views/buckets.py
@@ -1,12 +1,13 @@
from six import text_type
from uuid import UUID
-from pyramid.httpexceptions import HTTPForbidden, HTTPPreconditionFailed
+from pyramid.httpexceptions import (HTTPForbidden, HTTPPreconditionFailed,
+ HTTPException)
from pyramid.security import NO_PERMISSION_REQUIRED
from pyramid.view import view_config
from cliquet import resource
-from cliquet.utils import hmac_digest, build_request
+from cliquet.utils import hmac_digest, build_request, reapply_cors
from kinto.views import NameGenerator
@@ -86,7 +87,11 @@
'body': request.body
})
- return request.invoke_subrequest(subrequest)
+ try:
+ response = request.invoke_subrequest(subrequest)
+ except HTTPException as error:
+ response = reapply_cors(subrequest, error)
+ return response
@resource.register(name='bucket',
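The core of this patch is that an error raised while invoking the subrequest is still returned as a response, but only after its CORS headers are restored. A simplified sketch of that pattern follows; the wrapper name is hypothetical, while `HTTPException`, `build_request` and `reapply_cors` are the same names the diff above uses.

```python
from pyramid.httpexceptions import HTTPException

from cliquet.utils import build_request, reapply_cors


def invoke_with_cors(request, spec):
    """Run a subrequest but keep CORS headers on error responses."""
    subrequest = build_request(request, spec)
    try:
        return request.invoke_subrequest(subrequest)
    except HTTPException as error:
        # Without this step the error response lacks Access-Control-Allow-Origin,
        # so the browser reports the blocked cross-origin request seen in the issue.
        return reapply_cors(subrequest, error)
```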
| {"golden_diff": "diff --git a/kinto/views/buckets.py b/kinto/views/buckets.py\n--- a/kinto/views/buckets.py\n+++ b/kinto/views/buckets.py\n@@ -1,12 +1,13 @@\n from six import text_type\n from uuid import UUID\n \n-from pyramid.httpexceptions import HTTPForbidden, HTTPPreconditionFailed\n+from pyramid.httpexceptions import (HTTPForbidden, HTTPPreconditionFailed,\n+ HTTPException)\n from pyramid.security import NO_PERMISSION_REQUIRED\n from pyramid.view import view_config\n \n from cliquet import resource\n-from cliquet.utils import hmac_digest, build_request\n+from cliquet.utils import hmac_digest, build_request, reapply_cors\n \n from kinto.views import NameGenerator\n \n@@ -86,7 +87,11 @@\n 'body': request.body\n })\n \n- return request.invoke_subrequest(subrequest)\n+ try:\n+ response = request.invoke_subrequest(subrequest)\n+ except HTTPException as error:\n+ response = reapply_cors(subrequest, error)\n+ return response\n \n \n @resource.register(name='bucket',\n", "issue": "Missing CORS header on /v1/buckets/default/collections/tasks/records\n> 09:19:55,733 Blocage d'une requ\u00eate multi-origines (Cross-Origin Request)\u00a0: la politique \u00ab\u00a0Same Origin\u00a0\u00bb ne permet pas de consulter la ressource distante situ\u00e9e sur http://0.0.0.0:8888/v1/buckets/default/collections/tasks/records?_since=1436512795672. Raison\u00a0: l'en-t\u00eate CORS \u00ab\u00a0Access-Control-Allow-Origin\u00a0\u00bb est manquant.1 <inconnu>\n\n", "before_files": [{"content": "from six import text_type\nfrom uuid import UUID\n\nfrom pyramid.httpexceptions import HTTPForbidden, HTTPPreconditionFailed\nfrom pyramid.security import NO_PERMISSION_REQUIRED\nfrom pyramid.view import view_config\n\nfrom cliquet import resource\nfrom cliquet.utils import hmac_digest, build_request\n\nfrom kinto.views import NameGenerator\n\n\ndef create_bucket(request, bucket_id):\n \"\"\"Create a bucket if it doesn't exists.\"\"\"\n bucket_put = (request.method.lower() == 'put' and\n request.path.endswith('buckets/default'))\n\n if not bucket_put:\n subrequest = build_request(request, {\n 'method': 'PUT',\n 'path': '/buckets/%s' % bucket_id,\n 'body': {\"data\": {}},\n 'headers': {'If-None-Match': '*'.encode('utf-8')}\n })\n\n try:\n request.invoke_subrequest(subrequest)\n except HTTPPreconditionFailed:\n # The bucket already exists\n pass\n\n\ndef create_collection(request, bucket_id):\n subpath = request.matchdict['subpath']\n if subpath.startswith('/collections/'):\n collection_id = subpath.split('/')[2]\n collection_put = (request.method.lower() == 'put' and\n request.path.endswith(collection_id))\n if not collection_put:\n subrequest = build_request(request, {\n 'method': 'PUT',\n 'path': '/buckets/%s/collections/%s' % (\n bucket_id, collection_id),\n 'body': {\"data\": {}},\n 'headers': {'If-None-Match': '*'.encode('utf-8')}\n })\n try:\n request.invoke_subrequest(subrequest)\n except HTTPPreconditionFailed:\n # The collection already exists\n pass\n\n\n@view_config(route_name='default_bucket', permission=NO_PERMISSION_REQUIRED)\ndef default_bucket(request):\n if request.method.lower() == 'options':\n path = request.path.replace('default', 'unknown')\n subrequest = build_request(request, {\n 'method': 'OPTIONS',\n 'path': path\n })\n return request.invoke_subrequest(subrequest)\n\n if getattr(request, 'prefixed_userid', None) is None:\n raise HTTPForbidden # Pass through the forbidden_view_config\n\n settings = request.registry.settings\n hmac_secret = settings['cliquet.userid_hmac_secret']\n # Build the user 
unguessable bucket_id UUID from its user_id\n digest = hmac_digest(hmac_secret, request.prefixed_userid)\n bucket_id = text_type(UUID(digest[:32]))\n path = request.path.replace('default', bucket_id)\n querystring = request.url[(request.url.index(request.path) +\n len(request.path)):]\n\n # Make sure bucket exists\n create_bucket(request, bucket_id)\n\n # Make sure the collection exists\n create_collection(request, bucket_id)\n\n subrequest = build_request(request, {\n 'method': request.method,\n 'path': path + querystring,\n 'body': request.body\n })\n\n return request.invoke_subrequest(subrequest)\n\n\[email protected](name='bucket',\n collection_methods=('GET',),\n collection_path='/buckets',\n record_path='/buckets/{{id}}')\nclass Bucket(resource.ProtectedResource):\n permissions = ('read', 'write', 'collection:create', 'group:create')\n\n def __init__(self, *args, **kwargs):\n super(Bucket, self).__init__(*args, **kwargs)\n self.collection.id_generator = NameGenerator()\n\n def get_parent_id(self, request):\n # Buckets are not isolated by user, unlike Cliquet resources.\n return ''\n\n def delete(self):\n result = super(Bucket, self).delete()\n\n # Delete groups.\n storage = self.collection.storage\n parent_id = '/buckets/%s' % self.record_id\n storage.delete_all(collection_id='group', parent_id=parent_id)\n\n # Delete collections.\n deleted = storage.delete_all(collection_id='collection',\n parent_id=parent_id)\n\n # Delete records.\n id_field = self.collection.id_field\n for collection in deleted:\n parent_id = '/buckets/%s/collections/%s' % (self.record_id,\n collection[id_field])\n storage.delete_all(collection_id='record', parent_id=parent_id)\n\n return result\n", "path": "kinto/views/buckets.py"}], "after_files": [{"content": "from six import text_type\nfrom uuid import UUID\n\nfrom pyramid.httpexceptions import (HTTPForbidden, HTTPPreconditionFailed,\n HTTPException)\nfrom pyramid.security import NO_PERMISSION_REQUIRED\nfrom pyramid.view import view_config\n\nfrom cliquet import resource\nfrom cliquet.utils import hmac_digest, build_request, reapply_cors\n\nfrom kinto.views import NameGenerator\n\n\ndef create_bucket(request, bucket_id):\n \"\"\"Create a bucket if it doesn't exists.\"\"\"\n bucket_put = (request.method.lower() == 'put' and\n request.path.endswith('buckets/default'))\n\n if not bucket_put:\n subrequest = build_request(request, {\n 'method': 'PUT',\n 'path': '/buckets/%s' % bucket_id,\n 'body': {\"data\": {}},\n 'headers': {'If-None-Match': '*'.encode('utf-8')}\n })\n\n try:\n request.invoke_subrequest(subrequest)\n except HTTPPreconditionFailed:\n # The bucket already exists\n pass\n\n\ndef create_collection(request, bucket_id):\n subpath = request.matchdict['subpath']\n if subpath.startswith('/collections/'):\n collection_id = subpath.split('/')[2]\n collection_put = (request.method.lower() == 'put' and\n request.path.endswith(collection_id))\n if not collection_put:\n subrequest = build_request(request, {\n 'method': 'PUT',\n 'path': '/buckets/%s/collections/%s' % (\n bucket_id, collection_id),\n 'body': {\"data\": {}},\n 'headers': {'If-None-Match': '*'.encode('utf-8')}\n })\n try:\n request.invoke_subrequest(subrequest)\n except HTTPPreconditionFailed:\n # The collection already exists\n pass\n\n\n@view_config(route_name='default_bucket', permission=NO_PERMISSION_REQUIRED)\ndef default_bucket(request):\n if request.method.lower() == 'options':\n path = request.path.replace('default', 'unknown')\n subrequest = build_request(request, {\n 'method': 
'OPTIONS',\n 'path': path\n })\n return request.invoke_subrequest(subrequest)\n\n if getattr(request, 'prefixed_userid', None) is None:\n raise HTTPForbidden # Pass through the forbidden_view_config\n\n settings = request.registry.settings\n hmac_secret = settings['cliquet.userid_hmac_secret']\n # Build the user unguessable bucket_id UUID from its user_id\n digest = hmac_digest(hmac_secret, request.prefixed_userid)\n bucket_id = text_type(UUID(digest[:32]))\n path = request.path.replace('default', bucket_id)\n querystring = request.url[(request.url.index(request.path) +\n len(request.path)):]\n\n # Make sure bucket exists\n create_bucket(request, bucket_id)\n\n # Make sure the collection exists\n create_collection(request, bucket_id)\n\n subrequest = build_request(request, {\n 'method': request.method,\n 'path': path + querystring,\n 'body': request.body\n })\n\n try:\n response = request.invoke_subrequest(subrequest)\n except HTTPException as error:\n response = reapply_cors(subrequest, error)\n return response\n\n\[email protected](name='bucket',\n collection_methods=('GET',),\n collection_path='/buckets',\n record_path='/buckets/{{id}}')\nclass Bucket(resource.ProtectedResource):\n permissions = ('read', 'write', 'collection:create', 'group:create')\n\n def __init__(self, *args, **kwargs):\n super(Bucket, self).__init__(*args, **kwargs)\n self.collection.id_generator = NameGenerator()\n\n def get_parent_id(self, request):\n # Buckets are not isolated by user, unlike Cliquet resources.\n return ''\n\n def delete(self):\n result = super(Bucket, self).delete()\n\n # Delete groups.\n storage = self.collection.storage\n parent_id = '/buckets/%s' % self.record_id\n storage.delete_all(collection_id='group', parent_id=parent_id)\n\n # Delete collections.\n deleted = storage.delete_all(collection_id='collection',\n parent_id=parent_id)\n\n # Delete records.\n id_field = self.collection.id_field\n for collection in deleted:\n parent_id = '/buckets/%s/collections/%s' % (self.record_id,\n collection[id_field])\n storage.delete_all(collection_id='record', parent_id=parent_id)\n\n return result\n", "path": "kinto/views/buckets.py"}]} | 1,590 | 231 |
gh_patches_debug_25220 | rasdani/github-patches | git_diff | pytorch__examples-189 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[super_resolution]
def _get_orthogonal_init_weights(weights):
    fan_out = weights.size(0)
    fan_in = weights.size(1) * weights.size(2) * weights.size(3)
    u, _, v = svd(normal(0.0, 1.0, (fan_out, fan_in)), full_matrices=False)
    if u.shape == (fan_out, fan_in):
        return torch.Tensor(u.reshape(weights.size()))
    else:
        return torch.Tensor(v.reshape(weights.size()))
Why is the above operation performed?
--- END ISSUE ---
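The short answer to the question above is that this is the standard orthogonal initialization trick: draw a Gaussian matrix, take its SVD, and keep whichever orthogonal factor matches the flattened weight shape, so the reshaped kernel starts out with orthonormal rows or columns; the `sqrt(2)` applied to the ReLU layers is the usual gain correction. A small NumPy sketch of the same steps, with shapes borrowed from `conv3` above:

```python
import numpy as np

fan_out, fan_in = 32, 64 * 3 * 3  # conv3: 32 output channels, 64 x 3 x 3 inputs
a = np.random.normal(0.0, 1.0, (fan_out, fan_in))
u, _, v = np.linalg.svd(a, full_matrices=False)

# Whichever factor has the (fan_out, fan_in) shape is kept; here that is v,
# whose rows are orthonormal, so w @ w.T is numerically the identity matrix.
w = u if u.shape == (fan_out, fan_in) else v
print(w.shape)                                # (32, 576)
print(np.allclose(w @ w.T, np.eye(fan_out)))  # True
```

The patch below replaces the hand-rolled helper with `torch.nn.init.orthogonal`, which implements the same idea.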
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `super_resolution/model.py`
Content:
```
1 import torch
2 import torch.nn as nn
3 from numpy.random import normal
4 from numpy.linalg import svd
5 from math import sqrt
6
7
8 def _get_orthogonal_init_weights(weights):
9 fan_out = weights.size(0)
10 fan_in = weights.size(1) * weights.size(2) * weights.size(3)
11
12 u, _, v = svd(normal(0.0, 1.0, (fan_out, fan_in)), full_matrices=False)
13
14 if u.shape == (fan_out, fan_in):
15 return torch.Tensor(u.reshape(weights.size()))
16 else:
17 return torch.Tensor(v.reshape(weights.size()))
18
19
20 class Net(nn.Module):
21 def __init__(self, upscale_factor):
22 super(Net, self).__init__()
23
24 self.relu = nn.ReLU()
25 self.conv1 = nn.Conv2d(1, 64, (5, 5), (1, 1), (2, 2))
26 self.conv2 = nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1))
27 self.conv3 = nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1))
28 self.conv4 = nn.Conv2d(32, upscale_factor ** 2, (3, 3), (1, 1), (1, 1))
29 self.pixel_shuffle = nn.PixelShuffle(upscale_factor)
30
31 self._initialize_weights()
32
33 def forward(self, x):
34 x = self.relu(self.conv1(x))
35 x = self.relu(self.conv2(x))
36 x = self.relu(self.conv3(x))
37 x = self.pixel_shuffle(self.conv4(x))
38 return x
39
40 def _initialize_weights(self):
41 self.conv1.weight.data.copy_(_get_orthogonal_init_weights(self.conv1.weight) * sqrt(2))
42 self.conv2.weight.data.copy_(_get_orthogonal_init_weights(self.conv2.weight) * sqrt(2))
43 self.conv3.weight.data.copy_(_get_orthogonal_init_weights(self.conv3.weight) * sqrt(2))
44 self.conv4.weight.data.copy_(_get_orthogonal_init_weights(self.conv4.weight))
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/super_resolution/model.py b/super_resolution/model.py
--- a/super_resolution/model.py
+++ b/super_resolution/model.py
@@ -1,22 +1,11 @@
import torch
import torch.nn as nn
+import torch.nn.init as init
from numpy.random import normal
from numpy.linalg import svd
from math import sqrt
-def _get_orthogonal_init_weights(weights):
- fan_out = weights.size(0)
- fan_in = weights.size(1) * weights.size(2) * weights.size(3)
-
- u, _, v = svd(normal(0.0, 1.0, (fan_out, fan_in)), full_matrices=False)
-
- if u.shape == (fan_out, fan_in):
- return torch.Tensor(u.reshape(weights.size()))
- else:
- return torch.Tensor(v.reshape(weights.size()))
-
-
class Net(nn.Module):
def __init__(self, upscale_factor):
super(Net, self).__init__()
@@ -38,7 +27,7 @@
return x
def _initialize_weights(self):
- self.conv1.weight.data.copy_(_get_orthogonal_init_weights(self.conv1.weight) * sqrt(2))
- self.conv2.weight.data.copy_(_get_orthogonal_init_weights(self.conv2.weight) * sqrt(2))
- self.conv3.weight.data.copy_(_get_orthogonal_init_weights(self.conv3.weight) * sqrt(2))
- self.conv4.weight.data.copy_(_get_orthogonal_init_weights(self.conv4.weight))
+ init.orthogonal(self.conv1.weight, init.gain('relu'))
+ init.orthogonal(self.conv2.weight, init.gain('relu'))
+ init.orthogonal(self.conv3.weight, init.gain('relu'))
+ init.orthogonal(self.conv4.weight)
| {"golden_diff": "diff --git a/super_resolution/model.py b/super_resolution/model.py\n--- a/super_resolution/model.py\n+++ b/super_resolution/model.py\n@@ -1,22 +1,11 @@\n import torch\n import torch.nn as nn\n+import torch.nn.init as init\n from numpy.random import normal\n from numpy.linalg import svd\n from math import sqrt\n \n \n-def _get_orthogonal_init_weights(weights):\n- fan_out = weights.size(0)\n- fan_in = weights.size(1) * weights.size(2) * weights.size(3)\n-\n- u, _, v = svd(normal(0.0, 1.0, (fan_out, fan_in)), full_matrices=False)\n-\n- if u.shape == (fan_out, fan_in):\n- return torch.Tensor(u.reshape(weights.size()))\n- else:\n- return torch.Tensor(v.reshape(weights.size()))\n-\n-\n class Net(nn.Module):\n def __init__(self, upscale_factor):\n super(Net, self).__init__()\n@@ -38,7 +27,7 @@\n return x\n \n def _initialize_weights(self):\n- self.conv1.weight.data.copy_(_get_orthogonal_init_weights(self.conv1.weight) * sqrt(2))\n- self.conv2.weight.data.copy_(_get_orthogonal_init_weights(self.conv2.weight) * sqrt(2))\n- self.conv3.weight.data.copy_(_get_orthogonal_init_weights(self.conv3.weight) * sqrt(2))\n- self.conv4.weight.data.copy_(_get_orthogonal_init_weights(self.conv4.weight))\n+ init.orthogonal(self.conv1.weight, init.gain('relu'))\n+ init.orthogonal(self.conv2.weight, init.gain('relu'))\n+ init.orthogonal(self.conv3.weight, init.gain('relu'))\n+ init.orthogonal(self.conv4.weight)\n", "issue": "[super_resolution]\ndef _get_orthogonal_init_weights(weights):\r\n fan_out = weights.size(0)\r\n fan_in = weights.size(1) * weights.size(2) * weights.size(3)\r\n u, _, v = svd(normal(0.0, 1.0, (fan_out, fan_in)), full_matrices=False)\r\n if u.shape == (fan_out, fan_in):\r\n return torch.Tensor(u.reshape(weights.size()))\r\n else:\r\n return torch.Tensor(v.reshape(weights.size()))\r\n\r\nWhy do the above operation\uff1f\n", "before_files": [{"content": "import torch\nimport torch.nn as nn\nfrom numpy.random import normal\nfrom numpy.linalg import svd\nfrom math import sqrt\n\n\ndef _get_orthogonal_init_weights(weights):\n fan_out = weights.size(0)\n fan_in = weights.size(1) * weights.size(2) * weights.size(3)\n\n u, _, v = svd(normal(0.0, 1.0, (fan_out, fan_in)), full_matrices=False)\n\n if u.shape == (fan_out, fan_in):\n return torch.Tensor(u.reshape(weights.size()))\n else:\n return torch.Tensor(v.reshape(weights.size()))\n\n\nclass Net(nn.Module):\n def __init__(self, upscale_factor):\n super(Net, self).__init__()\n\n self.relu = nn.ReLU()\n self.conv1 = nn.Conv2d(1, 64, (5, 5), (1, 1), (2, 2))\n self.conv2 = nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1))\n self.conv3 = nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1))\n self.conv4 = nn.Conv2d(32, upscale_factor ** 2, (3, 3), (1, 1), (1, 1))\n self.pixel_shuffle = nn.PixelShuffle(upscale_factor)\n\n self._initialize_weights()\n\n def forward(self, x):\n x = self.relu(self.conv1(x))\n x = self.relu(self.conv2(x))\n x = self.relu(self.conv3(x))\n x = self.pixel_shuffle(self.conv4(x))\n return x\n\n def _initialize_weights(self):\n self.conv1.weight.data.copy_(_get_orthogonal_init_weights(self.conv1.weight) * sqrt(2))\n self.conv2.weight.data.copy_(_get_orthogonal_init_weights(self.conv2.weight) * sqrt(2))\n self.conv3.weight.data.copy_(_get_orthogonal_init_weights(self.conv3.weight) * sqrt(2))\n self.conv4.weight.data.copy_(_get_orthogonal_init_weights(self.conv4.weight))\n", "path": "super_resolution/model.py"}], "after_files": [{"content": "import torch\nimport torch.nn as nn\nimport torch.nn.init as init\nfrom numpy.random import 
normal\nfrom numpy.linalg import svd\nfrom math import sqrt\n\n\nclass Net(nn.Module):\n def __init__(self, upscale_factor):\n super(Net, self).__init__()\n\n self.relu = nn.ReLU()\n self.conv1 = nn.Conv2d(1, 64, (5, 5), (1, 1), (2, 2))\n self.conv2 = nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1))\n self.conv3 = nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1))\n self.conv4 = nn.Conv2d(32, upscale_factor ** 2, (3, 3), (1, 1), (1, 1))\n self.pixel_shuffle = nn.PixelShuffle(upscale_factor)\n\n self._initialize_weights()\n\n def forward(self, x):\n x = self.relu(self.conv1(x))\n x = self.relu(self.conv2(x))\n x = self.relu(self.conv3(x))\n x = self.pixel_shuffle(self.conv4(x))\n return x\n\n def _initialize_weights(self):\n init.orthogonal(self.conv1.weight, init.gain('relu'))\n init.orthogonal(self.conv2.weight, init.gain('relu'))\n init.orthogonal(self.conv3.weight, init.gain('relu'))\n init.orthogonal(self.conv4.weight)\n", "path": "super_resolution/model.py"}]} | 939 | 402 |
gh_patches_debug_10562 | rasdani/github-patches | git_diff | plotly__plotly.py-2132 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
plotly.express import raises ModuleNotFoundError in an environment without pandas.
Importing plotly.express when pandas is not available raises `ModuleNotFoundError: No module named 'pandas'`, instead of the intended `ImportError: Plotly express requires pandas to be installed.`
This happens on `from ._imshow import imshow`.
Perhaps this import should be moved below the code that will output a more helpful message?
--- END ISSUE ---
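A condensed sketch of the ordering fix the report is suggesting: the pandas guard has to run before any submodule that needs pandas is imported. The snippet is illustrative and omits the rest of the module.

```python
from plotly import optional_imports

pd = optional_imports.get_module("pandas")
if pd is None:
    raise ImportError("Plotly express requires pandas to be installed.")

# Submodules that rely on pandas are imported only after the guard has run.
from ._imshow import imshow  # noqa: E402
```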
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `packages/python/plotly/plotly/express/__init__.py`
Content:
```
1 """
2 `plotly.express` is a terse, consistent, high-level wrapper around `plotly.graph_objects`
3 for rapid data exploration and figure generation. Learn more at https://plotly.express/
4 """
5 from __future__ import absolute_import
6 from plotly import optional_imports
7 from ._imshow import imshow
8
9 pd = optional_imports.get_module("pandas")
10 if pd is None:
11 raise ImportError(
12 """\
13 Plotly express requires pandas to be installed."""
14 )
15
16 from ._chart_types import ( # noqa: F401
17 scatter,
18 scatter_3d,
19 scatter_polar,
20 scatter_ternary,
21 scatter_mapbox,
22 scatter_geo,
23 line,
24 line_3d,
25 line_polar,
26 line_ternary,
27 line_mapbox,
28 line_geo,
29 area,
30 bar,
31 bar_polar,
32 violin,
33 box,
34 strip,
35 histogram,
36 scatter_matrix,
37 parallel_coordinates,
38 parallel_categories,
39 choropleth,
40 density_contour,
41 density_heatmap,
42 pie,
43 sunburst,
44 treemap,
45 funnel,
46 funnel_area,
47 choropleth_mapbox,
48 density_mapbox,
49 )
50
51
52 from ._core import ( # noqa: F401
53 set_mapbox_access_token,
54 defaults,
55 get_trendline_results,
56 )
57
58 from . import data, colors # noqa: F401
59
60 __all__ = [
61 "scatter",
62 "scatter_3d",
63 "scatter_polar",
64 "scatter_ternary",
65 "scatter_mapbox",
66 "scatter_geo",
67 "scatter_matrix",
68 "density_contour",
69 "density_heatmap",
70 "density_mapbox",
71 "line",
72 "line_3d",
73 "line_polar",
74 "line_ternary",
75 "line_mapbox",
76 "line_geo",
77 "parallel_coordinates",
78 "parallel_categories",
79 "area",
80 "bar",
81 "bar_polar",
82 "violin",
83 "box",
84 "strip",
85 "histogram",
86 "choropleth",
87 "choropleth_mapbox",
88 "pie",
89 "sunburst",
90 "treemap",
91 "funnel",
92 "funnel_area",
93 "imshow",
94 "data",
95 "colors",
96 "set_mapbox_access_token",
97 "get_trendline_results",
98 ]
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/packages/python/plotly/plotly/express/__init__.py b/packages/python/plotly/plotly/express/__init__.py
--- a/packages/python/plotly/plotly/express/__init__.py
+++ b/packages/python/plotly/plotly/express/__init__.py
@@ -4,7 +4,6 @@
"""
from __future__ import absolute_import
from plotly import optional_imports
-from ._imshow import imshow
pd = optional_imports.get_module("pandas")
if pd is None:
@@ -13,6 +12,7 @@
Plotly express requires pandas to be installed."""
)
+from ._imshow import imshow
from ._chart_types import ( # noqa: F401
scatter,
scatter_3d,
| {"golden_diff": "diff --git a/packages/python/plotly/plotly/express/__init__.py b/packages/python/plotly/plotly/express/__init__.py\n--- a/packages/python/plotly/plotly/express/__init__.py\n+++ b/packages/python/plotly/plotly/express/__init__.py\n@@ -4,7 +4,6 @@\n \"\"\"\n from __future__ import absolute_import\n from plotly import optional_imports\n-from ._imshow import imshow\n \n pd = optional_imports.get_module(\"pandas\")\n if pd is None:\n@@ -13,6 +12,7 @@\n Plotly express requires pandas to be installed.\"\"\"\n )\n \n+from ._imshow import imshow\n from ._chart_types import ( # noqa: F401\n scatter,\n scatter_3d,\n", "issue": "plotly.express import raises ModuleNotFound in environment without pandas.\nImporting plotly.express when pandas is not available raises `ModuleNotFoundError: No module named 'pandas'`, instead of the intended `ImportError: Plotly express requires pandas to be installed.`\r\nThis happens on `from ._imshow import imshow`.\r\nPerhaps this import should be moved below the code that will output a more helpful message?\n", "before_files": [{"content": "\"\"\"\n`plotly.express` is a terse, consistent, high-level wrapper around `plotly.graph_objects`\nfor rapid data exploration and figure generation. Learn more at https://plotly.express/\n\"\"\"\nfrom __future__ import absolute_import\nfrom plotly import optional_imports\nfrom ._imshow import imshow\n\npd = optional_imports.get_module(\"pandas\")\nif pd is None:\n raise ImportError(\n \"\"\"\\\nPlotly express requires pandas to be installed.\"\"\"\n )\n\nfrom ._chart_types import ( # noqa: F401\n scatter,\n scatter_3d,\n scatter_polar,\n scatter_ternary,\n scatter_mapbox,\n scatter_geo,\n line,\n line_3d,\n line_polar,\n line_ternary,\n line_mapbox,\n line_geo,\n area,\n bar,\n bar_polar,\n violin,\n box,\n strip,\n histogram,\n scatter_matrix,\n parallel_coordinates,\n parallel_categories,\n choropleth,\n density_contour,\n density_heatmap,\n pie,\n sunburst,\n treemap,\n funnel,\n funnel_area,\n choropleth_mapbox,\n density_mapbox,\n)\n\n\nfrom ._core import ( # noqa: F401\n set_mapbox_access_token,\n defaults,\n get_trendline_results,\n)\n\nfrom . import data, colors # noqa: F401\n\n__all__ = [\n \"scatter\",\n \"scatter_3d\",\n \"scatter_polar\",\n \"scatter_ternary\",\n \"scatter_mapbox\",\n \"scatter_geo\",\n \"scatter_matrix\",\n \"density_contour\",\n \"density_heatmap\",\n \"density_mapbox\",\n \"line\",\n \"line_3d\",\n \"line_polar\",\n \"line_ternary\",\n \"line_mapbox\",\n \"line_geo\",\n \"parallel_coordinates\",\n \"parallel_categories\",\n \"area\",\n \"bar\",\n \"bar_polar\",\n \"violin\",\n \"box\",\n \"strip\",\n \"histogram\",\n \"choropleth\",\n \"choropleth_mapbox\",\n \"pie\",\n \"sunburst\",\n \"treemap\",\n \"funnel\",\n \"funnel_area\",\n \"imshow\",\n \"data\",\n \"colors\",\n \"set_mapbox_access_token\",\n \"get_trendline_results\",\n]\n", "path": "packages/python/plotly/plotly/express/__init__.py"}], "after_files": [{"content": "\"\"\"\n`plotly.express` is a terse, consistent, high-level wrapper around `plotly.graph_objects`\nfor rapid data exploration and figure generation. 
Learn more at https://plotly.express/\n\"\"\"\nfrom __future__ import absolute_import\nfrom plotly import optional_imports\n\npd = optional_imports.get_module(\"pandas\")\nif pd is None:\n raise ImportError(\n \"\"\"\\\nPlotly express requires pandas to be installed.\"\"\"\n )\n\nfrom ._imshow import imshow\nfrom ._chart_types import ( # noqa: F401\n scatter,\n scatter_3d,\n scatter_polar,\n scatter_ternary,\n scatter_mapbox,\n scatter_geo,\n line,\n line_3d,\n line_polar,\n line_ternary,\n line_mapbox,\n line_geo,\n area,\n bar,\n bar_polar,\n violin,\n box,\n strip,\n histogram,\n scatter_matrix,\n parallel_coordinates,\n parallel_categories,\n choropleth,\n density_contour,\n density_heatmap,\n pie,\n sunburst,\n treemap,\n funnel,\n funnel_area,\n choropleth_mapbox,\n density_mapbox,\n)\n\n\nfrom ._core import ( # noqa: F401\n set_mapbox_access_token,\n defaults,\n get_trendline_results,\n)\n\nfrom . import data, colors # noqa: F401\n\n__all__ = [\n \"scatter\",\n \"scatter_3d\",\n \"scatter_polar\",\n \"scatter_ternary\",\n \"scatter_mapbox\",\n \"scatter_geo\",\n \"scatter_matrix\",\n \"density_contour\",\n \"density_heatmap\",\n \"density_mapbox\",\n \"line\",\n \"line_3d\",\n \"line_polar\",\n \"line_ternary\",\n \"line_mapbox\",\n \"line_geo\",\n \"parallel_coordinates\",\n \"parallel_categories\",\n \"area\",\n \"bar\",\n \"bar_polar\",\n \"violin\",\n \"box\",\n \"strip\",\n \"histogram\",\n \"choropleth\",\n \"choropleth_mapbox\",\n \"pie\",\n \"sunburst\",\n \"treemap\",\n \"funnel\",\n \"funnel_area\",\n \"imshow\",\n \"data\",\n \"colors\",\n \"set_mapbox_access_token\",\n \"get_trendline_results\",\n]\n", "path": "packages/python/plotly/plotly/express/__init__.py"}]} | 1,053 | 172 |
gh_patches_debug_9121 | rasdani/github-patches | git_diff | OCHA-DAP__hdx-ckan-1053 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Org Admin: Dataset management page is broken
Log in as a sysadmin user.
Go to:
http://data.hdx.rwlabs.org/organization/bulk_process/ocha-fiss-geneva
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ckanext-hdx_orgs/ckanext/hdx_orgs/plugin.py`
Content:
```
1 import logging
2 import ckan.plugins as plugins
3 import ckan.plugins.toolkit as tk
4 import ckan.lib.plugins as lib_plugins
5
6 class HDXOrgFormPlugin(plugins.SingletonPlugin, lib_plugins.DefaultOrganizationForm):
7 plugins.implements(plugins.IConfigurer, inherit=False)
8 plugins.implements(plugins.IRoutes, inherit=True)
9 plugins.implements(plugins.IGroupForm, inherit=False)
10 plugins.implements(plugins.ITemplateHelpers, inherit=False)
11
12 num_times_new_template_called = 0
13 num_times_read_template_called = 0
14 num_times_edit_template_called = 0
15 num_times_search_template_called = 0
16 num_times_history_template_called = 0
17 num_times_package_form_called = 0
18 num_times_check_data_dict_called = 0
19 num_times_setup_template_variables_called = 0
20
21 def update_config(self, config):
22 tk.add_template_directory(config, 'templates')
23
24 def get_helpers(self):
25 return {}
26
27 def is_fallback(self):
28 return False
29
30 def group_types(self):
31 return ['organization']
32
33 def _modify_group_schema(self, schema):
34 schema.update({
35 'description':[tk.get_validator('not_empty')],
36 'org_url':[tk.get_validator('not_missing'), tk.get_converter('convert_to_extras')],
37 })
38 return schema
39
40 def form_to_db_schema(self):
41 schema = super(HDXOrgFormPlugin, self).form_to_db_schema()
42 schema = self._modify_group_schema(schema)
43 return schema
44
45 # def check_data_dict(self, data_dict):
46 # return super(HDXOrgFormPlugin, self).check_data_dict(self, data_dict)
47
48 def db_to_form_schema(self):
49 # There's a bug in dictionary validation when form isn't present
50 if tk.request.urlvars['action'] == 'index' or tk.request.urlvars['action'] == 'edit' or tk.request.urlvars['action'] == 'new':
51 schema = super(HDXOrgFormPlugin, self).form_to_db_schema()
52 schema.update({'description':[tk.get_validator('not_empty')] })
53 schema.update({'org_url':[tk.get_validator('not_missing'), tk.get_converter('convert_to_extras')]})
54 return schema
55 else:
56 return None
57
58 def before_map(self, map):
59 map.connect('user_dashboard', '/dashboard', controller='ckanext.hdx_orgs.dashboard:DashboardController', action='dashboard',
60 ckan_icon='list')
61 return map
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ckanext-hdx_orgs/ckanext/hdx_orgs/plugin.py b/ckanext-hdx_orgs/ckanext/hdx_orgs/plugin.py
--- a/ckanext-hdx_orgs/ckanext/hdx_orgs/plugin.py
+++ b/ckanext-hdx_orgs/ckanext/hdx_orgs/plugin.py
@@ -58,4 +58,6 @@
def before_map(self, map):
map.connect('user_dashboard', '/dashboard', controller='ckanext.hdx_orgs.dashboard:DashboardController', action='dashboard',
ckan_icon='list')
+ map.connect('organization_bulk_process', '/organization/bulk_process/{org_id}', controller='organization', action='index')
+ map.connect('organization_bulk_process_no_id', '/organization/bulk_process', controller='organization', action='index')
return map
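For reference, the two added routes simply map the failing URL back onto the core organization controller. A standalone sketch using the `routes` library, with illustrative values:

```python
from routes import Mapper

mapper = Mapper()
mapper.connect(
    "organization_bulk_process",
    "/organization/bulk_process/{org_id}",
    controller="organization",
    action="index",
)

# The previously broken URL now resolves to the organization index action.
print(mapper.match("/organization/bulk_process/ocha-fiss-geneva"))
# {'controller': 'organization', 'action': 'index', 'org_id': 'ocha-fiss-geneva'}
```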
| {"golden_diff": "diff --git a/ckanext-hdx_orgs/ckanext/hdx_orgs/plugin.py b/ckanext-hdx_orgs/ckanext/hdx_orgs/plugin.py\n--- a/ckanext-hdx_orgs/ckanext/hdx_orgs/plugin.py\n+++ b/ckanext-hdx_orgs/ckanext/hdx_orgs/plugin.py\n@@ -58,4 +58,6 @@\n def before_map(self, map):\n map.connect('user_dashboard', '/dashboard', controller='ckanext.hdx_orgs.dashboard:DashboardController', action='dashboard',\n ckan_icon='list')\n+ map.connect('organization_bulk_process', '/organization/bulk_process/{org_id}', controller='organization', action='index')\n+ map.connect('organization_bulk_process_no_id', '/organization/bulk_process', controller='organization', action='index')\n return map\n", "issue": "Org Admin: Dataset management page is broken\nLog in as a sysadmin user.\nGo to:\nhttp://data.hdx.rwlabs.org/organization/bulk_process/ocha-fiss-geneva\n\n", "before_files": [{"content": "import logging\nimport ckan.plugins as plugins\nimport ckan.plugins.toolkit as tk\nimport ckan.lib.plugins as lib_plugins\n\nclass HDXOrgFormPlugin(plugins.SingletonPlugin, lib_plugins.DefaultOrganizationForm):\n plugins.implements(plugins.IConfigurer, inherit=False)\n plugins.implements(plugins.IRoutes, inherit=True)\n plugins.implements(plugins.IGroupForm, inherit=False)\n plugins.implements(plugins.ITemplateHelpers, inherit=False)\n\n num_times_new_template_called = 0\n num_times_read_template_called = 0\n num_times_edit_template_called = 0\n num_times_search_template_called = 0\n num_times_history_template_called = 0\n num_times_package_form_called = 0\n num_times_check_data_dict_called = 0\n num_times_setup_template_variables_called = 0\n\n def update_config(self, config):\n tk.add_template_directory(config, 'templates')\n\n def get_helpers(self):\n return {}\n\n def is_fallback(self):\n return False\n\n def group_types(self):\n return ['organization']\n\n def _modify_group_schema(self, schema):\n schema.update({\n 'description':[tk.get_validator('not_empty')],\n 'org_url':[tk.get_validator('not_missing'), tk.get_converter('convert_to_extras')],\n })\n return schema\n\n def form_to_db_schema(self):\n schema = super(HDXOrgFormPlugin, self).form_to_db_schema()\n schema = self._modify_group_schema(schema)\n return schema\n \n# def check_data_dict(self, data_dict):\n# return super(HDXOrgFormPlugin, self).check_data_dict(self, data_dict)\n \n def db_to_form_schema(self):\n # There's a bug in dictionary validation when form isn't present\n if tk.request.urlvars['action'] == 'index' or tk.request.urlvars['action'] == 'edit' or tk.request.urlvars['action'] == 'new':\n schema = super(HDXOrgFormPlugin, self).form_to_db_schema()\n schema.update({'description':[tk.get_validator('not_empty')] })\n schema.update({'org_url':[tk.get_validator('not_missing'), tk.get_converter('convert_to_extras')]})\n return schema\n else:\n return None\n\n def before_map(self, map):\n map.connect('user_dashboard', '/dashboard', controller='ckanext.hdx_orgs.dashboard:DashboardController', action='dashboard',\n ckan_icon='list')\n return map\n", "path": "ckanext-hdx_orgs/ckanext/hdx_orgs/plugin.py"}], "after_files": [{"content": "import logging\nimport ckan.plugins as plugins\nimport ckan.plugins.toolkit as tk\nimport ckan.lib.plugins as lib_plugins\n\nclass HDXOrgFormPlugin(plugins.SingletonPlugin, lib_plugins.DefaultOrganizationForm):\n plugins.implements(plugins.IConfigurer, inherit=False)\n plugins.implements(plugins.IRoutes, inherit=True)\n plugins.implements(plugins.IGroupForm, inherit=False)\n plugins.implements(plugins.ITemplateHelpers, 
inherit=False)\n\n num_times_new_template_called = 0\n num_times_read_template_called = 0\n num_times_edit_template_called = 0\n num_times_search_template_called = 0\n num_times_history_template_called = 0\n num_times_package_form_called = 0\n num_times_check_data_dict_called = 0\n num_times_setup_template_variables_called = 0\n\n def update_config(self, config):\n tk.add_template_directory(config, 'templates')\n\n def get_helpers(self):\n return {}\n\n def is_fallback(self):\n return False\n\n def group_types(self):\n return ['organization']\n\n def _modify_group_schema(self, schema):\n schema.update({\n 'description':[tk.get_validator('not_empty')],\n 'org_url':[tk.get_validator('not_missing'), tk.get_converter('convert_to_extras')],\n })\n return schema\n\n def form_to_db_schema(self):\n schema = super(HDXOrgFormPlugin, self).form_to_db_schema()\n schema = self._modify_group_schema(schema)\n return schema\n \n# def check_data_dict(self, data_dict):\n# return super(HDXOrgFormPlugin, self).check_data_dict(self, data_dict)\n \n def db_to_form_schema(self):\n # There's a bug in dictionary validation when form isn't present\n if tk.request.urlvars['action'] == 'index' or tk.request.urlvars['action'] == 'edit' or tk.request.urlvars['action'] == 'new':\n schema = super(HDXOrgFormPlugin, self).form_to_db_schema()\n schema.update({'description':[tk.get_validator('not_empty')] })\n schema.update({'org_url':[tk.get_validator('not_missing'), tk.get_converter('convert_to_extras')]})\n return schema\n else:\n return None\n\n def before_map(self, map):\n map.connect('user_dashboard', '/dashboard', controller='ckanext.hdx_orgs.dashboard:DashboardController', action='dashboard',\n ckan_icon='list')\n map.connect('organization_bulk_process', '/organization/bulk_process/{org_id}', controller='organization', action='index')\n map.connect('organization_bulk_process_no_id', '/organization/bulk_process', controller='organization', action='index')\n return map\n", "path": "ckanext-hdx_orgs/ckanext/hdx_orgs/plugin.py"}]} | 962 | 194 |
gh_patches_debug_5383 | rasdani/github-patches | git_diff | quantumlib__Cirq-606 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Engine measurements are bytes but simulator measurements are bools
This causes code that works with the simulator to fail when given engine results. We should make these consistent.
Example code that works with simulator results but not engine results:
```python
a = np.zeros([repetition_count], dtype=np.bool)
a ^= results.measurements['x'][:, 0]
a ^= results.measurements['y'][:, 0]
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cirq/google/programs.py`
Content:
```
1 # Copyright 2018 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Dict, Iterable, Sequence, Tuple, TYPE_CHECKING
15
16 import numpy as np
17
18 from cirq.api.google.v1 import operations_pb2
19 from cirq.google import xmon_gates, xmon_gate_ext
20 from cirq.google.xmon_device import XmonDevice
21 from cirq.schedules import Schedule, ScheduledOperation
22 from cirq.value import Timestamp
23
24 if TYPE_CHECKING:
25 from typing import Optional # pylint: disable=unused-import
26
27
28 def schedule_to_proto(schedule: Schedule) -> Iterable[operations_pb2.Operation]:
29 """Convert a schedule into protobufs.
30
31 Args:
32 schedule: The schedule to convert to protobufs. Must contain only gates
33 that can be cast to xmon gates.
34
35 Yields:
36 operations_pb2.Operation
37 """
38 last_time_picos = None # type: Optional[int]
39 for so in schedule.scheduled_operations:
40 gate = xmon_gate_ext.cast(xmon_gates.XmonGate, so.operation.gate)
41 op = gate.to_proto(*so.operation.qubits)
42 time_picos = so.time.raw_picos()
43 if last_time_picos is None:
44 op.incremental_delay_picoseconds = time_picos
45 else:
46 op.incremental_delay_picoseconds = time_picos - last_time_picos
47 last_time_picos = time_picos
48 yield op
49
50
51 def schedule_from_proto(
52 device: XmonDevice,
53 ops: Iterable[operations_pb2.Operation],
54 ) -> Schedule:
55 """Convert protobufs into a Schedule for the given device."""
56 scheduled_ops = []
57 last_time_picos = 0
58 for op in ops:
59 time_picos = last_time_picos + op.incremental_delay_picoseconds
60 last_time_picos = time_picos
61 xmon_op = xmon_gates.XmonGate.from_proto(op)
62 scheduled_ops.append(ScheduledOperation.op_at_on(
63 operation=xmon_op,
64 time=Timestamp(picos=time_picos),
65 device=device,
66 ))
67 return Schedule(device, scheduled_ops)
68
69
70 def pack_results(measurements: Sequence[Tuple[str, np.ndarray]]) -> bytes:
71 """Pack measurement results into a byte string.
72
73 Args:
74 measurements: A sequence of tuples, one for each measurement, consisting
75 of a string key and an array of boolean data. The data should be
76 a 2-D array indexed by (repetition, qubit_index). All data for all
77 measurements must have the same number of repetitions.
78
79 Returns:
80 Packed bytes, as described in the unpack_results docstring below.
81
82 Raises:
83 ValueError if the measurement data do not have the compatible shapes.
84 """
85 if not measurements:
86 return b''
87
88 shapes = [(key, np.shape(data)) for key, data in measurements]
89 if not all(len(shape) == 2 for _, shape in shapes):
90 raise ValueError("Expected 2-D data: shapes={}".format(shapes))
91
92 reps = shapes[0][1][0]
93 if not all(shape[0] == reps for _, shape in shapes):
94 raise ValueError(
95 "Expected same reps for all keys: shapes={}".format(shapes))
96
97 bits = np.hstack(np.asarray(data, dtype=bool) for _, data in measurements)
98 bits = bits.reshape(-1)
99
100 # Pad length to multiple of 8 if needed.
101 remainder = len(bits) % 8
102 if remainder:
103 bits = np.pad(bits, (0, 8 - remainder), 'constant')
104
105 # Pack in little-endian bit order.
106 bits = bits.reshape((-1, 8))[:, ::-1]
107 byte_arr = np.packbits(bits, axis=1).reshape(-1)
108
109 return byte_arr.tobytes()
110
111
112 def unpack_results(
113 data: bytes,
114 repetitions: int,
115 key_sizes: Sequence[Tuple[str, int]]
116 ) -> Dict[str, np.ndarray]:
117 """Unpack data from a bitstring into individual measurement results.
118
119 Args:
120 data: Packed measurement results, in the form <rep0><rep1>...
121 where each repetition is <key0_0>..<key0_{size0-1}><key1_0>...
122 with bits packed in little-endian order in each byte.
123 repetitions: number of repetitions.
124 key_sizes: Keys and sizes of the measurements in the data.
125
126 Returns:
127 Dict mapping measurement key to a 2D array of boolean results. Each
128 array has shape (repetitions, size) with size for that measurement.
129 """
130 bits_per_rep = sum(size for _, size in key_sizes)
131 total_bits = repetitions * bits_per_rep
132
133 byte_arr = np.frombuffer(data, dtype='uint8').reshape((len(data), 1))
134 bits = np.unpackbits(byte_arr, axis=1)[:, ::-1].reshape(-1)
135 bits = bits[:total_bits].reshape((repetitions, bits_per_rep))
136
137 results = {}
138 ofs = 0
139 for key, size in key_sizes:
140 results[key] = bits[:, ofs:ofs + size]
141 ofs += size
142
143 return results
144
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cirq/google/programs.py b/cirq/google/programs.py
--- a/cirq/google/programs.py
+++ b/cirq/google/programs.py
@@ -131,7 +131,7 @@
total_bits = repetitions * bits_per_rep
byte_arr = np.frombuffer(data, dtype='uint8').reshape((len(data), 1))
- bits = np.unpackbits(byte_arr, axis=1)[:, ::-1].reshape(-1)
+ bits = np.unpackbits(byte_arr, axis=1)[:, ::-1].reshape(-1).astype(bool)
bits = bits[:total_bits].reshape((repetitions, bits_per_rep))
results = {}
| {"golden_diff": "diff --git a/cirq/google/programs.py b/cirq/google/programs.py\n--- a/cirq/google/programs.py\n+++ b/cirq/google/programs.py\n@@ -131,7 +131,7 @@\n total_bits = repetitions * bits_per_rep\n \n byte_arr = np.frombuffer(data, dtype='uint8').reshape((len(data), 1))\n- bits = np.unpackbits(byte_arr, axis=1)[:, ::-1].reshape(-1)\n+ bits = np.unpackbits(byte_arr, axis=1)[:, ::-1].reshape(-1).astype(bool)\n bits = bits[:total_bits].reshape((repetitions, bits_per_rep))\n \n results = {}\n", "issue": "Engine measurements are bytes but simulator measurements are bools\nThis causes code that works with the simulator to fail when given engine results. We should make these consistent.\r\n\r\nExample code that works with simulator results but not engine results:\r\n\r\n```python\r\na = np.zeros([repetition_count], dtype=np.bool)\r\na ^= results.measurements['x'][:, 0]\r\na ^= results.measurements['y'][:, 0]\r\n```\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Dict, Iterable, Sequence, Tuple, TYPE_CHECKING\n\nimport numpy as np\n\nfrom cirq.api.google.v1 import operations_pb2\nfrom cirq.google import xmon_gates, xmon_gate_ext\nfrom cirq.google.xmon_device import XmonDevice\nfrom cirq.schedules import Schedule, ScheduledOperation\nfrom cirq.value import Timestamp\n\nif TYPE_CHECKING:\n from typing import Optional # pylint: disable=unused-import\n\n\ndef schedule_to_proto(schedule: Schedule) -> Iterable[operations_pb2.Operation]:\n \"\"\"Convert a schedule into protobufs.\n\n Args:\n schedule: The schedule to convert to protobufs. Must contain only gates\n that can be cast to xmon gates.\n\n Yields:\n operations_pb2.Operation\n \"\"\"\n last_time_picos = None # type: Optional[int]\n for so in schedule.scheduled_operations:\n gate = xmon_gate_ext.cast(xmon_gates.XmonGate, so.operation.gate)\n op = gate.to_proto(*so.operation.qubits)\n time_picos = so.time.raw_picos()\n if last_time_picos is None:\n op.incremental_delay_picoseconds = time_picos\n else:\n op.incremental_delay_picoseconds = time_picos - last_time_picos\n last_time_picos = time_picos\n yield op\n\n\ndef schedule_from_proto(\n device: XmonDevice,\n ops: Iterable[operations_pb2.Operation],\n) -> Schedule:\n \"\"\"Convert protobufs into a Schedule for the given device.\"\"\"\n scheduled_ops = []\n last_time_picos = 0\n for op in ops:\n time_picos = last_time_picos + op.incremental_delay_picoseconds\n last_time_picos = time_picos\n xmon_op = xmon_gates.XmonGate.from_proto(op)\n scheduled_ops.append(ScheduledOperation.op_at_on(\n operation=xmon_op,\n time=Timestamp(picos=time_picos),\n device=device,\n ))\n return Schedule(device, scheduled_ops)\n\n\ndef pack_results(measurements: Sequence[Tuple[str, np.ndarray]]) -> bytes:\n \"\"\"Pack measurement results into a byte string.\n\n Args:\n measurements: A sequence of tuples, one for each measurement, consisting\n of a string key and an array of boolean data. 
The data should be\n a 2-D array indexed by (repetition, qubit_index). All data for all\n measurements must have the same number of repetitions.\n\n Returns:\n Packed bytes, as described in the unpack_results docstring below.\n\n Raises:\n ValueError if the measurement data do not have the compatible shapes.\n \"\"\"\n if not measurements:\n return b''\n\n shapes = [(key, np.shape(data)) for key, data in measurements]\n if not all(len(shape) == 2 for _, shape in shapes):\n raise ValueError(\"Expected 2-D data: shapes={}\".format(shapes))\n\n reps = shapes[0][1][0]\n if not all(shape[0] == reps for _, shape in shapes):\n raise ValueError(\n \"Expected same reps for all keys: shapes={}\".format(shapes))\n\n bits = np.hstack(np.asarray(data, dtype=bool) for _, data in measurements)\n bits = bits.reshape(-1)\n\n # Pad length to multiple of 8 if needed.\n remainder = len(bits) % 8\n if remainder:\n bits = np.pad(bits, (0, 8 - remainder), 'constant')\n\n # Pack in little-endian bit order.\n bits = bits.reshape((-1, 8))[:, ::-1]\n byte_arr = np.packbits(bits, axis=1).reshape(-1)\n\n return byte_arr.tobytes()\n\n\ndef unpack_results(\n data: bytes,\n repetitions: int,\n key_sizes: Sequence[Tuple[str, int]]\n) -> Dict[str, np.ndarray]:\n \"\"\"Unpack data from a bitstring into individual measurement results.\n\n Args:\n data: Packed measurement results, in the form <rep0><rep1>...\n where each repetition is <key0_0>..<key0_{size0-1}><key1_0>...\n with bits packed in little-endian order in each byte.\n repetitions: number of repetitions.\n key_sizes: Keys and sizes of the measurements in the data.\n\n Returns:\n Dict mapping measurement key to a 2D array of boolean results. Each\n array has shape (repetitions, size) with size for that measurement.\n \"\"\"\n bits_per_rep = sum(size for _, size in key_sizes)\n total_bits = repetitions * bits_per_rep\n\n byte_arr = np.frombuffer(data, dtype='uint8').reshape((len(data), 1))\n bits = np.unpackbits(byte_arr, axis=1)[:, ::-1].reshape(-1)\n bits = bits[:total_bits].reshape((repetitions, bits_per_rep))\n\n results = {}\n ofs = 0\n for key, size in key_sizes:\n results[key] = bits[:, ofs:ofs + size]\n ofs += size\n\n return results\n", "path": "cirq/google/programs.py"}], "after_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Dict, Iterable, Sequence, Tuple, TYPE_CHECKING\n\nimport numpy as np\n\nfrom cirq.api.google.v1 import operations_pb2\nfrom cirq.google import xmon_gates, xmon_gate_ext\nfrom cirq.google.xmon_device import XmonDevice\nfrom cirq.schedules import Schedule, ScheduledOperation\nfrom cirq.value import Timestamp\n\nif TYPE_CHECKING:\n from typing import Optional # pylint: disable=unused-import\n\n\ndef schedule_to_proto(schedule: Schedule) -> Iterable[operations_pb2.Operation]:\n \"\"\"Convert a schedule into protobufs.\n\n Args:\n schedule: The schedule to convert to protobufs. 
Must contain only gates\n that can be cast to xmon gates.\n\n Yields:\n operations_pb2.Operation\n \"\"\"\n last_time_picos = None # type: Optional[int]\n for so in schedule.scheduled_operations:\n gate = xmon_gate_ext.cast(xmon_gates.XmonGate, so.operation.gate)\n op = gate.to_proto(*so.operation.qubits)\n time_picos = so.time.raw_picos()\n if last_time_picos is None:\n op.incremental_delay_picoseconds = time_picos\n else:\n op.incremental_delay_picoseconds = time_picos - last_time_picos\n last_time_picos = time_picos\n yield op\n\n\ndef schedule_from_proto(\n device: XmonDevice,\n ops: Iterable[operations_pb2.Operation],\n) -> Schedule:\n \"\"\"Convert protobufs into a Schedule for the given device.\"\"\"\n scheduled_ops = []\n last_time_picos = 0\n for op in ops:\n time_picos = last_time_picos + op.incremental_delay_picoseconds\n last_time_picos = time_picos\n xmon_op = xmon_gates.XmonGate.from_proto(op)\n scheduled_ops.append(ScheduledOperation.op_at_on(\n operation=xmon_op,\n time=Timestamp(picos=time_picos),\n device=device,\n ))\n return Schedule(device, scheduled_ops)\n\n\ndef pack_results(measurements: Sequence[Tuple[str, np.ndarray]]) -> bytes:\n \"\"\"Pack measurement results into a byte string.\n\n Args:\n measurements: A sequence of tuples, one for each measurement, consisting\n of a string key and an array of boolean data. The data should be\n a 2-D array indexed by (repetition, qubit_index). All data for all\n measurements must have the same number of repetitions.\n\n Returns:\n Packed bytes, as described in the unpack_results docstring below.\n\n Raises:\n ValueError if the measurement data do not have the compatible shapes.\n \"\"\"\n if not measurements:\n return b''\n\n shapes = [(key, np.shape(data)) for key, data in measurements]\n if not all(len(shape) == 2 for _, shape in shapes):\n raise ValueError(\"Expected 2-D data: shapes={}\".format(shapes))\n\n reps = shapes[0][1][0]\n if not all(shape[0] == reps for _, shape in shapes):\n raise ValueError(\n \"Expected same reps for all keys: shapes={}\".format(shapes))\n\n bits = np.hstack(np.asarray(data, dtype=bool) for _, data in measurements)\n bits = bits.reshape(-1)\n\n # Pad length to multiple of 8 if needed.\n remainder = len(bits) % 8\n if remainder:\n bits = np.pad(bits, (0, 8 - remainder), 'constant')\n\n # Pack in little-endian bit order.\n bits = bits.reshape((-1, 8))[:, ::-1]\n byte_arr = np.packbits(bits, axis=1).reshape(-1)\n\n return byte_arr.tobytes()\n\n\ndef unpack_results(\n data: bytes,\n repetitions: int,\n key_sizes: Sequence[Tuple[str, int]]\n) -> Dict[str, np.ndarray]:\n \"\"\"Unpack data from a bitstring into individual measurement results.\n\n Args:\n data: Packed measurement results, in the form <rep0><rep1>...\n where each repetition is <key0_0>..<key0_{size0-1}><key1_0>...\n with bits packed in little-endian order in each byte.\n repetitions: number of repetitions.\n key_sizes: Keys and sizes of the measurements in the data.\n\n Returns:\n Dict mapping measurement key to a 2D array of boolean results. 
Each\n array has shape (repetitions, size) with size for that measurement.\n \"\"\"\n bits_per_rep = sum(size for _, size in key_sizes)\n total_bits = repetitions * bits_per_rep\n\n byte_arr = np.frombuffer(data, dtype='uint8').reshape((len(data), 1))\n bits = np.unpackbits(byte_arr, axis=1)[:, ::-1].reshape(-1).astype(bool)\n bits = bits[:total_bits].reshape((repetitions, bits_per_rep))\n\n results = {}\n ofs = 0\n for key, size in key_sizes:\n results[key] = bits[:, ofs:ofs + size]\n ofs += size\n\n return results\n", "path": "cirq/google/programs.py"}]} | 1,908 | 154 |
gh_patches_debug_30606 | rasdani/github-patches | git_diff | streamlink__streamlink-5444 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
plugins.lrt: stream is reported Forbidden (though plays if opened manually)
### Checklist
- [X] This is a plugin issue and not a different kind of issue
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
Latest stable release
### Description
When trying to open https://www.lrt.lt/mediateka/tiesiogiai/lrt-televizija or https://www.lrt.lt/mediateka/tiesiogiai/lrt-plius, an error is reported (see the log below).
However, if I manually pass the m3u8 URL mentioned in the error to `mpv`, like this (the URL is taken from the log below; note the absence of the `%0A` at the end of it):
mpv https://af5dcb595ac445ab94d7da3af2ebb360.dlvr1.net/lrt_hd/master.m3u8?RxKc3mPWTMxjM1SuDkHZeW1Fw3jEx0oqyryrSQODiHo-Bs31UZVEBEPkLtrdbPKVKrlorJgTLUnSwqks_5Y1QrSQRYfbtlWddOuLrpnY9-kuyM_3QE_yBbqwzhre
...then, after a few ffmpeg errors and warnings, it does open.
The error started to appear a few days ago; everything worked perfectly before that (so they probably changed something on their side).
Thanks.
### Debug log
```text
[cli][debug] OS: Linux-5.15.0-76-generic-x86_64-with-glibc2.35
[cli][debug] Python: 3.11.3
[cli][debug] Streamlink: 5.5.1
[cli][debug] Dependencies:
[cli][debug] certifi: 2023.5.7
[cli][debug] isodate: 0.6.1
[cli][debug] lxml: 4.9.2
[cli][debug] pycountry: 22.3.5
[cli][debug] pycryptodome: 3.18.0
[cli][debug] PySocks: 1.7.1
[cli][debug] requests: 2.31.0
[cli][debug] urllib3: 2.0.2
[cli][debug] websocket-client: 1.5.2
[cli][debug] Arguments:
[cli][debug] url=https://www.lrt.lt/mediateka/tiesiogiai/lrt-televizija
[cli][debug] --loglevel=debug
[cli][info] Found matching plugin lrt for URL https://www.lrt.lt/mediateka/tiesiogiai/lrt-televizija
[utils.l10n][debug] Language code: en_US
error: Unable to open URL: https://af5dcb595ac445ab94d7da3af2ebb360.dlvr1.net/lrt_hd/master.m3u8?RxKc3mPWTMxjM1SuDkHZeW1Fw3jEx0oqyryrSQODiHo-Bs31UZVEBEPkLtrdbPKVKrlorJgTLUnSwqks_5Y1QrSQRYfbtlWddOuLrpnY9-kuyM_3QE_yBbqwzhre
(403 Client Error: Forbidden for url: https://af5dcb595ac445ab94d7da3af2ebb360.dlvr1.net/lrt_hd/master.m3u8?RxKc3mPWTMxjM1SuDkHZeW1Fw3jEx0oqyryrSQODiHo-Bs31UZVEBEPkLtrdbPKVKrlorJgTLUnSwqks_5Y1QrSQRYfbtlWddOuLrpnY9-kuyM_3QE_yBbqwzhre%0A)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/lrt.py`
Content:
```
1 """
2 $description Live TV channels from LRT, a Lithuanian public, state-owned broadcaster.
3 $url lrt.lt
4 $type live
5 """
6
7 import logging
8 import re
9
10 from streamlink.plugin import Plugin, pluginmatcher
11 from streamlink.stream.hls import HLSStream
12
13
14 log = logging.getLogger(__name__)
15
16
17 @pluginmatcher(re.compile(
18 r"https?://(?:www\.)?lrt\.lt/mediateka/tiesiogiai/",
19 ))
20 class LRT(Plugin):
21 _video_id_re = re.compile(r"""var\svideo_id\s*=\s*["'](?P<video_id>\w+)["']""")
22 API_URL = "https://www.lrt.lt/servisai/stream_url/live/get_live_url.php?channel={0}"
23
24 def _get_streams(self):
25 page = self.session.http.get(self.url)
26 m = self._video_id_re.search(page.text)
27 if m:
28 video_id = m.group("video_id")
29 data = self.session.http.get(self.API_URL.format(video_id)).json()
30 hls_url = data["response"]["data"]["content"]
31
32 yield from HLSStream.parse_variant_playlist(self.session, hls_url).items()
33 else:
34 log.debug("No match for video_id regex")
35
36
37 __plugin__ = LRT
38
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/streamlink/plugins/lrt.py b/src/streamlink/plugins/lrt.py
--- a/src/streamlink/plugins/lrt.py
+++ b/src/streamlink/plugins/lrt.py
@@ -4,34 +4,42 @@
$type live
"""
-import logging
import re
from streamlink.plugin import Plugin, pluginmatcher
+from streamlink.plugin.api import validate
from streamlink.stream.hls import HLSStream
-log = logging.getLogger(__name__)
-
-
@pluginmatcher(re.compile(
r"https?://(?:www\.)?lrt\.lt/mediateka/tiesiogiai/",
))
class LRT(Plugin):
- _video_id_re = re.compile(r"""var\svideo_id\s*=\s*["'](?P<video_id>\w+)["']""")
- API_URL = "https://www.lrt.lt/servisai/stream_url/live/get_live_url.php?channel={0}"
-
def _get_streams(self):
- page = self.session.http.get(self.url)
- m = self._video_id_re.search(page.text)
- if m:
- video_id = m.group("video_id")
- data = self.session.http.get(self.API_URL.format(video_id)).json()
- hls_url = data["response"]["data"]["content"]
-
- yield from HLSStream.parse_variant_playlist(self.session, hls_url).items()
- else:
- log.debug("No match for video_id regex")
+ token_url = self.session.http.get(self.url, schema=validate.Schema(
+ re.compile(r"""var\s+tokenURL\s*=\s*(?P<q>["'])(?P<url>https://\S+)(?P=q)"""),
+ validate.none_or_all(validate.get("url")),
+ ))
+ if not token_url:
+ return
+
+ hls_url = self.session.http.get(token_url, schema=validate.Schema(
+ validate.parse_json(),
+ {
+ "response": {
+ "data": {
+ "content": validate.all(
+ str,
+ validate.transform(lambda url: url.strip()),
+ validate.url(path=validate.endswith(".m3u8")),
+ ),
+ },
+ },
+ },
+ validate.get(("response", "data", "content")),
+ ))
+
+ return HLSStream.parse_variant_playlist(self.session, hls_url)
__plugin__ = LRT
| {"golden_diff": "diff --git a/src/streamlink/plugins/lrt.py b/src/streamlink/plugins/lrt.py\n--- a/src/streamlink/plugins/lrt.py\n+++ b/src/streamlink/plugins/lrt.py\n@@ -4,34 +4,42 @@\n $type live\n \"\"\"\n \n-import logging\n import re\n \n from streamlink.plugin import Plugin, pluginmatcher\n+from streamlink.plugin.api import validate\n from streamlink.stream.hls import HLSStream\n \n \n-log = logging.getLogger(__name__)\n-\n-\n @pluginmatcher(re.compile(\n r\"https?://(?:www\\.)?lrt\\.lt/mediateka/tiesiogiai/\",\n ))\n class LRT(Plugin):\n- _video_id_re = re.compile(r\"\"\"var\\svideo_id\\s*=\\s*[\"'](?P<video_id>\\w+)[\"']\"\"\")\n- API_URL = \"https://www.lrt.lt/servisai/stream_url/live/get_live_url.php?channel={0}\"\n-\n def _get_streams(self):\n- page = self.session.http.get(self.url)\n- m = self._video_id_re.search(page.text)\n- if m:\n- video_id = m.group(\"video_id\")\n- data = self.session.http.get(self.API_URL.format(video_id)).json()\n- hls_url = data[\"response\"][\"data\"][\"content\"]\n-\n- yield from HLSStream.parse_variant_playlist(self.session, hls_url).items()\n- else:\n- log.debug(\"No match for video_id regex\")\n+ token_url = self.session.http.get(self.url, schema=validate.Schema(\n+ re.compile(r\"\"\"var\\s+tokenURL\\s*=\\s*(?P<q>[\"'])(?P<url>https://\\S+)(?P=q)\"\"\"),\n+ validate.none_or_all(validate.get(\"url\")),\n+ ))\n+ if not token_url:\n+ return\n+\n+ hls_url = self.session.http.get(token_url, schema=validate.Schema(\n+ validate.parse_json(),\n+ {\n+ \"response\": {\n+ \"data\": {\n+ \"content\": validate.all(\n+ str,\n+ validate.transform(lambda url: url.strip()),\n+ validate.url(path=validate.endswith(\".m3u8\")),\n+ ),\n+ },\n+ },\n+ },\n+ validate.get((\"response\", \"data\", \"content\")),\n+ ))\n+\n+ return HLSStream.parse_variant_playlist(self.session, hls_url)\n \n \n __plugin__ = LRT\n", "issue": "plugins.lrt: stream is reported Forbidden (though plays if opened manually)\n### Checklist\n\n- [X] This is a plugin issue and not a different kind of issue\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\nLatest stable release\n\n### Description\n\nWhen trying to open https://www.lrt.lt/mediateka/tiesiogiai/lrt-televizija or https://www.lrt.lt/mediateka/tiesiogiai/lrt-plius, an error is reported (see the log below).\r\n\r\nHowever, if I try to manually pass the m3u8 URL mentioned in the error to `mpv`, like this (the URL taken from the log below, note the absence of the `%0A` at the end of it):\r\n\r\n mpv https://af5dcb595ac445ab94d7da3af2ebb360.dlvr1.net/lrt_hd/master.m3u8?RxKc3mPWTMxjM1SuDkHZeW1Fw3jEx0oqyryrSQODiHo-Bs31UZVEBEPkLtrdbPKVKrlorJgTLUnSwqks_5Y1QrSQRYfbtlWddOuLrpnY9-kuyM_3QE_yBbqwzhre\r\n\r\n...then, after a few ffmpeg errors and warnings, it does open.\r\n\r\nThe error started to appear a few days ago, worked perfectly before that (so, probably, they changed something at their side).\r\n\r\nThanks.\n\n### Debug log\n\n```text\n[cli][debug] OS: Linux-5.15.0-76-generic-x86_64-with-glibc2.35\r\n[cli][debug] Python: 3.11.3\r\n[cli][debug] Streamlink: 5.5.1\r\n[cli][debug] Dependencies:\r\n[cli][debug] certifi: 2023.5.7\r\n[cli][debug] 
isodate: 0.6.1\r\n[cli][debug] lxml: 4.9.2\r\n[cli][debug] pycountry: 22.3.5\r\n[cli][debug] pycryptodome: 3.18.0\r\n[cli][debug] PySocks: 1.7.1\r\n[cli][debug] requests: 2.31.0\r\n[cli][debug] urllib3: 2.0.2\r\n[cli][debug] websocket-client: 1.5.2\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://www.lrt.lt/mediateka/tiesiogiai/lrt-televizija\r\n[cli][debug] --loglevel=debug\r\n[cli][info] Found matching plugin lrt for URL https://www.lrt.lt/mediateka/tiesiogiai/lrt-televizija\r\n[utils.l10n][debug] Language code: en_US\r\nerror: Unable to open URL: https://af5dcb595ac445ab94d7da3af2ebb360.dlvr1.net/lrt_hd/master.m3u8?RxKc3mPWTMxjM1SuDkHZeW1Fw3jEx0oqyryrSQODiHo-Bs31UZVEBEPkLtrdbPKVKrlorJgTLUnSwqks_5Y1QrSQRYfbtlWddOuLrpnY9-kuyM_3QE_yBbqwzhre\r\n (403 Client Error: Forbidden for url: https://af5dcb595ac445ab94d7da3af2ebb360.dlvr1.net/lrt_hd/master.m3u8?RxKc3mPWTMxjM1SuDkHZeW1Fw3jEx0oqyryrSQODiHo-Bs31UZVEBEPkLtrdbPKVKrlorJgTLUnSwqks_5Y1QrSQRYfbtlWddOuLrpnY9-kuyM_3QE_yBbqwzhre%0A)\n```\n\n", "before_files": [{"content": "\"\"\"\n$description Live TV channels from LRT, a Lithuanian public, state-owned broadcaster.\n$url lrt.lt\n$type live\n\"\"\"\n\nimport logging\nimport re\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.stream.hls import HLSStream\n\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"https?://(?:www\\.)?lrt\\.lt/mediateka/tiesiogiai/\",\n))\nclass LRT(Plugin):\n _video_id_re = re.compile(r\"\"\"var\\svideo_id\\s*=\\s*[\"'](?P<video_id>\\w+)[\"']\"\"\")\n API_URL = \"https://www.lrt.lt/servisai/stream_url/live/get_live_url.php?channel={0}\"\n\n def _get_streams(self):\n page = self.session.http.get(self.url)\n m = self._video_id_re.search(page.text)\n if m:\n video_id = m.group(\"video_id\")\n data = self.session.http.get(self.API_URL.format(video_id)).json()\n hls_url = data[\"response\"][\"data\"][\"content\"]\n\n yield from HLSStream.parse_variant_playlist(self.session, hls_url).items()\n else:\n log.debug(\"No match for video_id regex\")\n\n\n__plugin__ = LRT\n", "path": "src/streamlink/plugins/lrt.py"}], "after_files": [{"content": "\"\"\"\n$description Live TV channels from LRT, a Lithuanian public, state-owned broadcaster.\n$url lrt.lt\n$type live\n\"\"\"\n\nimport re\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.hls import HLSStream\n\n\n@pluginmatcher(re.compile(\n r\"https?://(?:www\\.)?lrt\\.lt/mediateka/tiesiogiai/\",\n))\nclass LRT(Plugin):\n def _get_streams(self):\n token_url = self.session.http.get(self.url, schema=validate.Schema(\n re.compile(r\"\"\"var\\s+tokenURL\\s*=\\s*(?P<q>[\"'])(?P<url>https://\\S+)(?P=q)\"\"\"),\n validate.none_or_all(validate.get(\"url\")),\n ))\n if not token_url:\n return\n\n hls_url = self.session.http.get(token_url, schema=validate.Schema(\n validate.parse_json(),\n {\n \"response\": {\n \"data\": {\n \"content\": validate.all(\n str,\n validate.transform(lambda url: url.strip()),\n validate.url(path=validate.endswith(\".m3u8\")),\n ),\n },\n },\n },\n validate.get((\"response\", \"data\", \"content\")),\n ))\n\n return HLSStream.parse_variant_playlist(self.session, hls_url)\n\n\n__plugin__ = LRT\n", "path": "src/streamlink/plugins/lrt.py"}]} | 1,662 | 525 |
gh_patches_debug_39191 | rasdani/github-patches | git_diff | wandb__wandb-516 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
use six for configparser for py2 compat
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wandb/settings.py`
Content:
```
1 import os
2 import configparser
3
4 import wandb.util as util
5 from wandb import core, env, wandb_dir
6
7
8 class Settings(object):
9 """Global W&B settings stored under $WANDB_CONFIG_DIR/settings.
10 """
11
12 DEFAULT_SECTION = "default"
13
14 def __init__(self, load_settings=True):
15 config_dir = os.environ.get(env.CONFIG_DIR, os.path.join(os.path.expanduser("~"), ".config", "wandb"))
16
17 # Ensure the config directory and settings file both exist.
18 util.mkdir_exists_ok(config_dir)
19 util.mkdir_exists_ok(wandb_dir())
20
21 self._global_settings_path = os.path.join(config_dir, 'settings')
22 self._global_settings = Settings._settings_wth_defaults({})
23
24 self._local_settings_path = os.path.join(wandb_dir(), 'settings')
25 self._local_settings = Settings._settings_wth_defaults({})
26
27 if load_settings:
28 self._global_settings.read([self._global_settings_path])
29 self._local_settings.read([self._local_settings_path])
30
31 def get(self, section, key, fallback=configparser._UNSET):
32 # Try the local settings first. If we can't find the key, then try the global settings.
33 # If a fallback is provided, return it if we can't find the key in either the local or global
34 # settings.
35 try:
36 return self._local_settings.get(section, key)
37 except configparser.NoOptionError:
38 return self._global_settings.get(section, key, fallback=fallback)
39
40 def set(self, section, key, value, globally=False):
41 def write_setting(settings, settings_path):
42 if not settings.has_section(section):
43 settings.add_section(section)
44 settings.set(section, key, str(value))
45 with open(settings_path, "w+") as f:
46 settings.write(f)
47
48 if globally:
49 write_setting(self._global_settings, self._global_settings_path)
50 else:
51 write_setting(self._local_settings, self._local_settings_path)
52
53 def clear(self, section, key, globally=False):
54 def clear_setting(settings, settings_path):
55 settings.remove_option(section, key)
56 with open(settings_path, "w+") as f:
57 settings.write(f)
58
59 if globally:
60 clear_setting(self._global_settings, self._global_settings_path)
61 else:
62 clear_setting(self._local_settings, self._local_settings_path)
63
64 def items(self, section=None):
65 section = section if section is not None else Settings.DEFAULT_SECTION
66
67 result = {'section': section}
68
69 try:
70 if section in self._global_settings.sections():
71 for option in self._global_settings.options(section):
72 result[option] = self._global_settings.get(section, option)
73 if section in self._local_settings.sections():
74 for option in self._local_settings.options(section):
75 result[option] = self._local_settings.get(section, option)
76 except configparser.InterpolationSyntaxError:
77 core.termwarn("Unable to parse settings file")
78
79 return result
80
81 @staticmethod
82 def _settings_wth_defaults(default_settings):
83 config = configparser.ConfigParser()
84 config.add_section(Settings.DEFAULT_SECTION)
85 for key, value in default_settings.items():
86 config.set(Settings.DEFAULT_SECTION, key, str(value))
87 return config
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wandb/settings.py b/wandb/settings.py
--- a/wandb/settings.py
+++ b/wandb/settings.py
@@ -1,5 +1,6 @@
import os
-import configparser
+
+from six.moves import configparser
import wandb.util as util
from wandb import core, env, wandb_dir
@@ -9,7 +10,9 @@
"""Global W&B settings stored under $WANDB_CONFIG_DIR/settings.
"""
- DEFAULT_SECTION = "default"
+ DEFAULT_SECTION = "client"
+
+ _UNSET = object()
def __init__(self, load_settings=True):
config_dir = os.environ.get(env.CONFIG_DIR, os.path.join(os.path.expanduser("~"), ".config", "wandb"))
@@ -19,23 +22,29 @@
util.mkdir_exists_ok(wandb_dir())
self._global_settings_path = os.path.join(config_dir, 'settings')
- self._global_settings = Settings._settings_wth_defaults({})
+ self._global_settings = Settings._settings()
self._local_settings_path = os.path.join(wandb_dir(), 'settings')
- self._local_settings = Settings._settings_wth_defaults({})
+ self._local_settings = Settings._settings()
if load_settings:
self._global_settings.read([self._global_settings_path])
self._local_settings.read([self._local_settings_path])
- def get(self, section, key, fallback=configparser._UNSET):
+ def get(self, section, key, fallback=_UNSET):
# Try the local settings first. If we can't find the key, then try the global settings.
# If a fallback is provided, return it if we can't find the key in either the local or global
# settings.
try:
return self._local_settings.get(section, key)
except configparser.NoOptionError:
- return self._global_settings.get(section, key, fallback=fallback)
+ try:
+ return self._global_settings.get(section, key)
+ except configparser.NoOptionError:
+ if fallback is not Settings._UNSET:
+ return fallback
+ else:
+ raise
def set(self, section, key, value, globally=False):
def write_setting(settings, settings_path):
@@ -79,7 +88,7 @@
return result
@staticmethod
- def _settings_wth_defaults(default_settings):
+ def _settings(default_settings={}):
config = configparser.ConfigParser()
config.add_section(Settings.DEFAULT_SECTION)
for key, value in default_settings.items():
| {"golden_diff": "diff --git a/wandb/settings.py b/wandb/settings.py\n--- a/wandb/settings.py\n+++ b/wandb/settings.py\n@@ -1,5 +1,6 @@\n import os\n-import configparser\n+\n+from six.moves import configparser\n \n import wandb.util as util\n from wandb import core, env, wandb_dir\n@@ -9,7 +10,9 @@\n \"\"\"Global W&B settings stored under $WANDB_CONFIG_DIR/settings.\n \"\"\"\n \n- DEFAULT_SECTION = \"default\"\n+ DEFAULT_SECTION = \"client\"\n+\n+ _UNSET = object()\n \n def __init__(self, load_settings=True):\n config_dir = os.environ.get(env.CONFIG_DIR, os.path.join(os.path.expanduser(\"~\"), \".config\", \"wandb\"))\n@@ -19,23 +22,29 @@\n util.mkdir_exists_ok(wandb_dir())\n \n self._global_settings_path = os.path.join(config_dir, 'settings')\n- self._global_settings = Settings._settings_wth_defaults({})\n+ self._global_settings = Settings._settings()\n \n self._local_settings_path = os.path.join(wandb_dir(), 'settings')\n- self._local_settings = Settings._settings_wth_defaults({})\n+ self._local_settings = Settings._settings()\n \n if load_settings:\n self._global_settings.read([self._global_settings_path])\n self._local_settings.read([self._local_settings_path])\n \n- def get(self, section, key, fallback=configparser._UNSET):\n+ def get(self, section, key, fallback=_UNSET):\n # Try the local settings first. If we can't find the key, then try the global settings.\n # If a fallback is provided, return it if we can't find the key in either the local or global\n # settings.\n try:\n return self._local_settings.get(section, key)\n except configparser.NoOptionError:\n- return self._global_settings.get(section, key, fallback=fallback)\n+ try:\n+ return self._global_settings.get(section, key)\n+ except configparser.NoOptionError:\n+ if fallback is not Settings._UNSET:\n+ return fallback\n+ else:\n+ raise\n \n def set(self, section, key, value, globally=False):\n def write_setting(settings, settings_path):\n@@ -79,7 +88,7 @@\n return result\n \n @staticmethod\n- def _settings_wth_defaults(default_settings):\n+ def _settings(default_settings={}):\n config = configparser.ConfigParser()\n config.add_section(Settings.DEFAULT_SECTION)\n for key, value in default_settings.items():\n", "issue": "use six for configparser for py2 compat\n\n", "before_files": [{"content": "import os\nimport configparser\n\nimport wandb.util as util\nfrom wandb import core, env, wandb_dir\n\n\nclass Settings(object):\n \"\"\"Global W&B settings stored under $WANDB_CONFIG_DIR/settings.\n \"\"\"\n\n DEFAULT_SECTION = \"default\"\n\n def __init__(self, load_settings=True):\n config_dir = os.environ.get(env.CONFIG_DIR, os.path.join(os.path.expanduser(\"~\"), \".config\", \"wandb\"))\n\n # Ensure the config directory and settings file both exist.\n util.mkdir_exists_ok(config_dir)\n util.mkdir_exists_ok(wandb_dir())\n\n self._global_settings_path = os.path.join(config_dir, 'settings')\n self._global_settings = Settings._settings_wth_defaults({})\n\n self._local_settings_path = os.path.join(wandb_dir(), 'settings')\n self._local_settings = Settings._settings_wth_defaults({})\n\n if load_settings:\n self._global_settings.read([self._global_settings_path])\n self._local_settings.read([self._local_settings_path])\n\n def get(self, section, key, fallback=configparser._UNSET):\n # Try the local settings first. 
If we can't find the key, then try the global settings.\n # If a fallback is provided, return it if we can't find the key in either the local or global\n # settings.\n try:\n return self._local_settings.get(section, key)\n except configparser.NoOptionError:\n return self._global_settings.get(section, key, fallback=fallback)\n\n def set(self, section, key, value, globally=False):\n def write_setting(settings, settings_path):\n if not settings.has_section(section):\n settings.add_section(section)\n settings.set(section, key, str(value))\n with open(settings_path, \"w+\") as f:\n settings.write(f)\n\n if globally:\n write_setting(self._global_settings, self._global_settings_path)\n else:\n write_setting(self._local_settings, self._local_settings_path)\n\n def clear(self, section, key, globally=False):\n def clear_setting(settings, settings_path):\n settings.remove_option(section, key)\n with open(settings_path, \"w+\") as f:\n settings.write(f)\n\n if globally:\n clear_setting(self._global_settings, self._global_settings_path)\n else:\n clear_setting(self._local_settings, self._local_settings_path)\n\n def items(self, section=None):\n section = section if section is not None else Settings.DEFAULT_SECTION\n\n result = {'section': section}\n\n try:\n if section in self._global_settings.sections():\n for option in self._global_settings.options(section):\n result[option] = self._global_settings.get(section, option)\n if section in self._local_settings.sections():\n for option in self._local_settings.options(section):\n result[option] = self._local_settings.get(section, option)\n except configparser.InterpolationSyntaxError:\n core.termwarn(\"Unable to parse settings file\")\n\n return result\n\n @staticmethod\n def _settings_wth_defaults(default_settings):\n config = configparser.ConfigParser()\n config.add_section(Settings.DEFAULT_SECTION)\n for key, value in default_settings.items():\n config.set(Settings.DEFAULT_SECTION, key, str(value))\n return config\n", "path": "wandb/settings.py"}], "after_files": [{"content": "import os\n\nfrom six.moves import configparser\n\nimport wandb.util as util\nfrom wandb import core, env, wandb_dir\n\n\nclass Settings(object):\n \"\"\"Global W&B settings stored under $WANDB_CONFIG_DIR/settings.\n \"\"\"\n\n DEFAULT_SECTION = \"client\"\n\n _UNSET = object()\n\n def __init__(self, load_settings=True):\n config_dir = os.environ.get(env.CONFIG_DIR, os.path.join(os.path.expanduser(\"~\"), \".config\", \"wandb\"))\n\n # Ensure the config directory and settings file both exist.\n util.mkdir_exists_ok(config_dir)\n util.mkdir_exists_ok(wandb_dir())\n\n self._global_settings_path = os.path.join(config_dir, 'settings')\n self._global_settings = Settings._settings()\n\n self._local_settings_path = os.path.join(wandb_dir(), 'settings')\n self._local_settings = Settings._settings()\n\n if load_settings:\n self._global_settings.read([self._global_settings_path])\n self._local_settings.read([self._local_settings_path])\n\n def get(self, section, key, fallback=_UNSET):\n # Try the local settings first. 
If we can't find the key, then try the global settings.\n # If a fallback is provided, return it if we can't find the key in either the local or global\n # settings.\n try:\n return self._local_settings.get(section, key)\n except configparser.NoOptionError:\n try:\n return self._global_settings.get(section, key)\n except configparser.NoOptionError:\n if fallback is not Settings._UNSET:\n return fallback\n else:\n raise\n\n def set(self, section, key, value, globally=False):\n def write_setting(settings, settings_path):\n if not settings.has_section(section):\n settings.add_section(section)\n settings.set(section, key, str(value))\n with open(settings_path, \"w+\") as f:\n settings.write(f)\n\n if globally:\n write_setting(self._global_settings, self._global_settings_path)\n else:\n write_setting(self._local_settings, self._local_settings_path)\n\n def clear(self, section, key, globally=False):\n def clear_setting(settings, settings_path):\n settings.remove_option(section, key)\n with open(settings_path, \"w+\") as f:\n settings.write(f)\n\n if globally:\n clear_setting(self._global_settings, self._global_settings_path)\n else:\n clear_setting(self._local_settings, self._local_settings_path)\n\n def items(self, section=None):\n section = section if section is not None else Settings.DEFAULT_SECTION\n\n result = {'section': section}\n\n try:\n if section in self._global_settings.sections():\n for option in self._global_settings.options(section):\n result[option] = self._global_settings.get(section, option)\n if section in self._local_settings.sections():\n for option in self._local_settings.options(section):\n result[option] = self._local_settings.get(section, option)\n except configparser.InterpolationSyntaxError:\n core.termwarn(\"Unable to parse settings file\")\n\n return result\n\n @staticmethod\n def _settings(default_settings={}):\n config = configparser.ConfigParser()\n config.add_section(Settings.DEFAULT_SECTION)\n for key, value in default_settings.items():\n config.set(Settings.DEFAULT_SECTION, key, str(value))\n return config\n", "path": "wandb/settings.py"}]} | 1,131 | 567 |
gh_patches_debug_24433 | rasdani/github-patches | git_diff | opensearch-project__opensearch-build-1652 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Only send `-d` to core components, instead of everything.
As of now, the build workflow will not send `-d` if the user does not specify `--distribution`.
However, if the user specifies something such as `--distribution rpm` and tries to build core + all plugins,
`-d` will be sent to the plugins as well.
The plugin build script does not know how to interpret `-d` and thus fails.
```
+ echo 'Invalid option: -?'
Invalid option: -?
+ exit 1
2022-02-17 23:58:36 ERROR Error building common-utils, retry with: ./build.sh manifests/1.3.0/opensearch-1.3.0.yml --component common-utils
Traceback (most recent call last):
File "./src/run_build.py", line 79, in <module>
sys.exit(main())
File "./src/run_build.py", line 67, in main
builder.build(build_recorder)
File "/local/home/zhujiaxi/opensearch-build-peterzhuamazon/src/build_workflow/builder_from_source.py", line 49, in build
self.git_repo.execute(build_command)
File "/local/home/zhujiaxi/opensearch-build-peterzhuamazon/src/git/git_repository.py", line 83, in execute
subprocess.check_call(command, cwd=cwd, shell=True)
File "/usr/lib64/python3.7/subprocess.py", line 363, in check_call
raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command 'bash /local/home/zhujiaxi/opensearch-build-peterzhuamazon/scripts/components/common-utils/build.sh -v 1.3.0 -p linux -a x64 -d rpm -s false -o builds' returned non-zero exit status 1.
```
We need to add a condition so that if the component is not OpenSearch/OpenSearch-Dashboards, `-d` will not be sent even when the distribution is not None.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/build_workflow/builder_from_source.py`
Content:
```
1 # SPDX-License-Identifier: Apache-2.0
2 #
3 # The OpenSearch Contributors require contributions made to
4 # this file be licensed under the Apache-2.0 license or a
5 # compatible open source license.
6
7 import os
8
9 from build_workflow.build_recorder import BuildRecorder
10 from build_workflow.builder import Builder
11 from git.git_repository import GitRepository
12 from paths.script_finder import ScriptFinder
13
14 """
15 This class is responsible for executing the build for a component and passing the results to a build recorder.
16 It will notify the build recorder of build information such as repository and git ref, and any artifacts generated by the build.
17 Artifacts found in "<build root>/artifacts/<maven|plugins|libs|dist|core-plugins>" will be recognized and recorded.
18 """
19
20
21 class BuilderFromSource(Builder):
22 def checkout(self, work_dir: str) -> None:
23 self.git_repo = GitRepository(
24 self.component.repository,
25 self.component.ref,
26 os.path.join(work_dir, self.component.name),
27 self.component.working_directory,
28 )
29
30 def build(self, build_recorder: BuildRecorder) -> None:
31 build_script = ScriptFinder.find_build_script(self.target.name, self.component.name, self.git_repo.working_directory)
32
33 build_command = " ".join(
34 filter(
35 None,
36 [
37 "bash",
38 build_script,
39 f"-v {self.target.version}",
40 f"-p {self.target.platform}",
41 f"-a {self.target.architecture}",
42 f"-d {self.target.distribution}" if self.target.distribution else None,
43 f"-s {str(self.target.snapshot).lower()}",
44 f"-o {self.output_path}",
45 ]
46 )
47 )
48
49 self.git_repo.execute(build_command)
50 build_recorder.record_component(self.component.name, self.git_repo)
51
52 def export_artifacts(self, build_recorder: BuildRecorder) -> None:
53 artifacts_path = os.path.join(self.git_repo.working_directory, self.output_path)
54 for artifact_type in ["maven", "dist", "plugins", "libs", "core-plugins"]:
55 for dir, _, files in os.walk(os.path.join(artifacts_path, artifact_type)):
56 for file_name in files:
57 absolute_path = os.path.join(dir, file_name)
58 relative_path = os.path.relpath(absolute_path, artifacts_path)
59 build_recorder.record_artifact(self.component.name, artifact_type, relative_path, absolute_path)
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/build_workflow/builder_from_source.py b/src/build_workflow/builder_from_source.py
--- a/src/build_workflow/builder_from_source.py
+++ b/src/build_workflow/builder_from_source.py
@@ -28,6 +28,11 @@
)
def build(self, build_recorder: BuildRecorder) -> None:
+
+ # List of components whose build scripts support `-d` parameter
+ # Bundled plugins do not need `-d` as they are java based zips
+ DISTRIBUTION_SUPPORTED_COMPONENTS = ["OpenSearch", "OpenSearch-Dashboards"]
+
build_script = ScriptFinder.find_build_script(self.target.name, self.component.name, self.git_repo.working_directory)
build_command = " ".join(
@@ -39,7 +44,7 @@
f"-v {self.target.version}",
f"-p {self.target.platform}",
f"-a {self.target.architecture}",
- f"-d {self.target.distribution}" if self.target.distribution else None,
+ f"-d {self.target.distribution}" if self.target.distribution and (self.component.name in DISTRIBUTION_SUPPORTED_COMPONENTS) else None,
f"-s {str(self.target.snapshot).lower()}",
f"-o {self.output_path}",
]
| {"golden_diff": "diff --git a/src/build_workflow/builder_from_source.py b/src/build_workflow/builder_from_source.py\n--- a/src/build_workflow/builder_from_source.py\n+++ b/src/build_workflow/builder_from_source.py\n@@ -28,6 +28,11 @@\n )\n \n def build(self, build_recorder: BuildRecorder) -> None:\n+\n+ # List of components whose build scripts support `-d` parameter\n+ # Bundled plugins do not need `-d` as they are java based zips\n+ DISTRIBUTION_SUPPORTED_COMPONENTS = [\"OpenSearch\", \"OpenSearch-Dashboards\"]\n+\n build_script = ScriptFinder.find_build_script(self.target.name, self.component.name, self.git_repo.working_directory)\n \n build_command = \" \".join(\n@@ -39,7 +44,7 @@\n f\"-v {self.target.version}\",\n f\"-p {self.target.platform}\",\n f\"-a {self.target.architecture}\",\n- f\"-d {self.target.distribution}\" if self.target.distribution else None,\n+ f\"-d {self.target.distribution}\" if self.target.distribution and (self.component.name in DISTRIBUTION_SUPPORTED_COMPONENTS) else None,\n f\"-s {str(self.target.snapshot).lower()}\",\n f\"-o {self.output_path}\",\n ]\n", "issue": "[BUG] Only send `-d` to core components, instead of everything.\nAs of now, build workflow will not send `-d` if user does not specify `--distribution`.\r\nHowever, if user specify things such as `--distribution rpm` and try to build core+all plugins,\r\n`-d` will be sent to plugins as well.\r\n\r\nThe plugin build script does not know how to interpret `-d` thus fail.\r\n```\r\n+ echo 'Invalid option: -?'\r\nInvalid option: -?\r\n+ exit 1\r\n2022-02-17 23:58:36 ERROR Error building common-utils, retry with: ./build.sh manifests/1.3.0/opensearch-1.3.0.yml --component common-utils\r\nTraceback (most recent call last):\r\n File \"./src/run_build.py\", line 79, in <module>\r\n sys.exit(main())\r\n File \"./src/run_build.py\", line 67, in main\r\n builder.build(build_recorder)\r\n File \"/local/home/zhujiaxi/opensearch-build-peterzhuamazon/src/build_workflow/builder_from_source.py\", line 49, in build\r\n self.git_repo.execute(build_command)\r\n File \"/local/home/zhujiaxi/opensearch-build-peterzhuamazon/src/git/git_repository.py\", line 83, in execute\r\n subprocess.check_call(command, cwd=cwd, shell=True)\r\n File \"/usr/lib64/python3.7/subprocess.py\", line 363, in check_call\r\n raise CalledProcessError(retcode, cmd)\r\nsubprocess.CalledProcessError: Command 'bash /local/home/zhujiaxi/opensearch-build-peterzhuamazon/scripts/components/common-utils/build.sh -v 1.3.0 -p linux -a x64 -d rpm -s false -o builds' returned non-zero exit status 1.\r\n```\r\n\r\nNeed to add a condition where if component != OpenSearch/OpenSearch-Dashboards, then `-d` will not be sent even if not None.\n", "before_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\nimport os\n\nfrom build_workflow.build_recorder import BuildRecorder\nfrom build_workflow.builder import Builder\nfrom git.git_repository import GitRepository\nfrom paths.script_finder import ScriptFinder\n\n\"\"\"\nThis class is responsible for executing the build for a component and passing the results to a build recorder.\nIt will notify the build recorder of build information such as repository and git ref, and any artifacts generated by the build.\nArtifacts found in \"<build root>/artifacts/<maven|plugins|libs|dist|core-plugins>\" will be recognized and recorded.\n\"\"\"\n\n\nclass 
BuilderFromSource(Builder):\n def checkout(self, work_dir: str) -> None:\n self.git_repo = GitRepository(\n self.component.repository,\n self.component.ref,\n os.path.join(work_dir, self.component.name),\n self.component.working_directory,\n )\n\n def build(self, build_recorder: BuildRecorder) -> None:\n build_script = ScriptFinder.find_build_script(self.target.name, self.component.name, self.git_repo.working_directory)\n\n build_command = \" \".join(\n filter(\n None,\n [\n \"bash\",\n build_script,\n f\"-v {self.target.version}\",\n f\"-p {self.target.platform}\",\n f\"-a {self.target.architecture}\",\n f\"-d {self.target.distribution}\" if self.target.distribution else None,\n f\"-s {str(self.target.snapshot).lower()}\",\n f\"-o {self.output_path}\",\n ]\n )\n )\n\n self.git_repo.execute(build_command)\n build_recorder.record_component(self.component.name, self.git_repo)\n\n def export_artifacts(self, build_recorder: BuildRecorder) -> None:\n artifacts_path = os.path.join(self.git_repo.working_directory, self.output_path)\n for artifact_type in [\"maven\", \"dist\", \"plugins\", \"libs\", \"core-plugins\"]:\n for dir, _, files in os.walk(os.path.join(artifacts_path, artifact_type)):\n for file_name in files:\n absolute_path = os.path.join(dir, file_name)\n relative_path = os.path.relpath(absolute_path, artifacts_path)\n build_recorder.record_artifact(self.component.name, artifact_type, relative_path, absolute_path)\n", "path": "src/build_workflow/builder_from_source.py"}], "after_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\nimport os\n\nfrom build_workflow.build_recorder import BuildRecorder\nfrom build_workflow.builder import Builder\nfrom git.git_repository import GitRepository\nfrom paths.script_finder import ScriptFinder\n\n\"\"\"\nThis class is responsible for executing the build for a component and passing the results to a build recorder.\nIt will notify the build recorder of build information such as repository and git ref, and any artifacts generated by the build.\nArtifacts found in \"<build root>/artifacts/<maven|plugins|libs|dist|core-plugins>\" will be recognized and recorded.\n\"\"\"\n\n\nclass BuilderFromSource(Builder):\n def checkout(self, work_dir: str) -> None:\n self.git_repo = GitRepository(\n self.component.repository,\n self.component.ref,\n os.path.join(work_dir, self.component.name),\n self.component.working_directory,\n )\n\n def build(self, build_recorder: BuildRecorder) -> None:\n\n # List of components whose build scripts support `-d` parameter\n # Bundled plugins do not need `-d` as they are java based zips\n DISTRIBUTION_SUPPORTED_COMPONENTS = [\"OpenSearch\", \"OpenSearch-Dashboards\"]\n\n build_script = ScriptFinder.find_build_script(self.target.name, self.component.name, self.git_repo.working_directory)\n\n build_command = \" \".join(\n filter(\n None,\n [\n \"bash\",\n build_script,\n f\"-v {self.target.version}\",\n f\"-p {self.target.platform}\",\n f\"-a {self.target.architecture}\",\n f\"-d {self.target.distribution}\" if self.target.distribution and (self.component.name in DISTRIBUTION_SUPPORTED_COMPONENTS) else None,\n f\"-s {str(self.target.snapshot).lower()}\",\n f\"-o {self.output_path}\",\n ]\n )\n )\n\n self.git_repo.execute(build_command)\n build_recorder.record_component(self.component.name, self.git_repo)\n\n def export_artifacts(self, build_recorder: BuildRecorder) 
-> None:\n artifacts_path = os.path.join(self.git_repo.working_directory, self.output_path)\n for artifact_type in [\"maven\", \"dist\", \"plugins\", \"libs\", \"core-plugins\"]:\n for dir, _, files in os.walk(os.path.join(artifacts_path, artifact_type)):\n for file_name in files:\n absolute_path = os.path.join(dir, file_name)\n relative_path = os.path.relpath(absolute_path, artifacts_path)\n build_recorder.record_artifact(self.component.name, artifact_type, relative_path, absolute_path)\n", "path": "src/build_workflow/builder_from_source.py"}]} | 1,325 | 278 |
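The patch in the record above gates the `-d` flag on a short allow-list of core components, since bundled plugin build scripts do not accept that parameter. Below is a minimal, self-contained sketch of that pattern; the function name and example values are invented for illustration and are not the repository's actual builder code.

```python
from typing import Optional

# Mirroring the patch above: only the two core components understand -d;
# bundled plugins (java-based zips) must never receive it.
DISTRIBUTION_SUPPORTED_COMPONENTS = ["OpenSearch", "OpenSearch-Dashboards"]


def build_command(component_name: str, version: str, platform: str, architecture: str,
                  distribution: Optional[str], snapshot: bool, output_path: str) -> str:
    args = [
        "bash",
        "build.sh",
        f"-v {version}",
        f"-p {platform}",
        f"-a {architecture}",
        # Pass -d only when a distribution was requested AND the component supports it.
        f"-d {distribution}"
        if distribution and component_name in DISTRIBUTION_SUPPORTED_COMPONENTS
        else None,
        f"-s {str(snapshot).lower()}",
        f"-o {output_path}",
    ]
    # filter(None, ...) drops the -d entry when it was suppressed above.
    return " ".join(filter(None, args))


if __name__ == "__main__":
    # A plugin such as common-utils does not get -d even when rpm is requested ...
    print(build_command("common-utils", "1.3.0", "linux", "x64", "rpm", False, "builds"))
    # ... while the core engine does.
    print(build_command("OpenSearch", "1.3.0", "linux", "x64", "rpm", False, "builds"))
```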
gh_patches_debug_18245 | rasdani/github-patches | git_diff | streamlink__streamlink-338 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TVCatchup addon not working anymore
root@ovh2:/data# streamlink http://tvcatchup.com/watch/channel4
[cli][info] streamlink is running as root! Be careful!
[cli][info] Found matching plugin tvcatchup for URL http://tvcatchup.com/watch/channel4
error: No streams found on this URL: http://tvcatchup.com/watch/channel4
root@ovh2:/data# streamlink --plugins
[cli][info] streamlink is running as root! Be careful!
Loaded plugins: adultswim, afreeca, afreecatv, aftonbladet, alieztv, antenna, ard_live, ard_mediathek, artetv, atresplayer, azubutv, bambuser, beam, beattv, bigo, bilibili, bliptv, chaturbate, cinergroup, connectcast, crunchyroll, cybergame, dailymotion, dingittv, disney_de, dmcloud, dmcloud_embed, dogan, dogus, dommune, douyutv, dplay, drdk, euronews, expressen, filmon, filmon_us, foxtr, furstream, gaminglive, gomexp, goodgame, hitbox, itvplayer, kanal7, letontv, livecodingtv, livestation, livestream, media_ccc_de, mediaklikk, meerkat, mips, mlgtv, nhkworld, nineanime, nos, npo, nrk, oldlivestream, openrectv, orf_tvthek, pandatv, periscope, picarto, piczel, powerapp, rtlxl, rtve, ruv, seemeplay, servustv, speedrunslive, sportschau, ssh101, stream, streamboat, streamingvideoprovider, streamlive, streamme, streamupcom, svtplay, tga, tigerdile, trt, turkuvaz, tv360, tv3cat, tv4play, tv8, tvcatchup, tvplayer, twitch, ustreamtv, vaughnlive, veetle, vgtv, viagame, viasat, viasat_embed, vidio, wattv, webtv, weeb, younow, youtube, zdf_mediathek
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/tvcatchup.py`
Content:
```
1 import re
2
3 from streamlink.plugin import Plugin
4 from streamlink.plugin.api import http
5 from streamlink.stream import HLSStream
6
7 USER_AGENT = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
8 _url_re = re.compile("http://(?:www\.)?tvcatchup.com/watch/\w+")
9 _stream_re = re.compile(r"\"(?P<stream_url>https?://.*m3u8\?.*clientKey=[^\"]*)\";")
10
11
12 class TVCatchup(Plugin):
13 @classmethod
14 def can_handle_url(cls, url):
15 return _url_re.match(url)
16
17 def _get_streams(self):
18 """
19 Finds the streams from tvcatchup.com.
20 """
21 http.headers.update({"User-Agent": USER_AGENT})
22 res = http.get(self.url)
23
24 match = _stream_re.search(res.text, re.IGNORECASE | re.MULTILINE)
25
26 if match:
27 stream_url = match.groupdict()["stream_url"]
28
29 if stream_url:
30 if "_adp" in stream_url:
31 return HLSStream.parse_variant_playlist(self.session, stream_url)
32 else:
33 return {'576p': HLSStream(self.session, stream_url)}
34
35
36 __plugin__ = TVCatchup
37
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/streamlink/plugins/tvcatchup.py b/src/streamlink/plugins/tvcatchup.py
--- a/src/streamlink/plugins/tvcatchup.py
+++ b/src/streamlink/plugins/tvcatchup.py
@@ -6,7 +6,7 @@
USER_AGENT = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
_url_re = re.compile("http://(?:www\.)?tvcatchup.com/watch/\w+")
-_stream_re = re.compile(r"\"(?P<stream_url>https?://.*m3u8\?.*clientKey=[^\"]*)\";")
+_stream_re = re.compile(r'''(?P<q>["'])(?P<stream_url>https?://.*m3u8\?.*clientKey=.*?)(?P=q)''')
class TVCatchup(Plugin):
@@ -24,7 +24,7 @@
match = _stream_re.search(res.text, re.IGNORECASE | re.MULTILINE)
if match:
- stream_url = match.groupdict()["stream_url"]
+ stream_url = match.group("stream_url")
if stream_url:
if "_adp" in stream_url:
| {"golden_diff": "diff --git a/src/streamlink/plugins/tvcatchup.py b/src/streamlink/plugins/tvcatchup.py\n--- a/src/streamlink/plugins/tvcatchup.py\n+++ b/src/streamlink/plugins/tvcatchup.py\n@@ -6,7 +6,7 @@\n \n USER_AGENT = \"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36\"\n _url_re = re.compile(\"http://(?:www\\.)?tvcatchup.com/watch/\\w+\")\n-_stream_re = re.compile(r\"\\\"(?P<stream_url>https?://.*m3u8\\?.*clientKey=[^\\\"]*)\\\";\")\n+_stream_re = re.compile(r'''(?P<q>[\"'])(?P<stream_url>https?://.*m3u8\\?.*clientKey=.*?)(?P=q)''')\n \n \n class TVCatchup(Plugin):\n@@ -24,7 +24,7 @@\n match = _stream_re.search(res.text, re.IGNORECASE | re.MULTILINE)\n \n if match:\n- stream_url = match.groupdict()[\"stream_url\"]\n+ stream_url = match.group(\"stream_url\")\n \n if stream_url:\n if \"_adp\" in stream_url:\n", "issue": "TVCatchup addon not working anymore\nroot@ovh2:/data# streamlink http://tvcatchup.com/watch/channel4\r\n[cli][info] streamlink is running as root! Be careful!\r\n[cli][info] Found matching plugin tvcatchup for URL http://tvcatchup.com/watch/channel4\r\nerror: No streams found on this URL: http://tvcatchup.com/watch/channel4\r\nroot@ovh2:/data# streamlink --plugins\r\n[cli][info] streamlink is running as root! Be careful!\r\nLoaded plugins: adultswim, afreeca, afreecatv, aftonbladet, alieztv, antenna, ard_live, ard_mediathek, artetv, atresplayer, azubutv, bambuser, beam, beattv, bigo, bilibili, bliptv, chaturbate, cinergroup, connectcast, crunchyroll, cybergame, dailymotion, dingittv, disney_de, dmcloud, dmcloud_embed, dogan, dogus, dommune, douyutv, dplay, drdk, euronews, expressen, filmon, filmon_us, foxtr, furstream, gaminglive, gomexp, goodgame, hitbox, itvplayer, kanal7, letontv, livecodingtv, livestation, livestream, media_ccc_de, mediaklikk, meerkat, mips, mlgtv, nhkworld, nineanime, nos, npo, nrk, oldlivestream, openrectv, orf_tvthek, pandatv, periscope, picarto, piczel, powerapp, rtlxl, rtve, ruv, seemeplay, servustv, speedrunslive, sportschau, ssh101, stream, streamboat, streamingvideoprovider, streamlive, streamme, streamupcom, svtplay, tga, tigerdile, trt, turkuvaz, tv360, tv3cat, tv4play, tv8, tvcatchup, tvplayer, twitch, ustreamtv, vaughnlive, veetle, vgtv, viagame, viasat, viasat_embed, vidio, wattv, webtv, weeb, younow, youtube, zdf_mediathek\r\n\n", "before_files": [{"content": "import re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http\nfrom streamlink.stream import HLSStream\n\nUSER_AGENT = \"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36\"\n_url_re = re.compile(\"http://(?:www\\.)?tvcatchup.com/watch/\\w+\")\n_stream_re = re.compile(r\"\\\"(?P<stream_url>https?://.*m3u8\\?.*clientKey=[^\\\"]*)\\\";\")\n\n\nclass TVCatchup(Plugin):\n @classmethod\n def can_handle_url(cls, url):\n return _url_re.match(url)\n\n def _get_streams(self):\n \"\"\"\n Finds the streams from tvcatchup.com.\n \"\"\"\n http.headers.update({\"User-Agent\": USER_AGENT})\n res = http.get(self.url)\n\n match = _stream_re.search(res.text, re.IGNORECASE | re.MULTILINE)\n\n if match:\n stream_url = match.groupdict()[\"stream_url\"]\n\n if stream_url:\n if \"_adp\" in stream_url:\n return HLSStream.parse_variant_playlist(self.session, stream_url)\n else:\n return {'576p': HLSStream(self.session, stream_url)}\n\n\n__plugin__ = TVCatchup\n", "path": "src/streamlink/plugins/tvcatchup.py"}], "after_files": [{"content": "import re\n\nfrom 
streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http\nfrom streamlink.stream import HLSStream\n\nUSER_AGENT = \"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36\"\n_url_re = re.compile(\"http://(?:www\\.)?tvcatchup.com/watch/\\w+\")\n_stream_re = re.compile(r'''(?P<q>[\"'])(?P<stream_url>https?://.*m3u8\\?.*clientKey=.*?)(?P=q)''')\n\n\nclass TVCatchup(Plugin):\n @classmethod\n def can_handle_url(cls, url):\n return _url_re.match(url)\n\n def _get_streams(self):\n \"\"\"\n Finds the streams from tvcatchup.com.\n \"\"\"\n http.headers.update({\"User-Agent\": USER_AGENT})\n res = http.get(self.url)\n\n match = _stream_re.search(res.text, re.IGNORECASE | re.MULTILINE)\n\n if match:\n stream_url = match.group(\"stream_url\")\n\n if stream_url:\n if \"_adp\" in stream_url:\n return HLSStream.parse_variant_playlist(self.session, stream_url)\n else:\n return {'576p': HLSStream(self.session, stream_url)}\n\n\n__plugin__ = TVCatchup\n", "path": "src/streamlink/plugins/tvcatchup.py"}]} | 1,144 | 296 |
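The fix in the record above comes down to two things: a regex that accepts either quote character around the stream URL (the `(?P<q>...)` group plus the `(?P=q)` back-reference) and reading the capture with `match.group("stream_url")`. A small standalone sketch of that matching logic follows; the page fragments are invented and do not come from a real tvcatchup.com response.

```python
import re

# Same idea as the patched plugin: accept either quote character around the URL,
# capture it in a named group, and require the closing quote to match the opener.
_stream_re = re.compile(
    r'''(?P<q>["'])(?P<stream_url>https?://.*m3u8\?.*clientKey=.*?)(?P=q)'''
)

# Hypothetical page fragments for illustration only.
samples = [
    '"https://cdn.example.com/live_adp.m3u8?token=1&clientKey=abc";',   # double-quoted form
    "var u = 'https://cdn.example.com/576p.m3u8?clientKey=xyz';",        # single-quoted form
]

for text in samples:
    match = _stream_re.search(text)
    if match:
        stream_url = match.group("stream_url")   # instead of match.groupdict()[...]
        variant = "adaptive" if "_adp" in stream_url else "single bitrate"
        print(f"{variant}: {stream_url}")
```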
gh_patches_debug_4935 | rasdani/github-patches | git_diff | quantumlib__Cirq-4249 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Push to PyPi failing
```
error in cirq setup command: 'extras_require' must be a dictionary whose values are strings or lists of strings containing valid project/version requirement specifiers.
```
See https://github.com/quantumlib/Cirq/runs/2851981344
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 2018 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import io
16 import os
17 from setuptools import setup
18
19 # This reads the __version__ variable from cirq/_version.py
20 __version__ = ''
21
22 from dev_tools import modules
23 from dev_tools.requirements import explode
24
25 exec(open('cirq-core/cirq/_version.py').read())
26
27 name = 'cirq'
28
29 description = (
30 'A framework for creating, editing, and invoking '
31 'Noisy Intermediate Scale Quantum (NISQ) circuits.'
32 )
33
34 # README file as long_description.
35 long_description = io.open('README.rst', encoding='utf-8').read()
36
37 # If CIRQ_PRE_RELEASE_VERSION is set then we update the version to this value.
38 # It is assumed that it ends with one of `.devN`, `.aN`, `.bN`, `.rcN` and hence
39 # it will be a pre-release version on PyPi. See
40 # https://packaging.python.org/guides/distributing-packages-using-setuptools/#pre-release-versioning
41 # for more details.
42 if 'CIRQ_PRE_RELEASE_VERSION' in os.environ:
43 __version__ = os.environ['CIRQ_PRE_RELEASE_VERSION']
44 long_description = (
45 "**This is a development version of Cirq and may be "
46 "unstable.**\n\n**For the latest stable release of Cirq "
47 "see**\n`here <https://pypi.org/project/cirq>`__.\n\n" + long_description
48 )
49
50 # Sanity check
51 assert __version__, 'Version string cannot be empty'
52
53 # This is a pure metapackage that installs all our packages
54 requirements = [f'{p.name}=={p.version}' for p in modules.list_modules()]
55
56 dev_requirements = explode('dev_tools/requirements/deps/dev-tools.txt')
57 dev_requirements = [r.strip() for r in dev_requirements]
58
59 setup(
60 name=name,
61 version=__version__,
62 url='http://github.com/quantumlib/cirq',
63 author='The Cirq Developers',
64 author_email='[email protected]',
65 python_requires='>=3.6.0',
66 install_requires=requirements,
67 extras_require={
68 'dev_env': dev_requirements,
69 },
70 license='Apache 2',
71 description=description,
72 long_description=long_description,
73 )
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -54,7 +54,9 @@
requirements = [f'{p.name}=={p.version}' for p in modules.list_modules()]
dev_requirements = explode('dev_tools/requirements/deps/dev-tools.txt')
-dev_requirements = [r.strip() for r in dev_requirements]
+
+# filter out direct urls (https://github.com/pypa/pip/issues/6301)
+dev_requirements = [r.strip() for r in dev_requirements if "git+http" not in r]
setup(
name=name,
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -54,7 +54,9 @@\n requirements = [f'{p.name}=={p.version}' for p in modules.list_modules()]\n \n dev_requirements = explode('dev_tools/requirements/deps/dev-tools.txt')\n-dev_requirements = [r.strip() for r in dev_requirements]\n+\n+# filter out direct urls (https://github.com/pypa/pip/issues/6301)\n+dev_requirements = [r.strip() for r in dev_requirements if \"git+http\" not in r]\n \n setup(\n name=name,\n", "issue": "Push to PyPi failing\n```\r\nerror in cirq setup command: 'extras_require' must be a dictionary whose values are strings or lists of strings containing valid project/version requirement specifiers.\r\n```\r\n\r\nSee https://github.com/quantumlib/Cirq/runs/2851981344\r\n\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\nfrom setuptools import setup\n\n# This reads the __version__ variable from cirq/_version.py\n__version__ = ''\n\nfrom dev_tools import modules\nfrom dev_tools.requirements import explode\n\nexec(open('cirq-core/cirq/_version.py').read())\n\nname = 'cirq'\n\ndescription = (\n 'A framework for creating, editing, and invoking '\n 'Noisy Intermediate Scale Quantum (NISQ) circuits.'\n)\n\n# README file as long_description.\nlong_description = io.open('README.rst', encoding='utf-8').read()\n\n# If CIRQ_PRE_RELEASE_VERSION is set then we update the version to this value.\n# It is assumed that it ends with one of `.devN`, `.aN`, `.bN`, `.rcN` and hence\n# it will be a pre-release version on PyPi. 
See\n# https://packaging.python.org/guides/distributing-packages-using-setuptools/#pre-release-versioning\n# for more details.\nif 'CIRQ_PRE_RELEASE_VERSION' in os.environ:\n __version__ = os.environ['CIRQ_PRE_RELEASE_VERSION']\n long_description = (\n \"**This is a development version of Cirq and may be \"\n \"unstable.**\\n\\n**For the latest stable release of Cirq \"\n \"see**\\n`here <https://pypi.org/project/cirq>`__.\\n\\n\" + long_description\n )\n\n# Sanity check\nassert __version__, 'Version string cannot be empty'\n\n# This is a pure metapackage that installs all our packages\nrequirements = [f'{p.name}=={p.version}' for p in modules.list_modules()]\n\ndev_requirements = explode('dev_tools/requirements/deps/dev-tools.txt')\ndev_requirements = [r.strip() for r in dev_requirements]\n\nsetup(\n name=name,\n version=__version__,\n url='http://github.com/quantumlib/cirq',\n author='The Cirq Developers',\n author_email='[email protected]',\n python_requires='>=3.6.0',\n install_requires=requirements,\n extras_require={\n 'dev_env': dev_requirements,\n },\n license='Apache 2',\n description=description,\n long_description=long_description,\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\nfrom setuptools import setup\n\n# This reads the __version__ variable from cirq/_version.py\n__version__ = ''\n\nfrom dev_tools import modules\nfrom dev_tools.requirements import explode\n\nexec(open('cirq-core/cirq/_version.py').read())\n\nname = 'cirq'\n\ndescription = (\n 'A framework for creating, editing, and invoking '\n 'Noisy Intermediate Scale Quantum (NISQ) circuits.'\n)\n\n# README file as long_description.\nlong_description = io.open('README.rst', encoding='utf-8').read()\n\n# If CIRQ_PRE_RELEASE_VERSION is set then we update the version to this value.\n# It is assumed that it ends with one of `.devN`, `.aN`, `.bN`, `.rcN` and hence\n# it will be a pre-release version on PyPi. 
See\n# https://packaging.python.org/guides/distributing-packages-using-setuptools/#pre-release-versioning\n# for more details.\nif 'CIRQ_PRE_RELEASE_VERSION' in os.environ:\n __version__ = os.environ['CIRQ_PRE_RELEASE_VERSION']\n long_description = (\n \"**This is a development version of Cirq and may be \"\n \"unstable.**\\n\\n**For the latest stable release of Cirq \"\n \"see**\\n`here <https://pypi.org/project/cirq>`__.\\n\\n\" + long_description\n )\n\n# Sanity check\nassert __version__, 'Version string cannot be empty'\n\n# This is a pure metapackage that installs all our packages\nrequirements = [f'{p.name}=={p.version}' for p in modules.list_modules()]\n\ndev_requirements = explode('dev_tools/requirements/deps/dev-tools.txt')\n\n# filter out direct urls (https://github.com/pypa/pip/issues/6301)\ndev_requirements = [r.strip() for r in dev_requirements if \"git+http\" not in r]\n\nsetup(\n name=name,\n version=__version__,\n url='http://github.com/quantumlib/cirq',\n author='The Cirq Developers',\n author_email='[email protected]',\n python_requires='>=3.6.0',\n install_requires=requirements,\n extras_require={\n 'dev_env': dev_requirements,\n },\n license='Apache 2',\n description=description,\n long_description=long_description,\n)\n", "path": "setup.py"}]} | 1,081 | 134 |
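The one-line fix above filters direct-URL requirements out of the dev extras before they reach setuptools, because `extras_require` only accepts project/version requirement specifiers. A minimal sketch of that filtering step, using an invented requirements list in place of `dev_tools/requirements/deps/dev-tools.txt`:

```python
# Illustrative only: mimic reading a dev requirements file and dropping direct
# git URLs, which setuptools' extras_require cannot parse as version specifiers.
raw_requirements = [
    "black==20.8b1",
    "mypy==0.782",
    "git+https://github.com/example/some-tool.git#egg=some-tool",  # hypothetical direct URL
    "pytest>=5.4",
]

dev_requirements = [r.strip() for r in raw_requirements if "git+http" not in r]

print(dev_requirements)
# ['black==20.8b1', 'mypy==0.782', 'pytest>=5.4']
```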
gh_patches_debug_3669 | rasdani/github-patches | git_diff | ocadotechnology__codeforlife-portal-783 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
portal API not working anymore
**Describe the bug**
When trying to access the following URLs:
/api/lastconnectedsince/YYYY/MM/DD
.../registered/YYYY/MM/DD
.../userspercountry/CC
On any of our servers, we get a 500 error.
Google console says:
> TemplateSyntaxError: 'url' is not a valid tag or filter in tag library 'future'
It happens even for accounts that have the right to access them.
**To Reproduce**
If you have an authorised google account, go to https://www.codeforlife.education/api/lastconnectedsince/2018/07/20/ and you will see a 500 error
**Expected behaviour**
This page should display a number when your Google account is authorised
**Desktop (please complete the following information):**
- OS: Ubuntu 16.04
- Browser: Chrome
**Additional context**
The urls.py file has been reworked for forward compatibility
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from setuptools import find_packages, setup
3 import versioneer
4 setup(name='codeforlife-portal',
5 cmdclass=versioneer.get_cmdclass(),
6 version=versioneer.get_version(),
7 packages=find_packages(),
8 include_package_data=True,
9 install_requires=[
10 'django==1.9.13',
11 'django-appconf==1.0.1',
12 'django-countries==3.4.1',
13 'djangorestframework==3.1.3',
14 'django-jquery==1.9.1',
15 'django-autoconfig==0.8.0',
16 'django-pipeline==1.5.4',
17 'django-recaptcha==1.3.1', # 1.4 dropped support for < 1.11
18
19 'pyyaml==3.10',
20 'rapid-router >= 1.0.0.post.dev1',
21 'six==1.11.0',
22 'aimmo',
23 'docutils==0.12',
24 'reportlab==3.2.0',
25 'postcodes==0.1',
26 'django-formtools==1.0',
27 'django-two-factor-auth==1.5.0',
28 'urllib3==1.22',
29 'requests==2.18.4',
30
31 'django-classy-tags==0.6.1',
32 'django-treebeard==4.3',
33 'django-sekizai==0.10.0',
34
35 'django-online-status==0.1.0',
36
37 'Pillow==3.3.2',
38 'django-reversion==2.0.0',
39 'sqlparse',
40 'libsass',
41 'django-forms-bootstrap'
42 ],
43 tests_require=[
44 'django-setuptest==0.2.1',
45 'django-selenium-clean==0.3.0',
46 'responses==0.4.0',
47 'selenium==2.48.0',
48 ],
49 test_suite='setuptest.setuptest.SetupTestSuite',
50 zip_safe=False,
51 )
52
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -10,7 +10,7 @@
'django==1.9.13',
'django-appconf==1.0.1',
'django-countries==3.4.1',
- 'djangorestframework==3.1.3',
+ 'djangorestframework==3.2.3',
'django-jquery==1.9.1',
'django-autoconfig==0.8.0',
'django-pipeline==1.5.4',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -10,7 +10,7 @@\n 'django==1.9.13',\n 'django-appconf==1.0.1',\n 'django-countries==3.4.1',\n- 'djangorestframework==3.1.3',\n+ 'djangorestframework==3.2.3',\n 'django-jquery==1.9.1',\n 'django-autoconfig==0.8.0',\n 'django-pipeline==1.5.4',\n", "issue": "portal API not working anymore\n**Describe the bug**\r\nWhen trying to access the following URLs:\r\n/api/lastconnectedsince/YYYY/MM/DD\r\n.../registered/YYYY/MM/DD\r\n.../userspercountry/CC\r\nOn any of our servers, we get a 500 error.\r\nGoogle console says: \r\n\r\n> TemplateSyntaxError: 'url' is not a valid tag or filter in tag library 'future'\r\n\r\nIt happens even with the right to access them.\r\n\r\n**To Reproduce**\r\nIf you have an authorised google account, go to https://www.codeforlife.education/api/lastconnectedsince/2018/07/20/ and you will see a 500 error\r\n\r\n**Expected behaviour**\r\nThis page to display a number when your google account is autorised\r\n\r\n**Desktop (please complete the following information):**\r\n\r\n- OS:Ubuntu 16.04\r\n- Browser:Chrome\r\n \r\n**Additional context**\r\nThe urls.py file has been reworked for forward compatibility\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom setuptools import find_packages, setup\nimport versioneer\nsetup(name='codeforlife-portal',\n cmdclass=versioneer.get_cmdclass(),\n version=versioneer.get_version(),\n packages=find_packages(),\n include_package_data=True,\n install_requires=[\n 'django==1.9.13',\n 'django-appconf==1.0.1',\n 'django-countries==3.4.1',\n 'djangorestframework==3.1.3',\n 'django-jquery==1.9.1',\n 'django-autoconfig==0.8.0',\n 'django-pipeline==1.5.4',\n 'django-recaptcha==1.3.1', # 1.4 dropped support for < 1.11\n\n 'pyyaml==3.10',\n 'rapid-router >= 1.0.0.post.dev1',\n 'six==1.11.0',\n 'aimmo',\n 'docutils==0.12',\n 'reportlab==3.2.0',\n 'postcodes==0.1',\n 'django-formtools==1.0',\n 'django-two-factor-auth==1.5.0',\n 'urllib3==1.22',\n 'requests==2.18.4',\n\n 'django-classy-tags==0.6.1',\n 'django-treebeard==4.3',\n 'django-sekizai==0.10.0',\n\n 'django-online-status==0.1.0',\n\n 'Pillow==3.3.2',\n 'django-reversion==2.0.0',\n 'sqlparse',\n 'libsass',\n 'django-forms-bootstrap'\n ],\n tests_require=[\n 'django-setuptest==0.2.1',\n 'django-selenium-clean==0.3.0',\n 'responses==0.4.0',\n 'selenium==2.48.0',\n ],\n test_suite='setuptest.setuptest.SetupTestSuite',\n zip_safe=False,\n )\n", "path": "setup.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom setuptools import find_packages, setup\nimport versioneer\nsetup(name='codeforlife-portal',\n cmdclass=versioneer.get_cmdclass(),\n version=versioneer.get_version(),\n packages=find_packages(),\n include_package_data=True,\n install_requires=[\n 'django==1.9.13',\n 'django-appconf==1.0.1',\n 'django-countries==3.4.1',\n 'djangorestframework==3.2.3',\n 'django-jquery==1.9.1',\n 'django-autoconfig==0.8.0',\n 'django-pipeline==1.5.4',\n 'django-recaptcha==1.3.1', # 1.4 dropped support for < 1.11\n\n 'pyyaml==3.10',\n 'rapid-router >= 1.0.0.post.dev1',\n 'six==1.11.0',\n 'aimmo',\n 'docutils==0.12',\n 'reportlab==3.2.0',\n 'postcodes==0.1',\n 'django-formtools==1.0',\n 'django-two-factor-auth==1.5.0',\n 'urllib3==1.22',\n 'requests==2.18.4',\n\n 'django-classy-tags==0.6.1',\n 'django-treebeard==4.3',\n 'django-sekizai==0.10.0',\n\n 'django-online-status==0.1.0',\n\n 'Pillow==3.3.2',\n 'django-reversion==2.0.0',\n 'sqlparse',\n 'libsass',\n 'django-forms-bootstrap'\n ],\n 
tests_require=[\n 'django-setuptest==0.2.1',\n 'django-selenium-clean==0.3.0',\n 'responses==0.4.0',\n 'selenium==2.48.0',\n ],\n test_suite='setuptest.setuptest.SetupTestSuite',\n zip_safe=False,\n )\n", "path": "setup.py"}]} | 1,021 | 131 |
gh_patches_debug_59 | rasdani/github-patches | git_diff | Anselmoo__spectrafit-662 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Docs]: Using mike for versioning docs
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Missing Information in the Docs
https://squidfunk.github.io/mkdocs-material/setup/setting-up-versioning/
### Anything else?
_No response_
### Code of Conduct
- [X] I agree to follow this project's Code of Conduct
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `spectrafit/__init__.py`
Content:
```
1 """SpectraFit, fast command line tool for fitting data."""
2 __version__ = "1.0.0a2"
3
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/spectrafit/__init__.py b/spectrafit/__init__.py
--- a/spectrafit/__init__.py
+++ b/spectrafit/__init__.py
@@ -1,2 +1,2 @@
"""SpectraFit, fast command line tool for fitting data."""
-__version__ = "1.0.0a2"
+__version__ = "1.0.0a3"
| {"golden_diff": "diff --git a/spectrafit/__init__.py b/spectrafit/__init__.py\n--- a/spectrafit/__init__.py\n+++ b/spectrafit/__init__.py\n@@ -1,2 +1,2 @@\n \"\"\"SpectraFit, fast command line tool for fitting data.\"\"\"\n-__version__ = \"1.0.0a2\"\n+__version__ = \"1.0.0a3\"\n", "issue": "[Docs]: Using mike for versioning docs\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues\n\n### Current Missing Information in the Docs\n\nhttps://squidfunk.github.io/mkdocs-material/setup/setting-up-versioning/\n\n### Anything else?\n\n_No response_\n\n### Code of Conduct\n\n- [X] I agree to follow this project's Code of Conduct\n", "before_files": [{"content": "\"\"\"SpectraFit, fast command line tool for fitting data.\"\"\"\n__version__ = \"1.0.0a2\"\n", "path": "spectrafit/__init__.py"}], "after_files": [{"content": "\"\"\"SpectraFit, fast command line tool for fitting data.\"\"\"\n__version__ = \"1.0.0a3\"\n", "path": "spectrafit/__init__.py"}]} | 374 | 96 |
gh_patches_debug_40787 | rasdani/github-patches | git_diff | ietf-tools__datatracker-7199 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Refactor to drop dependency on decorator package
### Description
We have a few decorators defined in `ietf/utils/decorator.py` that use the "decorator" package. This provides the `@decorator` decorator and a `decorate` method. The built-in Python `functools.wraps()` method can fulfill the needs here without the additional dependency. As far as I can tell we're not making use of any of the features provided by the "decorator" package.
The `@decorator` mechanism also seems to interfere with Django's `@method_decorator`, which led to refactoring the `requires_api_key` decorator already.
### Code of Conduct
- [X] I agree to follow the [IETF's Code of Conduct](https://github.com/ietf-tools/.github/blob/main/CODE_OF_CONDUCT.md)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ietf/utils/decorators.py`
Content:
```
1 # Copyright The IETF Trust 2016-2020, All Rights Reserved
2 # -*- coding: utf-8 -*-
3
4
5 import datetime
6
7 from decorator import decorator, decorate
8 from functools import wraps
9
10 from django.conf import settings
11 from django.contrib.auth import login
12 from django.http import HttpResponse
13 from django.shortcuts import render
14 from django.utils import timezone
15 from django.utils.encoding import force_bytes
16
17 import debug # pyflakes:ignore
18
19 from ietf.utils.test_runner import set_coverage_checking
20 from ietf.person.models import Person, PersonalApiKey, PersonApiKeyEvent
21 from ietf.utils import log
22
23 @decorator
24 def skip_coverage(f, *args, **kwargs):
25 if settings.TEST_CODE_COVERAGE_CHECKER:
26 set_coverage_checking(False)
27 result = f(*args, **kwargs)
28 set_coverage_checking(True)
29 return result
30 else:
31 return f(*args, **kwargs)
32
33 @decorator
34 def person_required(f, request, *args, **kwargs):
35 if not request.user.is_authenticated:
36 raise ValueError("The @person_required decorator should be called after @login_required.")
37 try:
38 request.user.person
39 except Person.DoesNotExist:
40 return render(request, 'registration/missing_person.html')
41 return f(request, *args, **kwargs)
42
43
44 def require_api_key(f):
45 @wraps(f)
46 def _wrapper(request, *args, **kwargs):
47 def err(code, text):
48 return HttpResponse(text, status=code, content_type='text/plain')
49 # Check method and get hash
50 if request.method == 'POST':
51 hash = request.POST.get('apikey')
52 elif request.method == 'GET':
53 hash = request.GET.get('apikey')
54 else:
55 return err(405, "Method not allowed")
56 if not hash:
57 return err(400, "Missing apikey parameter")
58 # Check hash
59 key = PersonalApiKey.validate_key(force_bytes(hash))
60 if not key:
61 return err(403, "Invalid apikey")
62 # Check endpoint
63 urlpath = request.META.get('PATH_INFO')
64 if not (urlpath and urlpath == key.endpoint):
65 return err(400, "Apikey endpoint mismatch")
66 # Check time since regular login
67 person = key.person
68 last_login = person.user.last_login
69 if not person.user.is_staff:
70 time_limit = (timezone.now() - datetime.timedelta(days=settings.UTILS_APIKEY_GUI_LOGIN_LIMIT_DAYS))
71 if last_login == None or last_login < time_limit:
72 return err(400, "Too long since last regular login")
73 # Log in
74 login(request, person.user)
75 # restore the user.last_login field, so it reflects only gui logins
76 person.user.last_login = last_login
77 person.user.save()
78 # Update stats
79 key.count += 1
80 key.latest = timezone.now()
81 key.save()
82 PersonApiKeyEvent.objects.create(person=person, type='apikey_login', key=key, desc="Logged in with key ID %s, endpoint %s" % (key.id, key.endpoint))
83 # Execute decorated function
84 try:
85 ret = f(request, *args, **kwargs)
86 except AttributeError as e:
87 log.log("Bad API call: args: %s, kwargs: %s, exception: %s" % (args, kwargs, e))
88 return err(400, "Bad or missing parameters")
89 return ret
90 return _wrapper
91
92
93 def _memoize(func, self, *args, **kwargs):
94 '''Memoize wrapper for instance methods. Use @lru_cache for functions.'''
95 if kwargs: # frozenset is used to ensure hashability
96 key = args, frozenset(list(kwargs.items()))
97 else:
98 key = args
99 # instance method, set up cache if needed
100 if not hasattr(self, '_cache'):
101 self._cache = {}
102 if not func in self._cache:
103 self._cache[func] = {}
104 #
105 cache = self._cache[func]
106 if key not in cache:
107 cache[key] = func(self, *args, **kwargs)
108 return cache[key]
109 def memoize(func):
110 if not hasattr(func, '__class__'):
111 raise NotImplementedError("Use @lru_cache instead of memoize() for functions.")
112 # For methods, we want the cache on the object, not on the class, in order
113 # to not having to think about cache bloat and content becoming stale, so
114 # we cannot set up the cache here.
115 return decorate(func, _memoize)
116
117
118 def ignore_view_kwargs(*args):
119 """Ignore the specified kwargs if they are present
120
121 Usage:
122 @ignore_view_kwargs("ignore_arg1", "ignore_arg2")
123 def my_view(request, good_arg):
124 ...
125
126 This will allow my_view() to be used in url() paths that have zero, one, or both of
127 ignore_arg1 and ignore_arg2 captured. These will be ignored, while good_arg will still
128 be captured as usual.
129 """
130 kwargs_to_ignore = args
131
132 def decorate(view):
133 @wraps(view)
134 def wrapped(*args, **kwargs):
135 for kwarg in kwargs_to_ignore:
136 kwargs.pop(kwarg, None)
137 return view(*args, **kwargs)
138
139 return wrapped
140
141 return decorate
142
143
144
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ietf/utils/decorators.py b/ietf/utils/decorators.py
--- a/ietf/utils/decorators.py
+++ b/ietf/utils/decorators.py
@@ -4,7 +4,6 @@
import datetime
-from decorator import decorator, decorate
from functools import wraps
from django.conf import settings
@@ -20,25 +19,29 @@
from ietf.person.models import Person, PersonalApiKey, PersonApiKeyEvent
from ietf.utils import log
-@decorator
-def skip_coverage(f, *args, **kwargs):
- if settings.TEST_CODE_COVERAGE_CHECKER:
- set_coverage_checking(False)
- result = f(*args, **kwargs)
- set_coverage_checking(True)
- return result
- else:
- return f(*args, **kwargs)
-
-@decorator
-def person_required(f, request, *args, **kwargs):
- if not request.user.is_authenticated:
- raise ValueError("The @person_required decorator should be called after @login_required.")
- try:
- request.user.person
- except Person.DoesNotExist:
- return render(request, 'registration/missing_person.html')
- return f(request, *args, **kwargs)
+def skip_coverage(f):
+ @wraps(f)
+ def _wrapper(*args, **kwargs):
+ if settings.TEST_CODE_COVERAGE_CHECKER:
+ set_coverage_checking(False)
+ result = f(*args, **kwargs)
+ set_coverage_checking(True)
+ return result
+ else:
+ return f(*args, **kwargs)
+ return _wrapper
+
+def person_required(f):
+ @wraps(f)
+ def _wrapper(request, *args, **kwargs):
+ if not request.user.is_authenticated:
+ raise ValueError("The @person_required decorator should be called after @login_required.")
+ try:
+ request.user.person
+ except Person.DoesNotExist:
+ return render(request, 'registration/missing_person.html')
+ return f(request, *args, **kwargs)
+ return _wrapper
def require_api_key(f):
@@ -90,29 +93,31 @@
return _wrapper
-def _memoize(func, self, *args, **kwargs):
- '''Memoize wrapper for instance methods. Use @lru_cache for functions.'''
- if kwargs: # frozenset is used to ensure hashability
- key = args, frozenset(list(kwargs.items()))
- else:
- key = args
- # instance method, set up cache if needed
- if not hasattr(self, '_cache'):
- self._cache = {}
- if not func in self._cache:
- self._cache[func] = {}
- #
- cache = self._cache[func]
- if key not in cache:
- cache[key] = func(self, *args, **kwargs)
- return cache[key]
def memoize(func):
+ @wraps(func)
+ def _memoize(self, *args, **kwargs):
+ '''Memoize wrapper for instance methods. Use @lru_cache for functions.'''
+ if kwargs: # frozenset is used to ensure hashability
+ key = args, frozenset(list(kwargs.items()))
+ else:
+ key = args
+ # instance method, set up cache if needed
+ if not hasattr(self, '_cache'):
+ self._cache = {}
+ if not func in self._cache:
+ self._cache[func] = {}
+ #
+ cache = self._cache[func]
+ if key not in cache:
+ cache[key] = func(self, *args, **kwargs)
+ return cache[key]
+
if not hasattr(func, '__class__'):
raise NotImplementedError("Use @lru_cache instead of memoize() for functions.")
# For methods, we want the cache on the object, not on the class, in order
# to not having to think about cache bloat and content becoming stale, so
# we cannot set up the cache here.
- return decorate(func, _memoize)
+ return _memoize
def ignore_view_kwargs(*args):
| {"golden_diff": "diff --git a/ietf/utils/decorators.py b/ietf/utils/decorators.py\n--- a/ietf/utils/decorators.py\n+++ b/ietf/utils/decorators.py\n@@ -4,7 +4,6 @@\n \n import datetime\n \n-from decorator import decorator, decorate\n from functools import wraps\n \n from django.conf import settings\n@@ -20,25 +19,29 @@\n from ietf.person.models import Person, PersonalApiKey, PersonApiKeyEvent\n from ietf.utils import log\n \n-@decorator\n-def skip_coverage(f, *args, **kwargs):\n- if settings.TEST_CODE_COVERAGE_CHECKER:\n- set_coverage_checking(False)\n- result = f(*args, **kwargs)\n- set_coverage_checking(True)\n- return result\n- else:\n- return f(*args, **kwargs)\n-\n-@decorator\n-def person_required(f, request, *args, **kwargs):\n- if not request.user.is_authenticated:\n- raise ValueError(\"The @person_required decorator should be called after @login_required.\")\n- try:\n- request.user.person\n- except Person.DoesNotExist:\n- return render(request, 'registration/missing_person.html')\n- return f(request, *args, **kwargs)\n+def skip_coverage(f):\n+ @wraps(f)\n+ def _wrapper(*args, **kwargs):\n+ if settings.TEST_CODE_COVERAGE_CHECKER:\n+ set_coverage_checking(False)\n+ result = f(*args, **kwargs)\n+ set_coverage_checking(True)\n+ return result\n+ else:\n+ return f(*args, **kwargs)\n+ return _wrapper\n+\n+def person_required(f):\n+ @wraps(f)\n+ def _wrapper(request, *args, **kwargs):\n+ if not request.user.is_authenticated:\n+ raise ValueError(\"The @person_required decorator should be called after @login_required.\")\n+ try:\n+ request.user.person\n+ except Person.DoesNotExist:\n+ return render(request, 'registration/missing_person.html')\n+ return f(request, *args, **kwargs)\n+ return _wrapper\n \n \n def require_api_key(f):\n@@ -90,29 +93,31 @@\n return _wrapper\n \n \n-def _memoize(func, self, *args, **kwargs):\n- '''Memoize wrapper for instance methods. Use @lru_cache for functions.'''\n- if kwargs: # frozenset is used to ensure hashability\n- key = args, frozenset(list(kwargs.items()))\n- else:\n- key = args\n- # instance method, set up cache if needed\n- if not hasattr(self, '_cache'):\n- self._cache = {}\n- if not func in self._cache:\n- self._cache[func] = {} \n- #\n- cache = self._cache[func]\n- if key not in cache:\n- cache[key] = func(self, *args, **kwargs)\n- return cache[key]\n def memoize(func):\n+ @wraps(func)\n+ def _memoize(self, *args, **kwargs):\n+ '''Memoize wrapper for instance methods. Use @lru_cache for functions.'''\n+ if kwargs: # frozenset is used to ensure hashability\n+ key = args, frozenset(list(kwargs.items()))\n+ else:\n+ key = args\n+ # instance method, set up cache if needed\n+ if not hasattr(self, '_cache'):\n+ self._cache = {}\n+ if not func in self._cache:\n+ self._cache[func] = {} \n+ #\n+ cache = self._cache[func]\n+ if key not in cache:\n+ cache[key] = func(self, *args, **kwargs)\n+ return cache[key]\n+\n if not hasattr(func, '__class__'):\n raise NotImplementedError(\"Use @lru_cache instead of memoize() for functions.\")\n # For methods, we want the cache on the object, not on the class, in order\n # to not having to think about cache bloat and content becoming stale, so\n # we cannot set up the cache here.\n- return decorate(func, _memoize)\n+ return _memoize\n \n \n def ignore_view_kwargs(*args):\n", "issue": "Refactor to drop dependency on decorator package\n### Description\n\nWe have a few decorators defined in `ietf/utils/decorator.py` that use the \"decorator\" package. This provides the `@decorator` decorator and a `decorate` method. 
The built-in Python `functools.wraps()` method can fulfill the needs here without the additional dependency. As far as I can tell we're not making use of any of the features provided by the \"decorator\" package.\r\n\r\nThe `@decorator` mechanism also seems to interfere with Django's `@method_decorator`, which led to refactoring the `requires_api_key` decorator already.\n\n### Code of Conduct\n\n- [X] I agree to follow the [IETF's Code of Conduct](https://github.com/ietf-tools/.github/blob/main/CODE_OF_CONDUCT.md)\n", "before_files": [{"content": "# Copyright The IETF Trust 2016-2020, All Rights Reserved\n# -*- coding: utf-8 -*-\n\n\nimport datetime\n\nfrom decorator import decorator, decorate\nfrom functools import wraps\n\nfrom django.conf import settings\nfrom django.contrib.auth import login\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.utils import timezone\nfrom django.utils.encoding import force_bytes\n\nimport debug # pyflakes:ignore\n\nfrom ietf.utils.test_runner import set_coverage_checking\nfrom ietf.person.models import Person, PersonalApiKey, PersonApiKeyEvent\nfrom ietf.utils import log\n\n@decorator\ndef skip_coverage(f, *args, **kwargs):\n if settings.TEST_CODE_COVERAGE_CHECKER:\n set_coverage_checking(False)\n result = f(*args, **kwargs)\n set_coverage_checking(True)\n return result\n else:\n return f(*args, **kwargs)\n\n@decorator\ndef person_required(f, request, *args, **kwargs):\n if not request.user.is_authenticated:\n raise ValueError(\"The @person_required decorator should be called after @login_required.\")\n try:\n request.user.person\n except Person.DoesNotExist:\n return render(request, 'registration/missing_person.html')\n return f(request, *args, **kwargs)\n\n\ndef require_api_key(f):\n @wraps(f)\n def _wrapper(request, *args, **kwargs):\n def err(code, text):\n return HttpResponse(text, status=code, content_type='text/plain')\n # Check method and get hash\n if request.method == 'POST':\n hash = request.POST.get('apikey')\n elif request.method == 'GET':\n hash = request.GET.get('apikey')\n else:\n return err(405, \"Method not allowed\")\n if not hash:\n return err(400, \"Missing apikey parameter\")\n # Check hash\n key = PersonalApiKey.validate_key(force_bytes(hash))\n if not key:\n return err(403, \"Invalid apikey\")\n # Check endpoint\n urlpath = request.META.get('PATH_INFO')\n if not (urlpath and urlpath == key.endpoint):\n return err(400, \"Apikey endpoint mismatch\") \n # Check time since regular login\n person = key.person\n last_login = person.user.last_login\n if not person.user.is_staff:\n time_limit = (timezone.now() - datetime.timedelta(days=settings.UTILS_APIKEY_GUI_LOGIN_LIMIT_DAYS))\n if last_login == None or last_login < time_limit:\n return err(400, \"Too long since last regular login\")\n # Log in\n login(request, person.user)\n # restore the user.last_login field, so it reflects only gui logins\n person.user.last_login = last_login\n person.user.save()\n # Update stats\n key.count += 1\n key.latest = timezone.now()\n key.save()\n PersonApiKeyEvent.objects.create(person=person, type='apikey_login', key=key, desc=\"Logged in with key ID %s, endpoint %s\" % (key.id, key.endpoint))\n # Execute decorated function\n try:\n ret = f(request, *args, **kwargs)\n except AttributeError as e:\n log.log(\"Bad API call: args: %s, kwargs: %s, exception: %s\" % (args, kwargs, e))\n return err(400, \"Bad or missing parameters\")\n return ret\n return _wrapper\n\n\ndef _memoize(func, self, *args, **kwargs):\n '''Memoize 
wrapper for instance methods. Use @lru_cache for functions.'''\n if kwargs: # frozenset is used to ensure hashability\n key = args, frozenset(list(kwargs.items()))\n else:\n key = args\n # instance method, set up cache if needed\n if not hasattr(self, '_cache'):\n self._cache = {}\n if not func in self._cache:\n self._cache[func] = {} \n #\n cache = self._cache[func]\n if key not in cache:\n cache[key] = func(self, *args, **kwargs)\n return cache[key]\ndef memoize(func):\n if not hasattr(func, '__class__'):\n raise NotImplementedError(\"Use @lru_cache instead of memoize() for functions.\")\n # For methods, we want the cache on the object, not on the class, in order\n # to not having to think about cache bloat and content becoming stale, so\n # we cannot set up the cache here.\n return decorate(func, _memoize)\n\n\ndef ignore_view_kwargs(*args):\n \"\"\"Ignore the specified kwargs if they are present\n\n Usage: \n @ignore_view_kwargs(\"ignore_arg1\", \"ignore_arg2\")\n def my_view(request, good_arg):\n ...\n\n This will allow my_view() to be used in url() paths that have zero, one, or both of\n ignore_arg1 and ignore_arg2 captured. These will be ignored, while good_arg will still\n be captured as usual.\n \"\"\"\n kwargs_to_ignore = args\n\n def decorate(view):\n @wraps(view)\n def wrapped(*args, **kwargs):\n for kwarg in kwargs_to_ignore:\n kwargs.pop(kwarg, None)\n return view(*args, **kwargs)\n\n return wrapped\n\n return decorate\n\n\n", "path": "ietf/utils/decorators.py"}], "after_files": [{"content": "# Copyright The IETF Trust 2016-2020, All Rights Reserved\n# -*- coding: utf-8 -*-\n\n\nimport datetime\n\nfrom functools import wraps\n\nfrom django.conf import settings\nfrom django.contrib.auth import login\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.utils import timezone\nfrom django.utils.encoding import force_bytes\n\nimport debug # pyflakes:ignore\n\nfrom ietf.utils.test_runner import set_coverage_checking\nfrom ietf.person.models import Person, PersonalApiKey, PersonApiKeyEvent\nfrom ietf.utils import log\n\ndef skip_coverage(f):\n @wraps(f)\n def _wrapper(*args, **kwargs):\n if settings.TEST_CODE_COVERAGE_CHECKER:\n set_coverage_checking(False)\n result = f(*args, **kwargs)\n set_coverage_checking(True)\n return result\n else:\n return f(*args, **kwargs)\n return _wrapper\n\ndef person_required(f):\n @wraps(f)\n def _wrapper(request, *args, **kwargs):\n if not request.user.is_authenticated:\n raise ValueError(\"The @person_required decorator should be called after @login_required.\")\n try:\n request.user.person\n except Person.DoesNotExist:\n return render(request, 'registration/missing_person.html')\n return f(request, *args, **kwargs)\n return _wrapper\n\n\ndef require_api_key(f):\n @wraps(f)\n def _wrapper(request, *args, **kwargs):\n def err(code, text):\n return HttpResponse(text, status=code, content_type='text/plain')\n # Check method and get hash\n if request.method == 'POST':\n hash = request.POST.get('apikey')\n elif request.method == 'GET':\n hash = request.GET.get('apikey')\n else:\n return err(405, \"Method not allowed\")\n if not hash:\n return err(400, \"Missing apikey parameter\")\n # Check hash\n key = PersonalApiKey.validate_key(force_bytes(hash))\n if not key:\n return err(403, \"Invalid apikey\")\n # Check endpoint\n urlpath = request.META.get('PATH_INFO')\n if not (urlpath and urlpath == key.endpoint):\n return err(400, \"Apikey endpoint mismatch\") \n # Check time since regular login\n person = key.person\n 
last_login = person.user.last_login\n if not person.user.is_staff:\n time_limit = (timezone.now() - datetime.timedelta(days=settings.UTILS_APIKEY_GUI_LOGIN_LIMIT_DAYS))\n if last_login == None or last_login < time_limit:\n return err(400, \"Too long since last regular login\")\n # Log in\n login(request, person.user)\n # restore the user.last_login field, so it reflects only gui logins\n person.user.last_login = last_login\n person.user.save()\n # Update stats\n key.count += 1\n key.latest = timezone.now()\n key.save()\n PersonApiKeyEvent.objects.create(person=person, type='apikey_login', key=key, desc=\"Logged in with key ID %s, endpoint %s\" % (key.id, key.endpoint))\n # Execute decorated function\n try:\n ret = f(request, *args, **kwargs)\n except AttributeError as e:\n log.log(\"Bad API call: args: %s, kwargs: %s, exception: %s\" % (args, kwargs, e))\n return err(400, \"Bad or missing parameters\")\n return ret\n return _wrapper\n\n\ndef memoize(func):\n @wraps(func)\n def _memoize(self, *args, **kwargs):\n '''Memoize wrapper for instance methods. Use @lru_cache for functions.'''\n if kwargs: # frozenset is used to ensure hashability\n key = args, frozenset(list(kwargs.items()))\n else:\n key = args\n # instance method, set up cache if needed\n if not hasattr(self, '_cache'):\n self._cache = {}\n if not func in self._cache:\n self._cache[func] = {} \n #\n cache = self._cache[func]\n if key not in cache:\n cache[key] = func(self, *args, **kwargs)\n return cache[key]\n\n if not hasattr(func, '__class__'):\n raise NotImplementedError(\"Use @lru_cache instead of memoize() for functions.\")\n # For methods, we want the cache on the object, not on the class, in order\n # to not having to think about cache bloat and content becoming stale, so\n # we cannot set up the cache here.\n return _memoize\n\n\ndef ignore_view_kwargs(*args):\n \"\"\"Ignore the specified kwargs if they are present\n\n Usage: \n @ignore_view_kwargs(\"ignore_arg1\", \"ignore_arg2\")\n def my_view(request, good_arg):\n ...\n\n This will allow my_view() to be used in url() paths that have zero, one, or both of\n ignore_arg1 and ignore_arg2 captured. These will be ignored, while good_arg will still\n be captured as usual.\n \"\"\"\n kwargs_to_ignore = args\n\n def decorate(view):\n @wraps(view)\n def wrapped(*args, **kwargs):\n for kwarg in kwargs_to_ignore:\n kwargs.pop(kwarg, None)\n return view(*args, **kwargs)\n\n return wrapped\n\n return decorate\n\n\n", "path": "ietf/utils/decorators.py"}]} | 1,927 | 933 |
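The datatracker patch above replaces the third-party `@decorator` helper with plain `functools.wraps` closures. Below is a toy, framework-free sketch of that rewrite pattern; the decorator and function names are invented and are not the datatracker's own.

```python
from functools import wraps

# With the "decorator" package, a decorator was written as one function taking
# (f, *args, **kwargs). With the standard library, the same thing is an explicit
# wrapper closure; @wraps copies __name__, __doc__, etc. onto the wrapper.
def skip_when_disabled(f):
    @wraps(f)
    def _wrapper(*args, **kwargs):
        if kwargs.pop("disabled", False):   # toy stand-in for a settings check
            return None
        return f(*args, **kwargs)
    return _wrapper


@skip_when_disabled
def add(a, b):
    """Add two numbers."""
    return a + b


print(add(1, 2))                  # 3
print(add(1, 2, disabled=True))   # None (wrapper short-circuits)
print(add.__name__, add.__doc__)  # metadata preserved by functools.wraps
```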
gh_patches_debug_12265 | rasdani/github-patches | git_diff | DDMAL__CantusDB-273 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Sources should automatically have segments
From #257:
> A source should always have a segment. It is either "Cantus Database" or "Sequence Database". It's a foreign key field. In cases where a source doesn't have a segment, it is probably a test source that we created.
> Desired behaviour: when creating a source, assign it to "Cantus Database" by default.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `django/cantusdb_project/main_app/models/source.py`
Content:
```
1 from django.db import models
2 from main_app.models import BaseModel
3 from django.contrib.auth import get_user_model
4
5
6 class Source(BaseModel):
7 cursus_choices = [("Monastic", "Monastic"), ("Secular", "Secular")]
8 source_status_choices = [
9 (
10 "Editing process (not all the fields have been proofread)",
11 "Editing process (not all the fields have been proofread)",
12 ),
13 ("Published / Complete", "Published / Complete"),
14 ("Published / Proofread pending", "Published / Proofread pending"),
15 ("Unpublished / Editing process", "Unpublished / Editing process"),
16 ("Unpublished / Indexing process", "Unpublished / Indexing process"),
17 ("Unpublished / Proofread pending", "Unpublished / Proofread pending"),
18 ("Unpublished / Proofreading process", "Unpublished / Proofreading process"),
19 ]
20
21 # sources with public=False cannot be accessed by its url (access denied) and do not appear in source list
22 public = models.BooleanField(blank=True, null=True)
23 # sources with visible=False can be accessed by typing in the url, but do not appear in source list
24 visible = models.BooleanField(blank=True, null=True)
25 title = models.CharField(
26 max_length=255,
27 help_text="Full Manuscript Identification (City, Archive, Shelf-mark)",
28 )
29 # the siglum field as implemented on the old Cantus is composed of both the RISM siglum and the shelfmark
30 # it is a human-readable ID for a source
31 siglum = models.CharField(
32 max_length=63,
33 null=True,
34 blank=True,
35 help_text="RISM-style siglum + Shelf-mark (e.g. GB-Ob 202).",
36 )
37 # the RISM siglum uniquely identifies a library or holding institution
38 rism_siglum = models.ForeignKey(
39 "RismSiglum", on_delete=models.PROTECT, null=True, blank=True,
40 )
41 provenance = models.ForeignKey(
42 "Provenance",
43 on_delete=models.PROTECT,
44 help_text="If the origin is unknown, select a location where the source was "
45 "used later in its lifetime and provide details in the "
46 '"Provenance notes" field.',
47 null=True,
48 blank=True,
49 )
50 provenance_notes = models.TextField(
51 blank=True,
52 null=True,
53 help_text="More exact indication of the provenance (if necessary)",
54 )
55 full_source = models.BooleanField(blank=True, null=True)
56 date = models.CharField(
57 blank=True,
58 null=True,
59 max_length=63,
60 help_text='Date of the manuscript (e.g. "1200s", "1300-1350", etc.)',
61 )
62 century = models.ManyToManyField("Century", related_name="sources")
63 notation = models.ManyToManyField("Notation", related_name="sources")
64 cursus = models.CharField(
65 blank=True, null=True, choices=cursus_choices, max_length=63
66 )
67 # TODO: Fill this field up with JSON info when I have access to the Users
68 current_editors = models.ManyToManyField(get_user_model(), related_name="sources_user_can_edit")
69 inventoried_by = models.ManyToManyField(
70 "Indexer", related_name="sources_inventoried"
71 )
72 full_text_entered_by = models.ManyToManyField(
73 "Indexer", related_name="entered_full_text_for_sources"
74 )
75 melodies_entered_by = models.ManyToManyField(
76 "Indexer", related_name="entered_melody_for_sources"
77 )
78 proofreaders = models.ManyToManyField("Indexer", related_name="proofread_sources")
79 other_editors = models.ManyToManyField("Indexer", related_name="edited_sources")
80 segment = models.ForeignKey(
81 "Segment", on_delete=models.PROTECT, blank=True, null=True
82 )
83 source_status = models.CharField(blank=True, null=True, max_length=255)
84 complete_inventory = models.BooleanField(blank=True, null=True)
85 summary = models.TextField(blank=True, null=True)
86 liturgical_occasions = models.TextField(blank=True, null=True)
87 description = models.TextField(blank=True, null=True)
88 selected_bibliography = models.TextField(blank=True, null=True)
89 image_link = models.URLField(
90 blank=True,
91 null=True,
92 help_text='HTTP link to the image gallery of the source.',
93 )
94 indexing_notes = models.TextField(blank=True, null=True)
95 indexing_date = models.TextField(blank=True, null=True)
96 json_info = models.JSONField(blank=True, null=True)
97 fragmentarium_id = models.CharField(max_length=15, blank=True, null=True)
98 dact_id = models.CharField(max_length=15, blank=True, null=True)
99
100 def number_of_chants(self) -> int:
101 """Returns the number of Chants and Sequences in this Source."""
102 return self.chant_set.count() + self.sequence_set.count()
103
104 def number_of_melodies(self) -> int:
105 """Returns the number of Chants in this Source that have melodies."""
106 return self.chant_set.filter(volpiano__isnull=False).count()
107
108 def __str__(self):
109 string = '{t} ({i})'.format(t=self.title, i=self.id)
110 return string
111
112
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/django/cantusdb_project/main_app/models/source.py b/django/cantusdb_project/main_app/models/source.py
--- a/django/cantusdb_project/main_app/models/source.py
+++ b/django/cantusdb_project/main_app/models/source.py
@@ -1,5 +1,5 @@
from django.db import models
-from main_app.models import BaseModel
+from main_app.models import BaseModel, Segment
from django.contrib.auth import get_user_model
@@ -109,4 +109,8 @@
string = '{t} ({i})'.format(t=self.title, i=self.id)
return string
-
\ No newline at end of file
+ def save(self, *args, **kwargs):
+ # when creating a source, assign it to "Cantus Database" by default
+ cantus_db_segment = Segment.objects.get(name="CANTUS Database")
+ self.segment = cantus_db_segment
+ super().save(*args, **kwargs)
| {"golden_diff": "diff --git a/django/cantusdb_project/main_app/models/source.py b/django/cantusdb_project/main_app/models/source.py\n--- a/django/cantusdb_project/main_app/models/source.py\n+++ b/django/cantusdb_project/main_app/models/source.py\n@@ -1,5 +1,5 @@\n from django.db import models\n-from main_app.models import BaseModel\n+from main_app.models import BaseModel, Segment\n from django.contrib.auth import get_user_model\n \n \n@@ -109,4 +109,8 @@\n string = '{t} ({i})'.format(t=self.title, i=self.id)\n return string\n \n- \n\\ No newline at end of file\n+ def save(self, *args, **kwargs):\n+ # when creating a source, assign it to \"Cantus Database\" by default\n+ cantus_db_segment = Segment.objects.get(name=\"CANTUS Database\")\n+ self.segment = cantus_db_segment\n+ super().save(*args, **kwargs)\n", "issue": "Sources should automatically have segments\nFrom #257:\r\n\r\n> A source should always have a segment. It is either \"Cantus Database\" or \"Sequence Database\". It's a foreign key field. In cases where a source doesn't have a segment, it is probably a test source that we created.\r\n> Desired behaviour: when creating a source, assign it to \"Cantus Database\" by default.\n", "before_files": [{"content": "from django.db import models\nfrom main_app.models import BaseModel\nfrom django.contrib.auth import get_user_model\n\n\nclass Source(BaseModel):\n cursus_choices = [(\"Monastic\", \"Monastic\"), (\"Secular\", \"Secular\")]\n source_status_choices = [\n (\n \"Editing process (not all the fields have been proofread)\",\n \"Editing process (not all the fields have been proofread)\",\n ),\n (\"Published / Complete\", \"Published / Complete\"),\n (\"Published / Proofread pending\", \"Published / Proofread pending\"),\n (\"Unpublished / Editing process\", \"Unpublished / Editing process\"),\n (\"Unpublished / Indexing process\", \"Unpublished / Indexing process\"),\n (\"Unpublished / Proofread pending\", \"Unpublished / Proofread pending\"),\n (\"Unpublished / Proofreading process\", \"Unpublished / Proofreading process\"),\n ]\n\n # sources with public=False cannot be accessed by its url (access denied) and do not appear in source list\n public = models.BooleanField(blank=True, null=True)\n # sources with visible=False can be accessed by typing in the url, but do not appear in source list\n visible = models.BooleanField(blank=True, null=True)\n title = models.CharField(\n max_length=255,\n help_text=\"Full Manuscript Identification (City, Archive, Shelf-mark)\",\n )\n # the siglum field as implemented on the old Cantus is composed of both the RISM siglum and the shelfmark\n # it is a human-readable ID for a source\n siglum = models.CharField(\n max_length=63, \n null=True, \n blank=True,\n help_text=\"RISM-style siglum + Shelf-mark (e.g. 
GB-Ob 202).\",\n )\n # the RISM siglum uniquely identifies a library or holding institution\n rism_siglum = models.ForeignKey(\n \"RismSiglum\", on_delete=models.PROTECT, null=True, blank=True,\n )\n provenance = models.ForeignKey(\n \"Provenance\",\n on_delete=models.PROTECT,\n help_text=\"If the origin is unknown, select a location where the source was \"\n \"used later in its lifetime and provide details in the \"\n '\"Provenance notes\" field.',\n null=True,\n blank=True,\n )\n provenance_notes = models.TextField(\n blank=True,\n null=True,\n help_text=\"More exact indication of the provenance (if necessary)\",\n )\n full_source = models.BooleanField(blank=True, null=True)\n date = models.CharField(\n blank=True,\n null=True,\n max_length=63,\n help_text='Date of the manuscript (e.g. \"1200s\", \"1300-1350\", etc.)',\n )\n century = models.ManyToManyField(\"Century\", related_name=\"sources\")\n notation = models.ManyToManyField(\"Notation\", related_name=\"sources\")\n cursus = models.CharField(\n blank=True, null=True, choices=cursus_choices, max_length=63\n )\n # TODO: Fill this field up with JSON info when I have access to the Users\n current_editors = models.ManyToManyField(get_user_model(), related_name=\"sources_user_can_edit\")\n inventoried_by = models.ManyToManyField(\n \"Indexer\", related_name=\"sources_inventoried\"\n )\n full_text_entered_by = models.ManyToManyField(\n \"Indexer\", related_name=\"entered_full_text_for_sources\"\n )\n melodies_entered_by = models.ManyToManyField(\n \"Indexer\", related_name=\"entered_melody_for_sources\"\n )\n proofreaders = models.ManyToManyField(\"Indexer\", related_name=\"proofread_sources\")\n other_editors = models.ManyToManyField(\"Indexer\", related_name=\"edited_sources\")\n segment = models.ForeignKey(\n \"Segment\", on_delete=models.PROTECT, blank=True, null=True\n )\n source_status = models.CharField(blank=True, null=True, max_length=255)\n complete_inventory = models.BooleanField(blank=True, null=True)\n summary = models.TextField(blank=True, null=True)\n liturgical_occasions = models.TextField(blank=True, null=True)\n description = models.TextField(blank=True, null=True)\n selected_bibliography = models.TextField(blank=True, null=True)\n image_link = models.URLField(\n blank=True, \n null=True,\n help_text='HTTP link to the image gallery of the source.',\n )\n indexing_notes = models.TextField(blank=True, null=True)\n indexing_date = models.TextField(blank=True, null=True)\n json_info = models.JSONField(blank=True, null=True)\n fragmentarium_id = models.CharField(max_length=15, blank=True, null=True)\n dact_id = models.CharField(max_length=15, blank=True, null=True)\n\n def number_of_chants(self) -> int:\n \"\"\"Returns the number of Chants and Sequences in this Source.\"\"\"\n return self.chant_set.count() + self.sequence_set.count()\n\n def number_of_melodies(self) -> int:\n \"\"\"Returns the number of Chants in this Source that have melodies.\"\"\"\n return self.chant_set.filter(volpiano__isnull=False).count()\n\n def __str__(self):\n string = '{t} ({i})'.format(t=self.title, i=self.id)\n return string\n\n ", "path": "django/cantusdb_project/main_app/models/source.py"}], "after_files": [{"content": "from django.db import models\nfrom main_app.models import BaseModel, Segment\nfrom django.contrib.auth import get_user_model\n\n\nclass Source(BaseModel):\n cursus_choices = [(\"Monastic\", \"Monastic\"), (\"Secular\", \"Secular\")]\n source_status_choices = [\n (\n \"Editing process (not all the fields have been proofread)\",\n 
\"Editing process (not all the fields have been proofread)\",\n ),\n (\"Published / Complete\", \"Published / Complete\"),\n (\"Published / Proofread pending\", \"Published / Proofread pending\"),\n (\"Unpublished / Editing process\", \"Unpublished / Editing process\"),\n (\"Unpublished / Indexing process\", \"Unpublished / Indexing process\"),\n (\"Unpublished / Proofread pending\", \"Unpublished / Proofread pending\"),\n (\"Unpublished / Proofreading process\", \"Unpublished / Proofreading process\"),\n ]\n\n # sources with public=False cannot be accessed by its url (access denied) and do not appear in source list\n public = models.BooleanField(blank=True, null=True)\n # sources with visible=False can be accessed by typing in the url, but do not appear in source list\n visible = models.BooleanField(blank=True, null=True)\n title = models.CharField(\n max_length=255,\n help_text=\"Full Manuscript Identification (City, Archive, Shelf-mark)\",\n )\n # the siglum field as implemented on the old Cantus is composed of both the RISM siglum and the shelfmark\n # it is a human-readable ID for a source\n siglum = models.CharField(\n max_length=63, \n null=True, \n blank=True,\n help_text=\"RISM-style siglum + Shelf-mark (e.g. GB-Ob 202).\",\n )\n # the RISM siglum uniquely identifies a library or holding institution\n rism_siglum = models.ForeignKey(\n \"RismSiglum\", on_delete=models.PROTECT, null=True, blank=True,\n )\n provenance = models.ForeignKey(\n \"Provenance\",\n on_delete=models.PROTECT,\n help_text=\"If the origin is unknown, select a location where the source was \"\n \"used later in its lifetime and provide details in the \"\n '\"Provenance notes\" field.',\n null=True,\n blank=True,\n )\n provenance_notes = models.TextField(\n blank=True,\n null=True,\n help_text=\"More exact indication of the provenance (if necessary)\",\n )\n full_source = models.BooleanField(blank=True, null=True)\n date = models.CharField(\n blank=True,\n null=True,\n max_length=63,\n help_text='Date of the manuscript (e.g. 
\"1200s\", \"1300-1350\", etc.)',\n )\n century = models.ManyToManyField(\"Century\", related_name=\"sources\")\n notation = models.ManyToManyField(\"Notation\", related_name=\"sources\")\n cursus = models.CharField(\n blank=True, null=True, choices=cursus_choices, max_length=63\n )\n # TODO: Fill this field up with JSON info when I have access to the Users\n current_editors = models.ManyToManyField(get_user_model(), related_name=\"sources_user_can_edit\")\n inventoried_by = models.ManyToManyField(\n \"Indexer\", related_name=\"sources_inventoried\"\n )\n full_text_entered_by = models.ManyToManyField(\n \"Indexer\", related_name=\"entered_full_text_for_sources\"\n )\n melodies_entered_by = models.ManyToManyField(\n \"Indexer\", related_name=\"entered_melody_for_sources\"\n )\n proofreaders = models.ManyToManyField(\"Indexer\", related_name=\"proofread_sources\")\n other_editors = models.ManyToManyField(\"Indexer\", related_name=\"edited_sources\")\n segment = models.ForeignKey(\n \"Segment\", on_delete=models.PROTECT, blank=True, null=True\n )\n source_status = models.CharField(blank=True, null=True, max_length=255)\n complete_inventory = models.BooleanField(blank=True, null=True)\n summary = models.TextField(blank=True, null=True)\n liturgical_occasions = models.TextField(blank=True, null=True)\n description = models.TextField(blank=True, null=True)\n selected_bibliography = models.TextField(blank=True, null=True)\n image_link = models.URLField(\n blank=True, \n null=True,\n help_text='HTTP link to the image gallery of the source.',\n )\n indexing_notes = models.TextField(blank=True, null=True)\n indexing_date = models.TextField(blank=True, null=True)\n json_info = models.JSONField(blank=True, null=True)\n fragmentarium_id = models.CharField(max_length=15, blank=True, null=True)\n dact_id = models.CharField(max_length=15, blank=True, null=True)\n\n def number_of_chants(self) -> int:\n \"\"\"Returns the number of Chants and Sequences in this Source.\"\"\"\n return self.chant_set.count() + self.sequence_set.count()\n\n def number_of_melodies(self) -> int:\n \"\"\"Returns the number of Chants in this Source that have melodies.\"\"\"\n return self.chant_set.filter(volpiano__isnull=False).count()\n\n def __str__(self):\n string = '{t} ({i})'.format(t=self.title, i=self.id)\n return string\n\n def save(self, *args, **kwargs):\n # when creating a source, assign it to \"Cantus Database\" by default\n cantus_db_segment = Segment.objects.get(name=\"CANTUS Database\")\n self.segment = cantus_db_segment\n super().save(*args, **kwargs)\n", "path": "django/cantusdb_project/main_app/models/source.py"}]} | 1,709 | 220 |
gh_patches_debug_7058 | rasdani/github-patches | git_diff | Kinto__kinto-1139 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Consistency on PUT with mandatory schema fields
While working on #790 I realized that something is not clear in our specifications.
Currently, if a resource has a mandatory field (eg. groups `members`), then we cannot do a `PUT` with just the `permissions` values. This is because a PUT can lead to a creation, and the `members` fields has to be provided.
On other resources, which have no mandatory field, it is perfectly possible to only provide `permissions`.
But, I believe we should make every resources behave the same way.
For example, when we'll implement the edition of permissions in Kinto-admin, we don't want to have to pass the `data` if it was not changed.
Two solutions:
- Add a default value (`[]`) for the groups members attribute (_my preferred one, trivial and not absurd_)
- Allow `data` to be omitted only when the `PUT` replaces an existing object (_more complex to implement, but would work for any resource with mandatory fields_)
--- END ISSUE ---
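For context, the first proposed solution maps directly onto colander's `missing` argument: a sequence node with `missing=[]` deserializes to an empty list when the field is absent instead of raising `Invalid`. Below is a minimal, self-contained sketch of that idea, not necessarily the patch that was applied.

```python
# Hedged sketch of the "default value" option using plain colander.
import colander

class GroupSchema(colander.MappingSchema):
    members = colander.SchemaNode(
        colander.Sequence(),
        colander.SchemaNode(colander.String()),
        missing=[],
    )

schema = GroupSchema()
print(schema.deserialize({}))                      # {'members': []}
print(schema.deserialize({"members": ["alice"]}))  # {'members': ['alice']}
```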
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/views/groups.py`
Content:
```
1 import colander
2
3 from kinto.core import resource, utils
4 from kinto.core.events import ResourceChanged, ACTIONS
5 from pyramid.events import subscriber
6
7
8 def validate_member(node, member):
9 if member.startswith('/buckets/') or member == 'system.Everyone':
10 raise colander.Invalid(node, "'{}' is not a valid user ID.".format(member))
11
12
13 class GroupSchema(resource.ResourceSchema):
14 members = colander.SchemaNode(colander.Sequence(),
15 colander.SchemaNode(colander.String(),
16 validator=validate_member))
17
18
19 @resource.register(name='group',
20 collection_path='/buckets/{{bucket_id}}/groups',
21 record_path='/buckets/{{bucket_id}}/groups/{{id}}')
22 class Group(resource.ShareableResource):
23 schema = GroupSchema
24
25 def get_parent_id(self, request):
26 bucket_id = request.matchdict['bucket_id']
27 parent_id = utils.instance_uri(request, 'bucket', id=bucket_id)
28 return parent_id
29
30
31 @subscriber(ResourceChanged,
32 for_resources=('group',),
33 for_actions=(ACTIONS.DELETE,))
34 def on_groups_deleted(event):
35 """Some groups were deleted, remove them from users principals.
36 """
37 permission_backend = event.request.registry.permission
38
39 for change in event.impacted_records:
40 group = change['old']
41 bucket_id = event.payload['bucket_id']
42 group_uri = utils.instance_uri(event.request, 'group',
43 bucket_id=bucket_id,
44 id=group['id'])
45
46 permission_backend.remove_principal(group_uri)
47
48
49 @subscriber(ResourceChanged,
50 for_resources=('group',),
51 for_actions=(ACTIONS.CREATE, ACTIONS.UPDATE))
52 def on_groups_changed(event):
53 """Some groups were changed, update users principals.
54 """
55 permission_backend = event.request.registry.permission
56
57 for change in event.impacted_records:
58 if 'old' in change:
59 existing_record_members = set(change['old'].get('members', []))
60 else:
61 existing_record_members = set()
62
63 group = change['new']
64 group_uri = '/buckets/{bucket_id}/groups/{id}'.format(id=group['id'],
65 **event.payload)
66 new_record_members = set(group.get('members', []))
67 new_members = new_record_members - existing_record_members
68 removed_members = existing_record_members - new_record_members
69
70 for member in new_members:
71 # Add the group to the member principal.
72 permission_backend.add_user_principal(member, group_uri)
73
74 for member in removed_members:
75 # Remove the group from the member principal.
76 permission_backend.remove_user_principal(member, group_uri)
77
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kinto/views/groups.py b/kinto/views/groups.py
--- a/kinto/views/groups.py
+++ b/kinto/views/groups.py
@@ -13,7 +13,8 @@
class GroupSchema(resource.ResourceSchema):
members = colander.SchemaNode(colander.Sequence(),
colander.SchemaNode(colander.String(),
- validator=validate_member))
+ validator=validate_member),
+ missing=[])
@resource.register(name='group',
| {"golden_diff": "diff --git a/kinto/views/groups.py b/kinto/views/groups.py\n--- a/kinto/views/groups.py\n+++ b/kinto/views/groups.py\n@@ -13,7 +13,8 @@\n class GroupSchema(resource.ResourceSchema):\n members = colander.SchemaNode(colander.Sequence(),\n colander.SchemaNode(colander.String(),\n- validator=validate_member))\n+ validator=validate_member),\n+ missing=[])\n \n \n @resource.register(name='group',\n", "issue": "Consistency on PUT with mandatory schema fields\nWhile working on #790 I realize that there is something not clear in our specifications.\n\nCurrently, if a resource has a mandatory field (eg. groups `members`), then we cannot do a `PUT` with just the `permissions` values. This is because a PUT can lead to a creation, and the `members` fields has to be provided.\n\nOn other resources, which have no mandatory field, it is perfectly possible to only provide `permissions`.\n\nBut, I believe we should make every resources behave the same way.\n\nFor example, when we'll implement the edition of permissions in Kinto-admin, we don't want to have to pass the `data` if it was not changed.\n\nTwo solutions:\n- Add a default value (`[]`) for the groups members attribute (_my prefered one, trivial and not absurd_)\n- Allow `data` to be omitted only when the `PUT` replaces an existing object (_more complex to implement, but would work for any resource with mandatory fields_)\n\nConsistency on PUT with mandatory schema fields\nWhile working on #790 I realize that there is something not clear in our specifications.\n\nCurrently, if a resource has a mandatory field (eg. groups `members`), then we cannot do a `PUT` with just the `permissions` values. This is because a PUT can lead to a creation, and the `members` fields has to be provided.\n\nOn other resources, which have no mandatory field, it is perfectly possible to only provide `permissions`.\n\nBut, I believe we should make every resources behave the same way.\n\nFor example, when we'll implement the edition of permissions in Kinto-admin, we don't want to have to pass the `data` if it was not changed.\n\nTwo solutions:\n- Add a default value (`[]`) for the groups members attribute (_my prefered one, trivial and not absurd_)\n- Allow `data` to be omitted only when the `PUT` replaces an existing object (_more complex to implement, but would work for any resource with mandatory fields_)\n\n", "before_files": [{"content": "import colander\n\nfrom kinto.core import resource, utils\nfrom kinto.core.events import ResourceChanged, ACTIONS\nfrom pyramid.events import subscriber\n\n\ndef validate_member(node, member):\n if member.startswith('/buckets/') or member == 'system.Everyone':\n raise colander.Invalid(node, \"'{}' is not a valid user ID.\".format(member))\n\n\nclass GroupSchema(resource.ResourceSchema):\n members = colander.SchemaNode(colander.Sequence(),\n colander.SchemaNode(colander.String(),\n validator=validate_member))\n\n\[email protected](name='group',\n collection_path='/buckets/{{bucket_id}}/groups',\n record_path='/buckets/{{bucket_id}}/groups/{{id}}')\nclass Group(resource.ShareableResource):\n schema = GroupSchema\n\n def get_parent_id(self, request):\n bucket_id = request.matchdict['bucket_id']\n parent_id = utils.instance_uri(request, 'bucket', id=bucket_id)\n return parent_id\n\n\n@subscriber(ResourceChanged,\n for_resources=('group',),\n for_actions=(ACTIONS.DELETE,))\ndef on_groups_deleted(event):\n \"\"\"Some groups were deleted, remove them from users principals.\n \"\"\"\n permission_backend = 
event.request.registry.permission\n\n for change in event.impacted_records:\n group = change['old']\n bucket_id = event.payload['bucket_id']\n group_uri = utils.instance_uri(event.request, 'group',\n bucket_id=bucket_id,\n id=group['id'])\n\n permission_backend.remove_principal(group_uri)\n\n\n@subscriber(ResourceChanged,\n for_resources=('group',),\n for_actions=(ACTIONS.CREATE, ACTIONS.UPDATE))\ndef on_groups_changed(event):\n \"\"\"Some groups were changed, update users principals.\n \"\"\"\n permission_backend = event.request.registry.permission\n\n for change in event.impacted_records:\n if 'old' in change:\n existing_record_members = set(change['old'].get('members', []))\n else:\n existing_record_members = set()\n\n group = change['new']\n group_uri = '/buckets/{bucket_id}/groups/{id}'.format(id=group['id'],\n **event.payload)\n new_record_members = set(group.get('members', []))\n new_members = new_record_members - existing_record_members\n removed_members = existing_record_members - new_record_members\n\n for member in new_members:\n # Add the group to the member principal.\n permission_backend.add_user_principal(member, group_uri)\n\n for member in removed_members:\n # Remove the group from the member principal.\n permission_backend.remove_user_principal(member, group_uri)\n", "path": "kinto/views/groups.py"}], "after_files": [{"content": "import colander\n\nfrom kinto.core import resource, utils\nfrom kinto.core.events import ResourceChanged, ACTIONS\nfrom pyramid.events import subscriber\n\n\ndef validate_member(node, member):\n if member.startswith('/buckets/') or member == 'system.Everyone':\n raise colander.Invalid(node, \"'{}' is not a valid user ID.\".format(member))\n\n\nclass GroupSchema(resource.ResourceSchema):\n members = colander.SchemaNode(colander.Sequence(),\n colander.SchemaNode(colander.String(),\n validator=validate_member),\n missing=[])\n\n\[email protected](name='group',\n collection_path='/buckets/{{bucket_id}}/groups',\n record_path='/buckets/{{bucket_id}}/groups/{{id}}')\nclass Group(resource.ShareableResource):\n schema = GroupSchema\n\n def get_parent_id(self, request):\n bucket_id = request.matchdict['bucket_id']\n parent_id = utils.instance_uri(request, 'bucket', id=bucket_id)\n return parent_id\n\n\n@subscriber(ResourceChanged,\n for_resources=('group',),\n for_actions=(ACTIONS.DELETE,))\ndef on_groups_deleted(event):\n \"\"\"Some groups were deleted, remove them from users principals.\n \"\"\"\n permission_backend = event.request.registry.permission\n\n for change in event.impacted_records:\n group = change['old']\n bucket_id = event.payload['bucket_id']\n group_uri = utils.instance_uri(event.request, 'group',\n bucket_id=bucket_id,\n id=group['id'])\n\n permission_backend.remove_principal(group_uri)\n\n\n@subscriber(ResourceChanged,\n for_resources=('group',),\n for_actions=(ACTIONS.CREATE, ACTIONS.UPDATE))\ndef on_groups_changed(event):\n \"\"\"Some groups were changed, update users principals.\n \"\"\"\n permission_backend = event.request.registry.permission\n\n for change in event.impacted_records:\n if 'old' in change:\n existing_record_members = set(change['old'].get('members', []))\n else:\n existing_record_members = set()\n\n group = change['new']\n group_uri = '/buckets/{bucket_id}/groups/{id}'.format(id=group['id'],\n **event.payload)\n new_record_members = set(group.get('members', []))\n new_members = new_record_members - existing_record_members\n removed_members = existing_record_members - new_record_members\n\n for member in 
new_members:\n # Add the group to the member principal.\n permission_backend.add_user_principal(member, group_uri)\n\n for member in removed_members:\n # Remove the group from the member principal.\n permission_backend.remove_user_principal(member, group_uri)\n", "path": "kinto/views/groups.py"}]} | 1,375 | 98 |
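As a client-side illustration of the goal stated in the issue, the sketch below shows a `PUT` that carries only `permissions`. With a default for `members` in place, such a request should validate even when it creates the group. The server URL, credentials, and group name are placeholders.

```python
# Hedged sketch: Kinto request bodies use top-level "data" and "permissions" keys;
# here "data" is intentionally omitted.
import requests

response = requests.put(
    "https://kinto.example.com/v1/buckets/blog/groups/reviewers",
    json={"permissions": {"write": ["basicauth:alice"]}},
    auth=("token", "secret"),
)
print(response.status_code)  # expected 200/201 once members has a default
```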
gh_patches_debug_15873 | rasdani/github-patches | git_diff | frappe__frappe-13917 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Review: Connected App: Difficult to see how Token Cache get_expires_in could be any more wrong
https://github.com/frappe/frappe/blob/86e512452d77f3e61405fd33ecd1bf881790ae18/frappe/integrations/doctype/token_cache/token_cache.py#L53
PR to follow
--- END ISSUE ---
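For readers of the linked code (reproduced in the file below), the method has two separate problems: `timedelta(self.expires_in)` interprets the value as days because `days` is the first positional argument, and the subtraction is reversed, so a still-valid token yields a negative number. A hedged sketch of the intended computation, outside of Frappe:

```python
# Hedged sketch of the intended semantics: positive while the token is still
# valid, negative once it has expired.
from datetime import datetime, timedelta

def seconds_until_expiry(modified: datetime, expires_in: int, now: datetime) -> float:
    expiry_time = modified + timedelta(seconds=expires_in)
    return (expiry_time - now).total_seconds()

issued = datetime(2021, 1, 1, 12, 0, 0)
print(seconds_until_expiry(issued, 3600, datetime(2021, 1, 1, 12, 30, 0)))  # 1800.0
print(seconds_until_expiry(issued, 3600, datetime(2021, 1, 1, 14, 0, 0)))   # -3600.0
```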
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `frappe/integrations/doctype/token_cache/token_cache.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright (c) 2019, Frappe Technologies and contributors
3 # For license information, please see license.txt
4
5 from __future__ import unicode_literals
6 from datetime import datetime, timedelta
7
8 import frappe
9 from frappe import _
10 from frappe.utils import cstr, cint
11 from frappe.model.document import Document
12
13 class TokenCache(Document):
14
15 def get_auth_header(self):
16 if self.access_token:
17 headers = {'Authorization': 'Bearer ' + self.get_password('access_token')}
18 return headers
19
20 raise frappe.exceptions.DoesNotExistError
21
22 def update_data(self, data):
23 """
24 Store data returned by authorization flow.
25
26 Params:
27 data - Dict with access_token, refresh_token, expires_in and scope.
28 """
29 token_type = cstr(data.get('token_type', '')).lower()
30 if token_type not in ['bearer', 'mac']:
31 frappe.throw(_('Received an invalid token type.'))
32 # 'Bearer' or 'MAC'
33 token_type = token_type.title() if token_type == 'bearer' else token_type.upper()
34
35 self.token_type = token_type
36 self.access_token = cstr(data.get('access_token', ''))
37 self.refresh_token = cstr(data.get('refresh_token', ''))
38 self.expires_in = cint(data.get('expires_in', 0))
39
40 new_scopes = data.get('scope')
41 if new_scopes:
42 if isinstance(new_scopes, str):
43 new_scopes = new_scopes.split(' ')
44 if isinstance(new_scopes, list):
45 self.scopes = None
46 for scope in new_scopes:
47 self.append('scopes', {'scope': scope})
48
49 self.state = None
50 self.save(ignore_permissions=True)
51 frappe.db.commit()
52 return self
53
54 def get_expires_in(self):
55 expiry_time = frappe.utils.get_datetime(self.modified) + timedelta(self.expires_in)
56 return (datetime.now() - expiry_time).total_seconds()
57
58 def is_expired(self):
59 return self.get_expires_in() < 0
60
61 def get_json(self):
62 return {
63 'access_token': self.get_password('access_token', ''),
64 'refresh_token': self.get_password('refresh_token', ''),
65 'expires_in': self.get_expires_in(),
66 'token_type': self.token_type
67 }
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/frappe/integrations/doctype/token_cache/token_cache.py b/frappe/integrations/doctype/token_cache/token_cache.py
--- a/frappe/integrations/doctype/token_cache/token_cache.py
+++ b/frappe/integrations/doctype/token_cache/token_cache.py
@@ -3,7 +3,7 @@
# For license information, please see license.txt
from __future__ import unicode_literals
-from datetime import datetime, timedelta
+from datetime import timedelta
import frappe
from frappe import _
@@ -52,8 +52,8 @@
return self
def get_expires_in(self):
- expiry_time = frappe.utils.get_datetime(self.modified) + timedelta(self.expires_in)
- return (datetime.now() - expiry_time).total_seconds()
+ expiry_time = frappe.utils.get_datetime(self.modified) + timedelta(seconds=self.expires_in)
+ return (expiry_time - frappe.utils.now_datetime()).total_seconds()
def is_expired(self):
return self.get_expires_in() < 0
| {"golden_diff": "diff --git a/frappe/integrations/doctype/token_cache/token_cache.py b/frappe/integrations/doctype/token_cache/token_cache.py\n--- a/frappe/integrations/doctype/token_cache/token_cache.py\n+++ b/frappe/integrations/doctype/token_cache/token_cache.py\n@@ -3,7 +3,7 @@\n # For license information, please see license.txt\n \n from __future__ import unicode_literals\n-from datetime import datetime, timedelta\n+from datetime import timedelta\n \n import frappe\n from frappe import _\n@@ -52,8 +52,8 @@\n \t\treturn self\n \n \tdef get_expires_in(self):\n-\t\texpiry_time = frappe.utils.get_datetime(self.modified) + timedelta(self.expires_in)\n-\t\treturn (datetime.now() - expiry_time).total_seconds()\n+\t\texpiry_time = frappe.utils.get_datetime(self.modified) + timedelta(seconds=self.expires_in)\n+\t\treturn (expiry_time - frappe.utils.now_datetime()).total_seconds()\n \n \tdef is_expired(self):\n \t\treturn self.get_expires_in() < 0\n", "issue": "Review: Connected App: Difficult to see how Token Cache get_expires_in could be any more wrong\nhttps://github.com/frappe/frappe/blob/86e512452d77f3e61405fd33ecd1bf881790ae18/frappe/integrations/doctype/token_cache/token_cache.py#L53\r\n\r\nPR to follow\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2019, Frappe Technologies and contributors\n# For license information, please see license.txt\n\nfrom __future__ import unicode_literals\nfrom datetime import datetime, timedelta\n\nimport frappe\nfrom frappe import _\nfrom frappe.utils import cstr, cint\nfrom frappe.model.document import Document\n\nclass TokenCache(Document):\n\n\tdef get_auth_header(self):\n\t\tif self.access_token:\n\t\t\theaders = {'Authorization': 'Bearer ' + self.get_password('access_token')}\n\t\t\treturn headers\n\n\t\traise frappe.exceptions.DoesNotExistError\n\n\tdef update_data(self, data):\n\t\t\"\"\"\n\t\tStore data returned by authorization flow.\n\n\t\tParams:\n\t\tdata - Dict with access_token, refresh_token, expires_in and scope.\n\t\t\"\"\"\n\t\ttoken_type = cstr(data.get('token_type', '')).lower()\n\t\tif token_type not in ['bearer', 'mac']:\n\t\t\tfrappe.throw(_('Received an invalid token type.'))\n\t\t# 'Bearer' or 'MAC'\n\t\ttoken_type = token_type.title() if token_type == 'bearer' else token_type.upper()\n\n\t\tself.token_type = token_type\n\t\tself.access_token = cstr(data.get('access_token', ''))\n\t\tself.refresh_token = cstr(data.get('refresh_token', ''))\n\t\tself.expires_in = cint(data.get('expires_in', 0))\n\n\t\tnew_scopes = data.get('scope')\n\t\tif new_scopes:\n\t\t\tif isinstance(new_scopes, str):\n\t\t\t\tnew_scopes = new_scopes.split(' ')\n\t\t\tif isinstance(new_scopes, list):\n\t\t\t\tself.scopes = None\n\t\t\t\tfor scope in new_scopes:\n\t\t\t\t\tself.append('scopes', {'scope': scope})\n\n\t\tself.state = None\n\t\tself.save(ignore_permissions=True)\n\t\tfrappe.db.commit()\n\t\treturn self\n\n\tdef get_expires_in(self):\n\t\texpiry_time = frappe.utils.get_datetime(self.modified) + timedelta(self.expires_in)\n\t\treturn (datetime.now() - expiry_time).total_seconds()\n\n\tdef is_expired(self):\n\t\treturn self.get_expires_in() < 0\n\n\tdef get_json(self):\n\t\treturn {\n\t\t\t'access_token': self.get_password('access_token', ''),\n\t\t\t'refresh_token': self.get_password('refresh_token', ''),\n\t\t\t'expires_in': self.get_expires_in(),\n\t\t\t'token_type': self.token_type\n\t\t}\n", "path": "frappe/integrations/doctype/token_cache/token_cache.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# 
Copyright (c) 2019, Frappe Technologies and contributors\n# For license information, please see license.txt\n\nfrom __future__ import unicode_literals\nfrom datetime import timedelta\n\nimport frappe\nfrom frappe import _\nfrom frappe.utils import cstr, cint\nfrom frappe.model.document import Document\n\nclass TokenCache(Document):\n\n\tdef get_auth_header(self):\n\t\tif self.access_token:\n\t\t\theaders = {'Authorization': 'Bearer ' + self.get_password('access_token')}\n\t\t\treturn headers\n\n\t\traise frappe.exceptions.DoesNotExistError\n\n\tdef update_data(self, data):\n\t\t\"\"\"\n\t\tStore data returned by authorization flow.\n\n\t\tParams:\n\t\tdata - Dict with access_token, refresh_token, expires_in and scope.\n\t\t\"\"\"\n\t\ttoken_type = cstr(data.get('token_type', '')).lower()\n\t\tif token_type not in ['bearer', 'mac']:\n\t\t\tfrappe.throw(_('Received an invalid token type.'))\n\t\t# 'Bearer' or 'MAC'\n\t\ttoken_type = token_type.title() if token_type == 'bearer' else token_type.upper()\n\n\t\tself.token_type = token_type\n\t\tself.access_token = cstr(data.get('access_token', ''))\n\t\tself.refresh_token = cstr(data.get('refresh_token', ''))\n\t\tself.expires_in = cint(data.get('expires_in', 0))\n\n\t\tnew_scopes = data.get('scope')\n\t\tif new_scopes:\n\t\t\tif isinstance(new_scopes, str):\n\t\t\t\tnew_scopes = new_scopes.split(' ')\n\t\t\tif isinstance(new_scopes, list):\n\t\t\t\tself.scopes = None\n\t\t\t\tfor scope in new_scopes:\n\t\t\t\t\tself.append('scopes', {'scope': scope})\n\n\t\tself.state = None\n\t\tself.save(ignore_permissions=True)\n\t\tfrappe.db.commit()\n\t\treturn self\n\n\tdef get_expires_in(self):\n\t\texpiry_time = frappe.utils.get_datetime(self.modified) + timedelta(seconds=self.expires_in)\n\t\treturn (expiry_time - frappe.utils.now_datetime()).total_seconds()\n\n\tdef is_expired(self):\n\t\treturn self.get_expires_in() < 0\n\n\tdef get_json(self):\n\t\treturn {\n\t\t\t'access_token': self.get_password('access_token', ''),\n\t\t\t'refresh_token': self.get_password('refresh_token', ''),\n\t\t\t'expires_in': self.get_expires_in(),\n\t\t\t'token_type': self.token_type\n\t\t}\n", "path": "frappe/integrations/doctype/token_cache/token_cache.py"}]} | 1,002 | 224 |
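As a usage illustration for the record above, here is a hedged sketch of feeding a typical OAuth2 token-endpoint response into `TokenCache.update_data`. The document name is hypothetical, and the expiry comment assumes the corrected `get_expires_in`.

```python
# Hedged sketch: assumes an existing Token Cache document in a Frappe site.
import frappe

token_cache = frappe.get_doc("Token Cache", "my-connected-app-user")  # hypothetical name
token_cache.update_data({
    "token_type": "bearer",       # normalized to "Bearer" by update_data
    "access_token": "abc123",
    "refresh_token": "def456",
    "expires_in": 3600,
    "scope": "openid profile",    # split into two scope rows
})
print(token_cache.is_expired())   # False for roughly an hour after the save
```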
gh_patches_debug_24555 | rasdani/github-patches | git_diff | pyg-team__pytorch_geometric-5051 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Data Batch problem in PyG
### 🐛 Describe the bug
Hi. I am a computational physics researcher and have been using PyG happily.
My PyG code was working well a few weeks ago, but now when I run it, it no longer works, even though nothing has changed.
The problem is as follows.
I have many material structures, and in my "custom_dataset" class these are preprocessed and all the graph information (node features, edge features, edge index, etc.) is inserted into a "Data" object in PyTorch Geometric.
As shown below, each preprocessed sample with index $i$ prints as a normal "Data" object in PyG.

But when I load my custom dataset with the PyG DataLoader and do the following,
``` Python
sample = next(iter(train_loader)) # batch sample
```
the batch sample is reported as a "DataDataBatch" object. I have not seen this kind of object name before,
and I can't use the "sample.x" or "sample.edge_index" accessors. Instead I need to do it as shown below:

I want to use expressions like "sample.x", "sample.edge_index" or "sample.edge_attr" as before.
I would appreciate your kind explanation. Thank you.
### Environment
* PyG version: `2.0.5`
* PyTorch version: `1.11.0+cu113`
* OS: `GoogleColab Pro Plus`
* Python version: `Python 3.7.13 in colab`
* CUDA/cuDNN version:
* How you installed PyTorch and PyG (`conda`, `pip`, source):
``` python
# Install required packages.
import os
import torch
os.environ['TORCH'] = torch.__version__
print(torch.__version__)
!pip install -q torch-scatter -f https://data.pyg.org/whl/torch-${TORCH}.html
!pip install -q torch-sparse -f https://data.pyg.org/whl/torch-${TORCH}.html
!pip install -q git+https://github.com/pyg-team/pytorch_geometric.git
!pip install -q pymatgen==2020.11.11
```
* Any other relevant information (*e.g.*, version of `torch-scatter`):
--- END ISSUE ---
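For readers unfamiliar with the loader, the sketch below shows the access pattern the issue expects: the collated object is a `Batch`, so `batch.x`, `batch.edge_index`, and `batch.batch` are plain attributes. The two toy graphs are invented; they stand in for the author's material structures.

```python
# Hedged sketch: two tiny graphs batched with the PyG DataLoader.
import torch
from torch_geometric.data import Data
from torch_geometric.loader import DataLoader

graphs = [
    Data(x=torch.randn(3, 4), edge_index=torch.tensor([[0, 1, 2], [1, 2, 0]])),
    Data(x=torch.randn(2, 4), edge_index=torch.tensor([[0, 1], [1, 0]])),
]
loader = DataLoader(graphs, batch_size=2)
batch = next(iter(loader))
print(batch.x.shape)           # torch.Size([5, 4]) -> 3 + 2 nodes stacked
print(batch.edge_index.shape)  # torch.Size([2, 5])
print(batch.batch)             # tensor([0, 0, 0, 1, 1]) -> node-to-graph assignment
```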
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torch_geometric/loader/dataloader.py`
Content:
```
1 from collections.abc import Mapping, Sequence
2 from typing import List, Optional, Union
3
4 import torch.utils.data
5 from torch.utils.data.dataloader import default_collate
6
7 from torch_geometric.data import Batch, Dataset
8 from torch_geometric.data.data import BaseData
9
10
11 class Collater:
12 def __init__(self, follow_batch, exclude_keys):
13 self.follow_batch = follow_batch
14 self.exclude_keys = exclude_keys
15
16 def __call__(self, batch):
17 elem = batch[0]
18 if isinstance(elem, BaseData):
19 return Batch.from_data_list(batch, self.follow_batch,
20 self.exclude_keys)
21 elif isinstance(elem, torch.Tensor):
22 return default_collate(batch)
23 elif isinstance(elem, float):
24 return torch.tensor(batch, dtype=torch.float)
25 elif isinstance(elem, int):
26 return torch.tensor(batch)
27 elif isinstance(elem, str):
28 return batch
29 elif isinstance(elem, Mapping):
30 return {key: self([data[key] for data in batch]) for key in elem}
31 elif isinstance(elem, tuple) and hasattr(elem, '_fields'):
32 return type(elem)(*(self(s) for s in zip(*batch)))
33 elif isinstance(elem, Sequence) and not isinstance(elem, str):
34 return [self(s) for s in zip(*batch)]
35
36 raise TypeError(f'DataLoader found invalid type: {type(elem)}')
37
38 def collate(self, batch): # Deprecated...
39 return self(batch)
40
41
42 class DataLoader(torch.utils.data.DataLoader):
43 r"""A data loader which merges data objects from a
44 :class:`torch_geometric.data.Dataset` to a mini-batch.
45 Data objects can be either of type :class:`~torch_geometric.data.Data` or
46 :class:`~torch_geometric.data.HeteroData`.
47
48 Args:
49 dataset (Dataset): The dataset from which to load the data.
50 batch_size (int, optional): How many samples per batch to load.
51 (default: :obj:`1`)
52 shuffle (bool, optional): If set to :obj:`True`, the data will be
53 reshuffled at every epoch. (default: :obj:`False`)
54 follow_batch (List[str], optional): Creates assignment batch
55 vectors for each key in the list. (default: :obj:`None`)
56 exclude_keys (List[str], optional): Will exclude each key in the
57 list. (default: :obj:`None`)
58 **kwargs (optional): Additional arguments of
59 :class:`torch.utils.data.DataLoader`.
60 """
61 def __init__(
62 self,
63 dataset: Union[Dataset, List[BaseData]],
64 batch_size: int = 1,
65 shuffle: bool = False,
66 follow_batch: Optional[List[str]] = None,
67 exclude_keys: Optional[List[str]] = None,
68 **kwargs,
69 ):
70
71 if 'collate_fn' in kwargs:
72 del kwargs['collate_fn']
73
74 # Save for PyTorch Lightning < 1.6:
75 self.follow_batch = follow_batch
76 self.exclude_keys = exclude_keys
77
78 super().__init__(
79 dataset,
80 batch_size,
81 shuffle,
82 collate_fn=Collater(follow_batch, exclude_keys),
83 **kwargs,
84 )
85
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torch_geometric/loader/dataloader.py b/torch_geometric/loader/dataloader.py
--- a/torch_geometric/loader/dataloader.py
+++ b/torch_geometric/loader/dataloader.py
@@ -1,4 +1,5 @@
from collections.abc import Mapping, Sequence
+from inspect import signature
from typing import List, Optional, Union
import torch.utils.data
@@ -39,6 +40,28 @@
return self(batch)
+# PyG 'Data' objects are subclasses of MutableMapping, which is an
+# instance of collections.abc.Mapping. Currently, PyTorch pin_memory
+# for DataLoaders treats the returned batches as Mapping objects and
+# calls `pin_memory` on each element in `Data.__dict__`, which is not
+# desired behavior if 'Data' has a `pin_memory` function. We patch
+# this behavior here by monkeypatching `pin_memory`, but can hopefully patch
+# this in PyTorch in the future:
+__torch_pin_memory = torch.utils.data._utils.pin_memory.pin_memory
+__torch_pin_memory_params = signature(__torch_pin_memory).parameters
+
+
+def pin_memory(data, device=None):
+ if hasattr(data, "pin_memory"):
+ return data.pin_memory()
+ if len(__torch_pin_memory_params) > 1:
+ return __torch_pin_memory(data, device)
+ return __torch_pin_memory(data)
+
+
+torch.utils.data._utils.pin_memory.pin_memory = pin_memory
+
+
class DataLoader(torch.utils.data.DataLoader):
r"""A data loader which merges data objects from a
:class:`torch_geometric.data.Dataset` to a mini-batch.
| {"golden_diff": "diff --git a/torch_geometric/loader/dataloader.py b/torch_geometric/loader/dataloader.py\n--- a/torch_geometric/loader/dataloader.py\n+++ b/torch_geometric/loader/dataloader.py\n@@ -1,4 +1,5 @@\n from collections.abc import Mapping, Sequence\n+from inspect import signature\n from typing import List, Optional, Union\n \n import torch.utils.data\n@@ -39,6 +40,28 @@\n return self(batch)\n \n \n+# PyG 'Data' objects are subclasses of MutableMapping, which is an\n+# instance of collections.abc.Mapping. Currently, PyTorch pin_memory\n+# for DataLoaders treats the returned batches as Mapping objects and\n+# calls `pin_memory` on each element in `Data.__dict__`, which is not\n+# desired behavior if 'Data' has a `pin_memory` function. We patch\n+# this behavior here by monkeypatching `pin_memory`, but can hopefully patch\n+# this in PyTorch in the future:\n+__torch_pin_memory = torch.utils.data._utils.pin_memory.pin_memory\n+__torch_pin_memory_params = signature(__torch_pin_memory).parameters\n+\n+\n+def pin_memory(data, device=None):\n+ if hasattr(data, \"pin_memory\"):\n+ return data.pin_memory()\n+ if len(__torch_pin_memory_params) > 1:\n+ return __torch_pin_memory(data, device)\n+ return __torch_pin_memory(data)\n+\n+\n+torch.utils.data._utils.pin_memory.pin_memory = pin_memory\n+\n+\n class DataLoader(torch.utils.data.DataLoader):\n r\"\"\"A data loader which merges data objects from a\n :class:`torch_geometric.data.Dataset` to a mini-batch.\n", "issue": "Data Batch problem in PyG\n### \ud83d\udc1b Describe the bug\n\nHi. I am a computational physics researcher and was using PyG very well.\r\nmy pyg code was working well a few weeks ago, but now that I run my code, it is not working anymore without any changes.\r\n\r\nthe problem is like below.\r\nI have many material structures and in my \"custom_dataset\" class, these are preprocessed and all graph informations (node features, edge features, edge index etc) are inserted into \"Data\" object in PyTorch geometric.\r\nYou can see that each preprocessed sample with index $i$ was printed normal \"Data\" object in pyg\r\n\r\n\r\n\r\nBut When I insert my custom dataset class into pyg DataLoader and I did like below,\r\n\r\n``` Python\r\nsample = next(iter(train_loader)) # batch sample\r\n```\r\n\r\nbatch sample is denoted by \"DataDataBatch\". I didn't see this kind of object name.\r\nand i can't use \"sample.x' or \"sample.edge_index\" command. Instead I need to do like this\r\n\r\n\r\n\r\nI want to use expressions like \"sample.x\", \"sample.edge_index\" or \"sample.edge_attr\" as like before. \r\nI expect your kind explanations. 
Thank you.\r\n\n\n### Environment\n\n* PyG version: `2.0.5`\r\n* PyTorch version: `1.11.0+cu113`\r\n* OS: `GoogleColab Pro Plus`\r\n* Python version: `Python 3.7.13 in colab`\r\n* CUDA/cuDNN version:\r\n* How you installed PyTorch and PyG (`conda`, `pip`, source): \r\n``` python\r\n# Install required packages.\r\nimport os\r\nimport torch\r\nos.environ['TORCH'] = torch.__version__\r\nprint(torch.__version__)\r\n!pip install -q torch-scatter -f https://data.pyg.org/whl/torch-${TORCH}.html\r\n!pip install -q torch-sparse -f https://data.pyg.org/whl/torch-${TORCH}.html\r\n!pip install -q git+https://github.com/pyg-team/pytorch_geometric.git\r\n!pip install -q pymatgen==2020.11.11 \r\n```\r\n* Any other relevant information (*e.g.*, version of `torch-scatter`):\r\n\n", "before_files": [{"content": "from collections.abc import Mapping, Sequence\nfrom typing import List, Optional, Union\n\nimport torch.utils.data\nfrom torch.utils.data.dataloader import default_collate\n\nfrom torch_geometric.data import Batch, Dataset\nfrom torch_geometric.data.data import BaseData\n\n\nclass Collater:\n def __init__(self, follow_batch, exclude_keys):\n self.follow_batch = follow_batch\n self.exclude_keys = exclude_keys\n\n def __call__(self, batch):\n elem = batch[0]\n if isinstance(elem, BaseData):\n return Batch.from_data_list(batch, self.follow_batch,\n self.exclude_keys)\n elif isinstance(elem, torch.Tensor):\n return default_collate(batch)\n elif isinstance(elem, float):\n return torch.tensor(batch, dtype=torch.float)\n elif isinstance(elem, int):\n return torch.tensor(batch)\n elif isinstance(elem, str):\n return batch\n elif isinstance(elem, Mapping):\n return {key: self([data[key] for data in batch]) for key in elem}\n elif isinstance(elem, tuple) and hasattr(elem, '_fields'):\n return type(elem)(*(self(s) for s in zip(*batch)))\n elif isinstance(elem, Sequence) and not isinstance(elem, str):\n return [self(s) for s in zip(*batch)]\n\n raise TypeError(f'DataLoader found invalid type: {type(elem)}')\n\n def collate(self, batch): # Deprecated...\n return self(batch)\n\n\nclass DataLoader(torch.utils.data.DataLoader):\n r\"\"\"A data loader which merges data objects from a\n :class:`torch_geometric.data.Dataset` to a mini-batch.\n Data objects can be either of type :class:`~torch_geometric.data.Data` or\n :class:`~torch_geometric.data.HeteroData`.\n\n Args:\n dataset (Dataset): The dataset from which to load the data.\n batch_size (int, optional): How many samples per batch to load.\n (default: :obj:`1`)\n shuffle (bool, optional): If set to :obj:`True`, the data will be\n reshuffled at every epoch. (default: :obj:`False`)\n follow_batch (List[str], optional): Creates assignment batch\n vectors for each key in the list. (default: :obj:`None`)\n exclude_keys (List[str], optional): Will exclude each key in the\n list. 
(default: :obj:`None`)\n **kwargs (optional): Additional arguments of\n :class:`torch.utils.data.DataLoader`.\n \"\"\"\n def __init__(\n self,\n dataset: Union[Dataset, List[BaseData]],\n batch_size: int = 1,\n shuffle: bool = False,\n follow_batch: Optional[List[str]] = None,\n exclude_keys: Optional[List[str]] = None,\n **kwargs,\n ):\n\n if 'collate_fn' in kwargs:\n del kwargs['collate_fn']\n\n # Save for PyTorch Lightning < 1.6:\n self.follow_batch = follow_batch\n self.exclude_keys = exclude_keys\n\n super().__init__(\n dataset,\n batch_size,\n shuffle,\n collate_fn=Collater(follow_batch, exclude_keys),\n **kwargs,\n )\n", "path": "torch_geometric/loader/dataloader.py"}], "after_files": [{"content": "from collections.abc import Mapping, Sequence\nfrom inspect import signature\nfrom typing import List, Optional, Union\n\nimport torch.utils.data\nfrom torch.utils.data.dataloader import default_collate\n\nfrom torch_geometric.data import Batch, Dataset\nfrom torch_geometric.data.data import BaseData\n\n\nclass Collater:\n def __init__(self, follow_batch, exclude_keys):\n self.follow_batch = follow_batch\n self.exclude_keys = exclude_keys\n\n def __call__(self, batch):\n elem = batch[0]\n if isinstance(elem, BaseData):\n return Batch.from_data_list(batch, self.follow_batch,\n self.exclude_keys)\n elif isinstance(elem, torch.Tensor):\n return default_collate(batch)\n elif isinstance(elem, float):\n return torch.tensor(batch, dtype=torch.float)\n elif isinstance(elem, int):\n return torch.tensor(batch)\n elif isinstance(elem, str):\n return batch\n elif isinstance(elem, Mapping):\n return {key: self([data[key] for data in batch]) for key in elem}\n elif isinstance(elem, tuple) and hasattr(elem, '_fields'):\n return type(elem)(*(self(s) for s in zip(*batch)))\n elif isinstance(elem, Sequence) and not isinstance(elem, str):\n return [self(s) for s in zip(*batch)]\n\n raise TypeError(f'DataLoader found invalid type: {type(elem)}')\n\n def collate(self, batch): # Deprecated...\n return self(batch)\n\n\n# PyG 'Data' objects are subclasses of MutableMapping, which is an\n# instance of collections.abc.Mapping. Currently, PyTorch pin_memory\n# for DataLoaders treats the returned batches as Mapping objects and\n# calls `pin_memory` on each element in `Data.__dict__`, which is not\n# desired behavior if 'Data' has a `pin_memory` function. We patch\n# this behavior here by monkeypatching `pin_memory`, but can hopefully patch\n# this in PyTorch in the future:\n__torch_pin_memory = torch.utils.data._utils.pin_memory.pin_memory\n__torch_pin_memory_params = signature(__torch_pin_memory).parameters\n\n\ndef pin_memory(data, device=None):\n if hasattr(data, \"pin_memory\"):\n return data.pin_memory()\n if len(__torch_pin_memory_params) > 1:\n return __torch_pin_memory(data, device)\n return __torch_pin_memory(data)\n\n\ntorch.utils.data._utils.pin_memory.pin_memory = pin_memory\n\n\nclass DataLoader(torch.utils.data.DataLoader):\n r\"\"\"A data loader which merges data objects from a\n :class:`torch_geometric.data.Dataset` to a mini-batch.\n Data objects can be either of type :class:`~torch_geometric.data.Data` or\n :class:`~torch_geometric.data.HeteroData`.\n\n Args:\n dataset (Dataset): The dataset from which to load the data.\n batch_size (int, optional): How many samples per batch to load.\n (default: :obj:`1`)\n shuffle (bool, optional): If set to :obj:`True`, the data will be\n reshuffled at every epoch. 
(default: :obj:`False`)\n follow_batch (List[str], optional): Creates assignment batch\n vectors for each key in the list. (default: :obj:`None`)\n exclude_keys (List[str], optional): Will exclude each key in the\n list. (default: :obj:`None`)\n **kwargs (optional): Additional arguments of\n :class:`torch.utils.data.DataLoader`.\n \"\"\"\n def __init__(\n self,\n dataset: Union[Dataset, List[BaseData]],\n batch_size: int = 1,\n shuffle: bool = False,\n follow_batch: Optional[List[str]] = None,\n exclude_keys: Optional[List[str]] = None,\n **kwargs,\n ):\n\n if 'collate_fn' in kwargs:\n del kwargs['collate_fn']\n\n # Save for PyTorch Lightning < 1.6:\n self.follow_batch = follow_batch\n self.exclude_keys = exclude_keys\n\n super().__init__(\n dataset,\n batch_size,\n shuffle,\n collate_fn=Collater(follow_batch, exclude_keys),\n **kwargs,\n )\n", "path": "torch_geometric/loader/dataloader.py"}]} | 1,727 | 362 |
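As a follow-up to the `pin_memory` monkeypatch in the golden diff above, the sketch below shows the loader configuration it is meant to support: with `pin_memory=True`, the patched hook should call `Batch.pin_memory()` on the whole collated object instead of recursing into its internal dict. Running it requires a CUDA-capable machine; the toy dataset is invented.

```python
# Hedged sketch, not from the repository; pinning needs CUDA to be available.
import torch
from torch_geometric.data import Data
from torch_geometric.loader import DataLoader

dataset = [
    Data(x=torch.randn(4, 8), edge_index=torch.tensor([[0, 1], [1, 2]]))
    for _ in range(100)
]
loader = DataLoader(dataset, batch_size=32, pin_memory=True)
batch = next(iter(loader))
print(batch.x.is_pinned())  # expected True once the patched hook is in place
```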
gh_patches_debug_3074 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-1341 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cannot make other users admin on the website
**Describe the bug**
For the moment, there is no way to promote a user to be an admin. One has to do it in the "./bw-dev shell".
**To Reproduce**
Steps to reproduce the behavior:
1. Go to 'Admin' and then the page of the user you want to promote
2. Promote the user and save
3. The "promoted user" logs in
4. Nope, not promoted
**Expected behavior**
The logged in promoted user should see the admin panel.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/views/user_admin.py`
Content:
```
1 """ manage user """
2 from django.contrib.auth.decorators import login_required, permission_required
3 from django.core.paginator import Paginator
4 from django.shortcuts import get_object_or_404
5 from django.template.response import TemplateResponse
6 from django.utils.decorators import method_decorator
7 from django.views import View
8
9 from bookwyrm import forms, models
10 from bookwyrm.settings import PAGE_LENGTH
11
12
13 # pylint: disable= no-self-use
14 @method_decorator(login_required, name="dispatch")
15 @method_decorator(
16 permission_required("bookwyrm.moderate_users", raise_exception=True),
17 name="dispatch",
18 )
19 class UserAdminList(View):
20 """admin view of users on this server"""
21
22 def get(self, request):
23 """list of users"""
24 filters = {}
25 server = request.GET.get("server")
26 if server:
27 server = models.FederatedServer.objects.filter(server_name=server).first()
28 filters["federated_server"] = server
29 filters["federated_server__isnull"] = False
30 username = request.GET.get("username")
31 if username:
32 filters["username__icontains"] = username
33 scope = request.GET.get("scope")
34 if scope:
35 filters["local"] = scope == "local"
36
37 users = models.User.objects.filter(**filters)
38
39 sort = request.GET.get("sort", "-created_date")
40 sort_fields = [
41 "created_date",
42 "last_active_date",
43 "username",
44 "federated_server__server_name",
45 "is_active",
46 ]
47 if sort in sort_fields + ["-{:s}".format(f) for f in sort_fields]:
48 users = users.order_by(sort)
49
50 paginated = Paginator(users, PAGE_LENGTH)
51 data = {
52 "users": paginated.get_page(request.GET.get("page")),
53 "sort": sort,
54 "server": server,
55 }
56 return TemplateResponse(request, "user_admin/user_admin.html", data)
57
58
59 @method_decorator(login_required, name="dispatch")
60 @method_decorator(
61 permission_required("bookwyrm.moderate_users", raise_exception=True),
62 name="dispatch",
63 )
64 class UserAdmin(View):
65 """moderate an individual user"""
66
67 def get(self, request, user):
68 """user view"""
69 user = get_object_or_404(models.User, id=user)
70 data = {"user": user, "group_form": forms.UserGroupForm()}
71 return TemplateResponse(request, "user_admin/user.html", data)
72
73 def post(self, request, user):
74 """update user group"""
75 user = get_object_or_404(models.User, id=user)
76 form = forms.UserGroupForm(request.POST, instance=user)
77 if form.is_valid():
78 form.save()
79 data = {"user": user, "group_form": form}
80 return TemplateResponse(request, "user_admin/user.html", data)
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bookwyrm/views/user_admin.py b/bookwyrm/views/user_admin.py
--- a/bookwyrm/views/user_admin.py
+++ b/bookwyrm/views/user_admin.py
@@ -13,7 +13,7 @@
# pylint: disable= no-self-use
@method_decorator(login_required, name="dispatch")
@method_decorator(
- permission_required("bookwyrm.moderate_users", raise_exception=True),
+ permission_required("bookwyrm.moderate_user", raise_exception=True),
name="dispatch",
)
class UserAdminList(View):
| {"golden_diff": "diff --git a/bookwyrm/views/user_admin.py b/bookwyrm/views/user_admin.py\n--- a/bookwyrm/views/user_admin.py\n+++ b/bookwyrm/views/user_admin.py\n@@ -13,7 +13,7 @@\n # pylint: disable= no-self-use\n @method_decorator(login_required, name=\"dispatch\")\n @method_decorator(\n- permission_required(\"bookwyrm.moderate_users\", raise_exception=True),\n+ permission_required(\"bookwyrm.moderate_user\", raise_exception=True),\n name=\"dispatch\",\n )\n class UserAdminList(View):\n", "issue": "Cannot make other users admin on the website\n**Describe the bug**\r\nFor the moment, there is no way to promote an user to be an admin. One has to do it in the \"./bw-dev shell\"\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to 'Admin' and then the page of the user you want to promote\r\n2. Promote the user and save\r\n3. The \"promoted user\" logins in\r\n4. Nope, not promoted\r\n\r\n**Expected behavior**\r\nThe logged in promoted user should see the admin panel.\r\n\n", "before_files": [{"content": "\"\"\" manage user \"\"\"\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.core.paginator import Paginator\nfrom django.shortcuts import get_object_or_404\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.settings import PAGE_LENGTH\n\n\n# pylint: disable= no-self-use\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"bookwyrm.moderate_users\", raise_exception=True),\n name=\"dispatch\",\n)\nclass UserAdminList(View):\n \"\"\"admin view of users on this server\"\"\"\n\n def get(self, request):\n \"\"\"list of users\"\"\"\n filters = {}\n server = request.GET.get(\"server\")\n if server:\n server = models.FederatedServer.objects.filter(server_name=server).first()\n filters[\"federated_server\"] = server\n filters[\"federated_server__isnull\"] = False\n username = request.GET.get(\"username\")\n if username:\n filters[\"username__icontains\"] = username\n scope = request.GET.get(\"scope\")\n if scope:\n filters[\"local\"] = scope == \"local\"\n\n users = models.User.objects.filter(**filters)\n\n sort = request.GET.get(\"sort\", \"-created_date\")\n sort_fields = [\n \"created_date\",\n \"last_active_date\",\n \"username\",\n \"federated_server__server_name\",\n \"is_active\",\n ]\n if sort in sort_fields + [\"-{:s}\".format(f) for f in sort_fields]:\n users = users.order_by(sort)\n\n paginated = Paginator(users, PAGE_LENGTH)\n data = {\n \"users\": paginated.get_page(request.GET.get(\"page\")),\n \"sort\": sort,\n \"server\": server,\n }\n return TemplateResponse(request, \"user_admin/user_admin.html\", data)\n\n\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"bookwyrm.moderate_users\", raise_exception=True),\n name=\"dispatch\",\n)\nclass UserAdmin(View):\n \"\"\"moderate an individual user\"\"\"\n\n def get(self, request, user):\n \"\"\"user view\"\"\"\n user = get_object_or_404(models.User, id=user)\n data = {\"user\": user, \"group_form\": forms.UserGroupForm()}\n return TemplateResponse(request, \"user_admin/user.html\", data)\n\n def post(self, request, user):\n \"\"\"update user group\"\"\"\n user = get_object_or_404(models.User, id=user)\n form = forms.UserGroupForm(request.POST, instance=user)\n if form.is_valid():\n form.save()\n data = {\"user\": user, \"group_form\": form}\n return 
TemplateResponse(request, \"user_admin/user.html\", data)\n", "path": "bookwyrm/views/user_admin.py"}], "after_files": [{"content": "\"\"\" manage user \"\"\"\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.core.paginator import Paginator\nfrom django.shortcuts import get_object_or_404\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.settings import PAGE_LENGTH\n\n\n# pylint: disable= no-self-use\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"bookwyrm.moderate_user\", raise_exception=True),\n name=\"dispatch\",\n)\nclass UserAdminList(View):\n \"\"\"admin view of users on this server\"\"\"\n\n def get(self, request):\n \"\"\"list of users\"\"\"\n filters = {}\n server = request.GET.get(\"server\")\n if server:\n server = models.FederatedServer.objects.filter(server_name=server).first()\n filters[\"federated_server\"] = server\n filters[\"federated_server__isnull\"] = False\n username = request.GET.get(\"username\")\n if username:\n filters[\"username__icontains\"] = username\n scope = request.GET.get(\"scope\")\n if scope:\n filters[\"local\"] = scope == \"local\"\n\n users = models.User.objects.filter(**filters)\n\n sort = request.GET.get(\"sort\", \"-created_date\")\n sort_fields = [\n \"created_date\",\n \"last_active_date\",\n \"username\",\n \"federated_server__server_name\",\n \"is_active\",\n ]\n if sort in sort_fields + [\"-{:s}\".format(f) for f in sort_fields]:\n users = users.order_by(sort)\n\n paginated = Paginator(users, PAGE_LENGTH)\n data = {\n \"users\": paginated.get_page(request.GET.get(\"page\")),\n \"sort\": sort,\n \"server\": server,\n }\n return TemplateResponse(request, \"user_admin/user_admin.html\", data)\n\n\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"bookwyrm.moderate_users\", raise_exception=True),\n name=\"dispatch\",\n)\nclass UserAdmin(View):\n \"\"\"moderate an individual user\"\"\"\n\n def get(self, request, user):\n \"\"\"user view\"\"\"\n user = get_object_or_404(models.User, id=user)\n data = {\"user\": user, \"group_form\": forms.UserGroupForm()}\n return TemplateResponse(request, \"user_admin/user.html\", data)\n\n def post(self, request, user):\n \"\"\"update user group\"\"\"\n user = get_object_or_404(models.User, id=user)\n form = forms.UserGroupForm(request.POST, instance=user)\n if form.is_valid():\n form.save()\n data = {\"user\": user, \"group_form\": form}\n return TemplateResponse(request, \"user_admin/user.html\", data)\n", "path": "bookwyrm/views/user_admin.py"}]} | 1,136 | 121 |
gh_patches_debug_557 | rasdani/github-patches | git_diff | pex-tool__pex-743 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 1.6.8
On the docket:
+ [x] Fixup pex re-exec during bootstrap. #741
 + [x] Pex should not re-exec when the current interpreter satisfies constraints #709
+ [x] Pex should not lose PEX_PYTHON or PEX_PYTHON_PATH when re-exec-ing #710
+ [x] Fix resolution of `setup.py` project extras. #739
Deferred:
+ [ ] Remove PEX_HTTP_RETRIES and push into a flag for the pex tool #94
+ [ ] Sdist resolution is not always reproducible #735
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = '1.6.7'
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = '1.6.7'
+__version__ = '1.6.8'
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = '1.6.7'\n+__version__ = '1.6.8'\n", "issue": "Release 1.6.8\nOn the docket:\r\n\r\n+ [x] Fixup pex re-exec during bootstrap. #741 \r\n + [x] Pex should not re-exec when the current interpreter satifies constraints #709\r\n + [x] Pex should not lose PEX_PYTHON or PEX_PYTHON_PATH when re-exec-ing #710\r\n+ [x] Fix resolution of `setup.py` project extras. #739\r\n\r\nDeferred:\r\n\r\n+ [ ] Remove PEX_HTTP_RETRIES and push into a flag for the pex tool #94\r\n+ [ ] Sdist resolution is not always reproducible #735\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '1.6.7'\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '1.6.8'\n", "path": "pex/version.py"}]} | 449 | 94 |
gh_patches_debug_13557 | rasdani/github-patches | git_diff | mesonbuild__meson-3715 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Windows module fails on multiple resource files with same name
I have a project with multiple subfolders that contain resource scripts named 'rsrc.rc', this worked with at least 0.44.0, but fails with current master:
> meson.build:7:0: ERROR: Tried to create target "Windows resource for file 'rsrc.rc'", but a target of that name already exists.
Here is a small testcase: [rsrcbug.zip](https://github.com/mesonbuild/meson/files/2007861/rsrcbug.zip)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mesonbuild/modules/windows.py`
Content:
```
1 # Copyright 2015 The Meson development team
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16
17 from .. import mlog
18 from .. import mesonlib, dependencies, build
19 from ..mesonlib import MesonException, extract_as_list
20 from . import get_include_args
21 from . import ModuleReturnValue
22 from . import ExtensionModule
23 from ..interpreterbase import permittedKwargs, FeatureNewKwargs
24
25 class WindowsModule(ExtensionModule):
26
27 def detect_compiler(self, compilers):
28 for l in ('c', 'cpp'):
29 if l in compilers:
30 return compilers[l]
31 raise MesonException('Resource compilation requires a C or C++ compiler.')
32
33 @FeatureNewKwargs('windows.compile_resources', '0.47.0', ['depend_files'])
34 @permittedKwargs({'args', 'include_directories', 'depend_files'})
35 def compile_resources(self, state, args, kwargs):
36 comp = self.detect_compiler(state.compilers)
37
38 extra_args = mesonlib.stringlistify(kwargs.get('args', []))
39 wrc_deps = extract_as_list(kwargs, 'depend_files', pop = True)
40 inc_dirs = extract_as_list(kwargs, 'include_directories', pop = True)
41 for incd in inc_dirs:
42 if not isinstance(incd.held_object, (str, build.IncludeDirs)):
43 raise MesonException('Resource include dirs should be include_directories().')
44 extra_args += get_include_args(inc_dirs)
45
46 if comp.id == 'msvc':
47 rescomp = dependencies.ExternalProgram('rc', silent=True)
48 res_args = extra_args + ['/nologo', '/fo@OUTPUT@', '@INPUT@']
49 suffix = 'res'
50 else:
51 m = 'Argument {!r} has a space which may not work with windres due to ' \
52 'a MinGW bug: https://sourceware.org/bugzilla/show_bug.cgi?id=4933'
53 for arg in extra_args:
54 if ' ' in arg:
55 mlog.warning(m.format(arg))
56 rescomp_name = None
57 # FIXME: Does not handle `native: true` executables, see
58 # https://github.com/mesonbuild/meson/issues/1531
59 if state.environment.is_cross_build():
60 # If cross compiling see if windres has been specified in the
61 # cross file before trying to find it another way.
62 rescomp_name = state.environment.cross_info.config['binaries'].get('windres')
63 if rescomp_name is None:
64 # Pick-up env var WINDRES if set. This is often used for
65 # specifying an arch-specific windres.
66 rescomp_name = os.environ.get('WINDRES', 'windres')
67 rescomp = dependencies.ExternalProgram(rescomp_name, silent=True)
68 res_args = extra_args + ['@INPUT@', '@OUTPUT@']
69 suffix = 'o'
70 if not rescomp.found():
71 raise MesonException('Could not find Windows resource compiler "%s".' % rescomp_name)
72
73 res_targets = []
74
75 def add_target(src):
76 if isinstance(src, list):
77 for subsrc in src:
78 add_target(subsrc)
79 return
80
81 if hasattr(src, 'held_object'):
82 src = src.held_object
83
84 res_kwargs = {
85 'output': '@BASENAME@.' + suffix,
86 'input': [src],
87 'command': [rescomp] + res_args,
88 'depend_files': wrc_deps,
89 }
90
91 if isinstance(src, (str, mesonlib.File)):
92 name = 'file {!r}'.format(str(src))
93 elif isinstance(src, build.CustomTarget):
94 if len(src.get_outputs()) > 1:
95 raise MesonException('windows.compile_resources does not accept custom targets with more than 1 output.')
96
97 name = 'target {!r}'.format(src.get_id())
98 else:
99 raise MesonException('Unexpected source type {!r}. windows.compile_resources accepts only strings, files, custom targets, and lists thereof.'.format(src))
100
101 # Path separators are not allowed in target names
102 name = name.replace('/', '_').replace('\\', '_')
103
104 # instruct binutils windres to generate a preprocessor depfile
105 if comp.id != 'msvc':
106 res_kwargs['depfile'] = res_kwargs['output'] + '.d'
107 res_kwargs['command'] += ['--preprocessor-arg=-MD', '--preprocessor-arg=-MQ@OUTPUT@', '--preprocessor-arg=-MF@DEPFILE@']
108
109 res_targets.append(build.CustomTarget('Windows resource for ' + name, state.subdir, state.subproject, res_kwargs))
110
111 add_target(args)
112
113 return ModuleReturnValue(res_targets, [res_targets])
114
115 def initialize(*args, **kwargs):
116 return WindowsModule(*args, **kwargs)
117
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mesonbuild/modules/windows.py b/mesonbuild/modules/windows.py
--- a/mesonbuild/modules/windows.py
+++ b/mesonbuild/modules/windows.py
@@ -88,8 +88,10 @@
'depend_files': wrc_deps,
}
- if isinstance(src, (str, mesonlib.File)):
- name = 'file {!r}'.format(str(src))
+ if isinstance(src, str):
+ name = 'file {!r}'.format(os.path.join(state.subdir, src))
+ elif isinstance(src, mesonlib.File):
+ name = 'file {!r}'.format(src.relative_name())
elif isinstance(src, build.CustomTarget):
if len(src.get_outputs()) > 1:
raise MesonException('windows.compile_resources does not accept custom targets with more than 1 output.')
| {"golden_diff": "diff --git a/mesonbuild/modules/windows.py b/mesonbuild/modules/windows.py\n--- a/mesonbuild/modules/windows.py\n+++ b/mesonbuild/modules/windows.py\n@@ -88,8 +88,10 @@\n 'depend_files': wrc_deps,\n }\n \n- if isinstance(src, (str, mesonlib.File)):\n- name = 'file {!r}'.format(str(src))\n+ if isinstance(src, str):\n+ name = 'file {!r}'.format(os.path.join(state.subdir, src))\n+ elif isinstance(src, mesonlib.File):\n+ name = 'file {!r}'.format(src.relative_name())\n elif isinstance(src, build.CustomTarget):\n if len(src.get_outputs()) > 1:\n raise MesonException('windows.compile_resources does not accept custom targets with more than 1 output.')\n", "issue": "Windows module fails on multiple resource files with same name\nI have a project with multiple subfolders that contain resource scripts named 'rsrc.rc', this worked with at least 0.44.0, but fails with current master:\r\n\r\n> meson.build:7:0: ERROR: Tried to create target \"Windows resource for file 'rsrc.rc'\", but a target of that name already exists.\r\n\r\nHere is a small testcase: [rsrcbug.zip](https://github.com/mesonbuild/meson/files/2007861/rsrcbug.zip)\r\n\n", "before_files": [{"content": "# Copyright 2015 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nfrom .. import mlog\nfrom .. import mesonlib, dependencies, build\nfrom ..mesonlib import MesonException, extract_as_list\nfrom . import get_include_args\nfrom . import ModuleReturnValue\nfrom . 
import ExtensionModule\nfrom ..interpreterbase import permittedKwargs, FeatureNewKwargs\n\nclass WindowsModule(ExtensionModule):\n\n def detect_compiler(self, compilers):\n for l in ('c', 'cpp'):\n if l in compilers:\n return compilers[l]\n raise MesonException('Resource compilation requires a C or C++ compiler.')\n\n @FeatureNewKwargs('windows.compile_resources', '0.47.0', ['depend_files'])\n @permittedKwargs({'args', 'include_directories', 'depend_files'})\n def compile_resources(self, state, args, kwargs):\n comp = self.detect_compiler(state.compilers)\n\n extra_args = mesonlib.stringlistify(kwargs.get('args', []))\n wrc_deps = extract_as_list(kwargs, 'depend_files', pop = True)\n inc_dirs = extract_as_list(kwargs, 'include_directories', pop = True)\n for incd in inc_dirs:\n if not isinstance(incd.held_object, (str, build.IncludeDirs)):\n raise MesonException('Resource include dirs should be include_directories().')\n extra_args += get_include_args(inc_dirs)\n\n if comp.id == 'msvc':\n rescomp = dependencies.ExternalProgram('rc', silent=True)\n res_args = extra_args + ['/nologo', '/fo@OUTPUT@', '@INPUT@']\n suffix = 'res'\n else:\n m = 'Argument {!r} has a space which may not work with windres due to ' \\\n 'a MinGW bug: https://sourceware.org/bugzilla/show_bug.cgi?id=4933'\n for arg in extra_args:\n if ' ' in arg:\n mlog.warning(m.format(arg))\n rescomp_name = None\n # FIXME: Does not handle `native: true` executables, see\n # https://github.com/mesonbuild/meson/issues/1531\n if state.environment.is_cross_build():\n # If cross compiling see if windres has been specified in the\n # cross file before trying to find it another way.\n rescomp_name = state.environment.cross_info.config['binaries'].get('windres')\n if rescomp_name is None:\n # Pick-up env var WINDRES if set. This is often used for\n # specifying an arch-specific windres.\n rescomp_name = os.environ.get('WINDRES', 'windres')\n rescomp = dependencies.ExternalProgram(rescomp_name, silent=True)\n res_args = extra_args + ['@INPUT@', '@OUTPUT@']\n suffix = 'o'\n if not rescomp.found():\n raise MesonException('Could not find Windows resource compiler \"%s\".' % rescomp_name)\n\n res_targets = []\n\n def add_target(src):\n if isinstance(src, list):\n for subsrc in src:\n add_target(subsrc)\n return\n\n if hasattr(src, 'held_object'):\n src = src.held_object\n\n res_kwargs = {\n 'output': '@BASENAME@.' + suffix,\n 'input': [src],\n 'command': [rescomp] + res_args,\n 'depend_files': wrc_deps,\n }\n\n if isinstance(src, (str, mesonlib.File)):\n name = 'file {!r}'.format(str(src))\n elif isinstance(src, build.CustomTarget):\n if len(src.get_outputs()) > 1:\n raise MesonException('windows.compile_resources does not accept custom targets with more than 1 output.')\n\n name = 'target {!r}'.format(src.get_id())\n else:\n raise MesonException('Unexpected source type {!r}. 
windows.compile_resources accepts only strings, files, custom targets, and lists thereof.'.format(src))\n\n # Path separators are not allowed in target names\n name = name.replace('/', '_').replace('\\\\', '_')\n\n # instruct binutils windres to generate a preprocessor depfile\n if comp.id != 'msvc':\n res_kwargs['depfile'] = res_kwargs['output'] + '.d'\n res_kwargs['command'] += ['--preprocessor-arg=-MD', '--preprocessor-arg=-MQ@OUTPUT@', '--preprocessor-arg=-MF@DEPFILE@']\n\n res_targets.append(build.CustomTarget('Windows resource for ' + name, state.subdir, state.subproject, res_kwargs))\n\n add_target(args)\n\n return ModuleReturnValue(res_targets, [res_targets])\n\ndef initialize(*args, **kwargs):\n return WindowsModule(*args, **kwargs)\n", "path": "mesonbuild/modules/windows.py"}], "after_files": [{"content": "# Copyright 2015 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nfrom .. import mlog\nfrom .. import mesonlib, dependencies, build\nfrom ..mesonlib import MesonException, extract_as_list\nfrom . import get_include_args\nfrom . import ModuleReturnValue\nfrom . import ExtensionModule\nfrom ..interpreterbase import permittedKwargs, FeatureNewKwargs\n\nclass WindowsModule(ExtensionModule):\n\n def detect_compiler(self, compilers):\n for l in ('c', 'cpp'):\n if l in compilers:\n return compilers[l]\n raise MesonException('Resource compilation requires a C or C++ compiler.')\n\n @FeatureNewKwargs('windows.compile_resources', '0.47.0', ['depend_files'])\n @permittedKwargs({'args', 'include_directories', 'depend_files'})\n def compile_resources(self, state, args, kwargs):\n comp = self.detect_compiler(state.compilers)\n\n extra_args = mesonlib.stringlistify(kwargs.get('args', []))\n wrc_deps = extract_as_list(kwargs, 'depend_files', pop = True)\n inc_dirs = extract_as_list(kwargs, 'include_directories', pop = True)\n for incd in inc_dirs:\n if not isinstance(incd.held_object, (str, build.IncludeDirs)):\n raise MesonException('Resource include dirs should be include_directories().')\n extra_args += get_include_args(inc_dirs)\n\n if comp.id == 'msvc':\n rescomp = dependencies.ExternalProgram('rc', silent=True)\n res_args = extra_args + ['/nologo', '/fo@OUTPUT@', '@INPUT@']\n suffix = 'res'\n else:\n m = 'Argument {!r} has a space which may not work with windres due to ' \\\n 'a MinGW bug: https://sourceware.org/bugzilla/show_bug.cgi?id=4933'\n for arg in extra_args:\n if ' ' in arg:\n mlog.warning(m.format(arg))\n rescomp_name = None\n # FIXME: Does not handle `native: true` executables, see\n # https://github.com/mesonbuild/meson/issues/1531\n if state.environment.is_cross_build():\n # If cross compiling see if windres has been specified in the\n # cross file before trying to find it another way.\n rescomp_name = state.environment.cross_info.config['binaries'].get('windres')\n if rescomp_name is None:\n # Pick-up env var WINDRES if set. 
This is often used for\n # specifying an arch-specific windres.\n rescomp_name = os.environ.get('WINDRES', 'windres')\n rescomp = dependencies.ExternalProgram(rescomp_name, silent=True)\n res_args = extra_args + ['@INPUT@', '@OUTPUT@']\n suffix = 'o'\n if not rescomp.found():\n raise MesonException('Could not find Windows resource compiler \"%s\".' % rescomp_name)\n\n res_targets = []\n\n def add_target(src):\n if isinstance(src, list):\n for subsrc in src:\n add_target(subsrc)\n return\n\n if hasattr(src, 'held_object'):\n src = src.held_object\n\n res_kwargs = {\n 'output': '@BASENAME@.' + suffix,\n 'input': [src],\n 'command': [rescomp] + res_args,\n 'depend_files': wrc_deps,\n }\n\n if isinstance(src, str):\n name = 'file {!r}'.format(os.path.join(state.subdir, src))\n elif isinstance(src, mesonlib.File):\n name = 'file {!r}'.format(src.relative_name())\n elif isinstance(src, build.CustomTarget):\n if len(src.get_outputs()) > 1:\n raise MesonException('windows.compile_resources does not accept custom targets with more than 1 output.')\n\n name = 'target {!r}'.format(src.get_id())\n else:\n raise MesonException('Unexpected source type {!r}. windows.compile_resources accepts only strings, files, custom targets, and lists thereof.'.format(src))\n\n # Path separators are not allowed in target names\n name = name.replace('/', '_').replace('\\\\', '_')\n\n # instruct binutils windres to generate a preprocessor depfile\n if comp.id != 'msvc':\n res_kwargs['depfile'] = res_kwargs['output'] + '.d'\n res_kwargs['command'] += ['--preprocessor-arg=-MD', '--preprocessor-arg=-MQ@OUTPUT@', '--preprocessor-arg=-MF@DEPFILE@']\n\n res_targets.append(build.CustomTarget('Windows resource for ' + name, state.subdir, state.subproject, res_kwargs))\n\n add_target(args)\n\n return ModuleReturnValue(res_targets, [res_targets])\n\ndef initialize(*args, **kwargs):\n return WindowsModule(*args, **kwargs)\n", "path": "mesonbuild/modules/windows.py"}]} | 1,776 | 181 |
gh_patches_debug_36499 | rasdani/github-patches | git_diff | pytorch__ignite-380 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Issue with metric arithmetics
I'm trying to define my metric as
```python
from ignite.metrics import Accuracy
accuracy = Accuracy()
error_metric = 1.0 - accuracy
```
and I got the following error:
```
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-70-c4c69e70a6d5> in <module>()
2
3 accuracy = Accuracy()
----> 4 error_metric = 1.0 - accuracy
TypeError: unsupported operand type(s) for -: 'float' and 'Accuracy'
```
But I can define
```python
from ignite.metrics import Accuracy
accuracy = Accuracy()
error_metric = (accuracy - 1.0) * -1.0
```
cc @zasdfgbnm
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ignite/metrics/metric.py`
Content:
```
1 from abc import ABCMeta, abstractmethod
2 from ignite._six import with_metaclass
3 from ignite.engine import Events
4 import torch
5
6
7 class Metric(with_metaclass(ABCMeta, object)):
8 """
9 Base class for all Metrics.
10
11 Args:
12 output_transform (callable, optional): a callable that is used to transform the
13 :class:`ignite.engine.Engine`'s `process_function`'s output into the
14 form expected by the metric. This can be useful if, for example, you have a multi-output model and
15 you want to compute the metric with respect to one of the outputs.
16
17 """
18
19 def __init__(self, output_transform=lambda x: x):
20 self._output_transform = output_transform
21 self.reset()
22
23 @abstractmethod
24 def reset(self):
25 """
26 Resets the metric to to it's initial state.
27
28 This is called at the start of each epoch.
29 """
30 pass
31
32 @abstractmethod
33 def update(self, output):
34 """
35 Updates the metric's state using the passed batch output.
36
37 This is called once for each batch.
38
39 Args:
40 output: the is the output from the engine's process function
41 """
42 pass
43
44 @abstractmethod
45 def compute(self):
46 """
47 Computes the metric based on it's accumulated state.
48
49 This is called at the end of each epoch.
50
51 Returns:
52 Any: the actual quantity of interest
53
54 Raises:
55 NotComputableError: raised when the metric cannot be computed
56 """
57 pass
58
59 def started(self, engine):
60 self.reset()
61
62 @torch.no_grad()
63 def iteration_completed(self, engine):
64 output = self._output_transform(engine.state.output)
65 self.update(output)
66
67 def completed(self, engine, name):
68 engine.state.metrics[name] = self.compute()
69
70 def attach(self, engine, name):
71 engine.add_event_handler(Events.EPOCH_COMPLETED, self.completed, name)
72 if not engine.has_event_handler(self.started, Events.EPOCH_STARTED):
73 engine.add_event_handler(Events.EPOCH_STARTED, self.started)
74 if not engine.has_event_handler(self.iteration_completed, Events.ITERATION_COMPLETED):
75 engine.add_event_handler(Events.ITERATION_COMPLETED, self.iteration_completed)
76
77 def __add__(self, other):
78 from ignite.metrics import MetricsLambda
79 return MetricsLambda(lambda x, y: x + y, self, other)
80
81 def __sub__(self, other):
82 from ignite.metrics import MetricsLambda
83 return MetricsLambda(lambda x, y: x - y, self, other)
84
85 def __mul__(self, other):
86 from ignite.metrics import MetricsLambda
87 return MetricsLambda(lambda x, y: x * y, self, other)
88
89 def __pow__(self, other):
90 from ignite.metrics import MetricsLambda
91 return MetricsLambda(lambda x, y: x ** y, self, other)
92
93 def __mod__(self, other):
94 from ignite.metrics import MetricsLambda
95 return MetricsLambda(lambda x, y: x % y, self, other)
96
97 def __div__(self, other):
98 from ignite.metrics import MetricsLambda
99 return MetricsLambda(lambda x, y: x.__div__(y), self, other)
100
101 def __truediv__(self, other):
102 from ignite.metrics import MetricsLambda
103 return MetricsLambda(lambda x, y: x.__truediv__(y), self, other)
104
105 def __floordiv__(self, other):
106 from ignite.metrics import MetricsLambda
107 return MetricsLambda(lambda x, y: x // y, self, other)
108
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ignite/metrics/metric.py b/ignite/metrics/metric.py
--- a/ignite/metrics/metric.py
+++ b/ignite/metrics/metric.py
@@ -78,18 +78,34 @@
from ignite.metrics import MetricsLambda
return MetricsLambda(lambda x, y: x + y, self, other)
+ def __radd__(self, other):
+ from ignite.metrics import MetricsLambda
+ return MetricsLambda(lambda x, y: x + y, other, self)
+
def __sub__(self, other):
from ignite.metrics import MetricsLambda
return MetricsLambda(lambda x, y: x - y, self, other)
+ def __rsub__(self, other):
+ from ignite.metrics import MetricsLambda
+ return MetricsLambda(lambda x, y: x - y, other, self)
+
def __mul__(self, other):
from ignite.metrics import MetricsLambda
return MetricsLambda(lambda x, y: x * y, self, other)
+ def __rmul__(self, other):
+ from ignite.metrics import MetricsLambda
+ return MetricsLambda(lambda x, y: x * y, other, self)
+
def __pow__(self, other):
from ignite.metrics import MetricsLambda
return MetricsLambda(lambda x, y: x ** y, self, other)
+ def __rpow__(self, other):
+ from ignite.metrics import MetricsLambda
+ return MetricsLambda(lambda x, y: x ** y, other, self)
+
def __mod__(self, other):
from ignite.metrics import MetricsLambda
return MetricsLambda(lambda x, y: x % y, self, other)
@@ -98,10 +114,18 @@
from ignite.metrics import MetricsLambda
return MetricsLambda(lambda x, y: x.__div__(y), self, other)
+ def __rdiv__(self, other):
+ from ignite.metrics import MetricsLambda
+ return MetricsLambda(lambda x, y: x.__div__(y), other, self)
+
def __truediv__(self, other):
from ignite.metrics import MetricsLambda
return MetricsLambda(lambda x, y: x.__truediv__(y), self, other)
+ def __rtruediv__(self, other):
+ from ignite.metrics import MetricsLambda
+ return MetricsLambda(lambda x, y: x.__truediv__(y), other, self)
+
def __floordiv__(self, other):
from ignite.metrics import MetricsLambda
return MetricsLambda(lambda x, y: x // y, self, other)
| {"golden_diff": "diff --git a/ignite/metrics/metric.py b/ignite/metrics/metric.py\n--- a/ignite/metrics/metric.py\n+++ b/ignite/metrics/metric.py\n@@ -78,18 +78,34 @@\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x + y, self, other)\n \n+ def __radd__(self, other):\n+ from ignite.metrics import MetricsLambda\n+ return MetricsLambda(lambda x, y: x + y, other, self)\n+\n def __sub__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x - y, self, other)\n \n+ def __rsub__(self, other):\n+ from ignite.metrics import MetricsLambda\n+ return MetricsLambda(lambda x, y: x - y, other, self)\n+\n def __mul__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x * y, self, other)\n \n+ def __rmul__(self, other):\n+ from ignite.metrics import MetricsLambda\n+ return MetricsLambda(lambda x, y: x * y, other, self)\n+\n def __pow__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x ** y, self, other)\n \n+ def __rpow__(self, other):\n+ from ignite.metrics import MetricsLambda\n+ return MetricsLambda(lambda x, y: x ** y, other, self)\n+\n def __mod__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x % y, self, other)\n@@ -98,10 +114,18 @@\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x.__div__(y), self, other)\n \n+ def __rdiv__(self, other):\n+ from ignite.metrics import MetricsLambda\n+ return MetricsLambda(lambda x, y: x.__div__(y), other, self)\n+\n def __truediv__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x.__truediv__(y), self, other)\n \n+ def __rtruediv__(self, other):\n+ from ignite.metrics import MetricsLambda\n+ return MetricsLambda(lambda x, y: x.__truediv__(y), other, self)\n+\n def __floordiv__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x // y, self, other)\n", "issue": "Issue with metric arithmetics\nI'm trying to define my metric as \r\n```python\r\nfrom ignite.metrics import Accuracy\r\n\r\naccuracy = Accuracy()\r\nerror_metric = 1.0 - accuracy\r\n```\r\nand I got the following error:\r\n```\r\n---------------------------------------------------------------------------\r\nTypeError Traceback (most recent call last)\r\n<ipython-input-70-c4c69e70a6d5> in <module>()\r\n 2 \r\n 3 accuracy = Accuracy()\r\n----> 4 error_metric = 1.0 - accuracy\r\n\r\nTypeError: unsupported operand type(s) for -: 'float' and 'Accuracy'\r\n```\r\nBut I can define \r\n```python\r\nfrom ignite.metrics import Accuracy\r\n\r\naccuracy = Accuracy()\r\nerror_metric = (accuracy - 1.0) * -1.0\r\n```\r\n\r\ncc @zasdfgbnm \nIssue with metric arithmetics\nI'm trying to define my metric as \r\n```python\r\nfrom ignite.metrics import Accuracy\r\n\r\naccuracy = Accuracy()\r\nerror_metric = 1.0 - accuracy\r\n```\r\nand I got the following error:\r\n```\r\n---------------------------------------------------------------------------\r\nTypeError Traceback (most recent call last)\r\n<ipython-input-70-c4c69e70a6d5> in <module>()\r\n 2 \r\n 3 accuracy = Accuracy()\r\n----> 4 error_metric = 1.0 - accuracy\r\n\r\nTypeError: unsupported operand type(s) for -: 'float' and 'Accuracy'\r\n```\r\nBut I can define \r\n```python\r\nfrom ignite.metrics import Accuracy\r\n\r\naccuracy = Accuracy()\r\nerror_metric = (accuracy - 1.0) * -1.0\r\n```\r\n\r\ncc @zasdfgbnm \n", "before_files": [{"content": "from abc 
import ABCMeta, abstractmethod\nfrom ignite._six import with_metaclass\nfrom ignite.engine import Events\nimport torch\n\n\nclass Metric(with_metaclass(ABCMeta, object)):\n \"\"\"\n Base class for all Metrics.\n\n Args:\n output_transform (callable, optional): a callable that is used to transform the\n :class:`ignite.engine.Engine`'s `process_function`'s output into the\n form expected by the metric. This can be useful if, for example, you have a multi-output model and\n you want to compute the metric with respect to one of the outputs.\n\n \"\"\"\n\n def __init__(self, output_transform=lambda x: x):\n self._output_transform = output_transform\n self.reset()\n\n @abstractmethod\n def reset(self):\n \"\"\"\n Resets the metric to to it's initial state.\n\n This is called at the start of each epoch.\n \"\"\"\n pass\n\n @abstractmethod\n def update(self, output):\n \"\"\"\n Updates the metric's state using the passed batch output.\n\n This is called once for each batch.\n\n Args:\n output: the is the output from the engine's process function\n \"\"\"\n pass\n\n @abstractmethod\n def compute(self):\n \"\"\"\n Computes the metric based on it's accumulated state.\n\n This is called at the end of each epoch.\n\n Returns:\n Any: the actual quantity of interest\n\n Raises:\n NotComputableError: raised when the metric cannot be computed\n \"\"\"\n pass\n\n def started(self, engine):\n self.reset()\n\n @torch.no_grad()\n def iteration_completed(self, engine):\n output = self._output_transform(engine.state.output)\n self.update(output)\n\n def completed(self, engine, name):\n engine.state.metrics[name] = self.compute()\n\n def attach(self, engine, name):\n engine.add_event_handler(Events.EPOCH_COMPLETED, self.completed, name)\n if not engine.has_event_handler(self.started, Events.EPOCH_STARTED):\n engine.add_event_handler(Events.EPOCH_STARTED, self.started)\n if not engine.has_event_handler(self.iteration_completed, Events.ITERATION_COMPLETED):\n engine.add_event_handler(Events.ITERATION_COMPLETED, self.iteration_completed)\n\n def __add__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x + y, self, other)\n\n def __sub__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x - y, self, other)\n\n def __mul__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x * y, self, other)\n\n def __pow__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x ** y, self, other)\n\n def __mod__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x % y, self, other)\n\n def __div__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x.__div__(y), self, other)\n\n def __truediv__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x.__truediv__(y), self, other)\n\n def __floordiv__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x // y, self, other)\n", "path": "ignite/metrics/metric.py"}], "after_files": [{"content": "from abc import ABCMeta, abstractmethod\nfrom ignite._six import with_metaclass\nfrom ignite.engine import Events\nimport torch\n\n\nclass Metric(with_metaclass(ABCMeta, object)):\n \"\"\"\n Base class for all Metrics.\n\n Args:\n output_transform (callable, optional): a callable that is used to transform the\n :class:`ignite.engine.Engine`'s `process_function`'s output 
into the\n form expected by the metric. This can be useful if, for example, you have a multi-output model and\n you want to compute the metric with respect to one of the outputs.\n\n \"\"\"\n\n def __init__(self, output_transform=lambda x: x):\n self._output_transform = output_transform\n self.reset()\n\n @abstractmethod\n def reset(self):\n \"\"\"\n Resets the metric to to it's initial state.\n\n This is called at the start of each epoch.\n \"\"\"\n pass\n\n @abstractmethod\n def update(self, output):\n \"\"\"\n Updates the metric's state using the passed batch output.\n\n This is called once for each batch.\n\n Args:\n output: the is the output from the engine's process function\n \"\"\"\n pass\n\n @abstractmethod\n def compute(self):\n \"\"\"\n Computes the metric based on it's accumulated state.\n\n This is called at the end of each epoch.\n\n Returns:\n Any: the actual quantity of interest\n\n Raises:\n NotComputableError: raised when the metric cannot be computed\n \"\"\"\n pass\n\n def started(self, engine):\n self.reset()\n\n @torch.no_grad()\n def iteration_completed(self, engine):\n output = self._output_transform(engine.state.output)\n self.update(output)\n\n def completed(self, engine, name):\n engine.state.metrics[name] = self.compute()\n\n def attach(self, engine, name):\n engine.add_event_handler(Events.EPOCH_COMPLETED, self.completed, name)\n if not engine.has_event_handler(self.started, Events.EPOCH_STARTED):\n engine.add_event_handler(Events.EPOCH_STARTED, self.started)\n if not engine.has_event_handler(self.iteration_completed, Events.ITERATION_COMPLETED):\n engine.add_event_handler(Events.ITERATION_COMPLETED, self.iteration_completed)\n\n def __add__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x + y, self, other)\n\n def __radd__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x + y, other, self)\n\n def __sub__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x - y, self, other)\n\n def __rsub__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x - y, other, self)\n\n def __mul__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x * y, self, other)\n\n def __rmul__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x * y, other, self)\n\n def __pow__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x ** y, self, other)\n\n def __rpow__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x ** y, other, self)\n\n def __mod__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x % y, self, other)\n\n def __div__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x.__div__(y), self, other)\n\n def __rdiv__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x.__div__(y), other, self)\n\n def __truediv__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x.__truediv__(y), self, other)\n\n def __rtruediv__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x.__truediv__(y), other, self)\n\n def __floordiv__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x // y, self, other)\n", 
"path": "ignite/metrics/metric.py"}]} | 1,598 | 581 |
gh_patches_debug_11137 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-3035 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
tuple index out of range of threading.py
After upgrading from ddtrace==0.46.0 to version ddtrace==0.55.4 my service crashes with IndexError.
```
Traceback (most recent call last):
File "/my_service/services/base_service.py", line 105, in run
futures.append(executor.submit(fn=self._single_entry_point_run, entry_point=entry_point))
File "/my_service/venv/lib/python3.7/site-packages/ddtrace/contrib/futures/threading.py", line 26, in _wrap_submit
fn = args[0]
IndexError: tuple index out of range
```
I'm facing this issue even when setting futures=False.
`patch_all(celery=True, django=True, psycopg2=True, redis=True, futures=True)`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ddtrace/contrib/futures/threading.py`
Content:
```
1 import ddtrace
2
3
4 def _wrap_submit(func, instance, args, kwargs):
5 """
6 Wrap `Executor` method used to submit a work executed in another
7 thread. This wrapper ensures that a new `Context` is created and
8 properly propagated using an intermediate function.
9 """
10 # If there isn't a currently active context, then do not create one
11 # DEV: Calling `.active()` when there isn't an active context will create a new context
12 # DEV: We need to do this in case they are either:
13 # - Starting nested futures
14 # - Starting futures from outside of an existing context
15 #
16 # In either of these cases we essentially will propagate the wrong context between futures
17 #
18 # The resolution is to not create/propagate a new context if one does not exist, but let the
19 # future's thread create the context instead.
20 current_ctx = None
21 if ddtrace.tracer.context_provider._has_active_context():
22 current_ctx = ddtrace.tracer.context_provider.active()
23
24 # extract the target function that must be executed in
25 # a new thread and the `target` arguments
26 fn = args[0]
27 fn_args = args[1:]
28 return func(_wrap_execution, current_ctx, fn, fn_args, kwargs)
29
30
31 def _wrap_execution(ctx, fn, args, kwargs):
32 """
33 Intermediate target function that is executed in a new thread;
34 it receives the original function with arguments and keyword
35 arguments, including our tracing `Context`. The current context
36 provider sets the Active context in a thread local storage
37 variable because it's outside the asynchronous loop.
38 """
39 if ctx is not None:
40 ddtrace.tracer.context_provider.activate(ctx)
41 return fn(*args, **kwargs)
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ddtrace/contrib/futures/threading.py b/ddtrace/contrib/futures/threading.py
--- a/ddtrace/contrib/futures/threading.py
+++ b/ddtrace/contrib/futures/threading.py
@@ -21,10 +21,12 @@
if ddtrace.tracer.context_provider._has_active_context():
current_ctx = ddtrace.tracer.context_provider.active()
- # extract the target function that must be executed in
- # a new thread and the `target` arguments
- fn = args[0]
- fn_args = args[1:]
+ # The target function can be provided as a kwarg argument "fn" or the first positional argument
+ if "fn" in kwargs:
+ fn = kwargs.pop("fn")
+ fn_args = args
+ else:
+ fn, fn_args = args[0], args[1:]
return func(_wrap_execution, current_ctx, fn, fn_args, kwargs)
| {"golden_diff": "diff --git a/ddtrace/contrib/futures/threading.py b/ddtrace/contrib/futures/threading.py\n--- a/ddtrace/contrib/futures/threading.py\n+++ b/ddtrace/contrib/futures/threading.py\n@@ -21,10 +21,12 @@\n if ddtrace.tracer.context_provider._has_active_context():\n current_ctx = ddtrace.tracer.context_provider.active()\n \n- # extract the target function that must be executed in\n- # a new thread and the `target` arguments\n- fn = args[0]\n- fn_args = args[1:]\n+ # The target function can be provided as a kwarg argument \"fn\" or the first positional argument\n+ if \"fn\" in kwargs:\n+ fn = kwargs.pop(\"fn\")\n+ fn_args = args\n+ else:\n+ fn, fn_args = args[0], args[1:]\n return func(_wrap_execution, current_ctx, fn, fn_args, kwargs)\n", "issue": "tuple index out of range of threading.py\nAfter upgrading from ddtrace==0.46.0 to version ddtrace==0.55.4 my service crash with IndexError.\r\n```\r\nTraceback (most recent call last):\r\n File \"/my_service/services/base_service.py\", line 105, in run\r\n futures.append(executor.submit(fn=self._single_entry_point_run, entry_point=entry_point))\r\n File \"/my_service/venv/lib/python3.7/site-packages/ddtrace/contrib/futures/threading.py\", line 26, in _wrap_submit\r\n fn = args[0]\r\nIndexError: tuple index out of range\r\n```\r\n\r\nI'm facing this issue even when setting futures=False.\r\n`patch_all(celery=True, django=True, psycopg2=True, redis=True, futures=True)`\r\n\n", "before_files": [{"content": "import ddtrace\n\n\ndef _wrap_submit(func, instance, args, kwargs):\n \"\"\"\n Wrap `Executor` method used to submit a work executed in another\n thread. This wrapper ensures that a new `Context` is created and\n properly propagated using an intermediate function.\n \"\"\"\n # If there isn't a currently active context, then do not create one\n # DEV: Calling `.active()` when there isn't an active context will create a new context\n # DEV: We need to do this in case they are either:\n # - Starting nested futures\n # - Starting futures from outside of an existing context\n #\n # In either of these cases we essentially will propagate the wrong context between futures\n #\n # The resolution is to not create/propagate a new context if one does not exist, but let the\n # future's thread create the context instead.\n current_ctx = None\n if ddtrace.tracer.context_provider._has_active_context():\n current_ctx = ddtrace.tracer.context_provider.active()\n\n # extract the target function that must be executed in\n # a new thread and the `target` arguments\n fn = args[0]\n fn_args = args[1:]\n return func(_wrap_execution, current_ctx, fn, fn_args, kwargs)\n\n\ndef _wrap_execution(ctx, fn, args, kwargs):\n \"\"\"\n Intermediate target function that is executed in a new thread;\n it receives the original function with arguments and keyword\n arguments, including our tracing `Context`. The current context\n provider sets the Active context in a thread local storage\n variable because it's outside the asynchronous loop.\n \"\"\"\n if ctx is not None:\n ddtrace.tracer.context_provider.activate(ctx)\n return fn(*args, **kwargs)\n", "path": "ddtrace/contrib/futures/threading.py"}], "after_files": [{"content": "import ddtrace\n\n\ndef _wrap_submit(func, instance, args, kwargs):\n \"\"\"\n Wrap `Executor` method used to submit a work executed in another\n thread. 
This wrapper ensures that a new `Context` is created and\n properly propagated using an intermediate function.\n \"\"\"\n # If there isn't a currently active context, then do not create one\n # DEV: Calling `.active()` when there isn't an active context will create a new context\n # DEV: We need to do this in case they are either:\n # - Starting nested futures\n # - Starting futures from outside of an existing context\n #\n # In either of these cases we essentially will propagate the wrong context between futures\n #\n # The resolution is to not create/propagate a new context if one does not exist, but let the\n # future's thread create the context instead.\n current_ctx = None\n if ddtrace.tracer.context_provider._has_active_context():\n current_ctx = ddtrace.tracer.context_provider.active()\n\n # The target function can be provided as a kwarg argument \"fn\" or the first positional argument\n if \"fn\" in kwargs:\n fn = kwargs.pop(\"fn\")\n fn_args = args\n else:\n fn, fn_args = args[0], args[1:]\n return func(_wrap_execution, current_ctx, fn, fn_args, kwargs)\n\n\ndef _wrap_execution(ctx, fn, args, kwargs):\n \"\"\"\n Intermediate target function that is executed in a new thread;\n it receives the original function with arguments and keyword\n arguments, including our tracing `Context`. The current context\n provider sets the Active context in a thread local storage\n variable because it's outside the asynchronous loop.\n \"\"\"\n if ctx is not None:\n ddtrace.tracer.context_provider.activate(ctx)\n return fn(*args, **kwargs)\n", "path": "ddtrace/contrib/futures/threading.py"}]} | 895 | 215 |
gh_patches_debug_20162 | rasdani/github-patches | git_diff | Kinto__kinto-120 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Default bucket UUID doesn't have dashes
I've seen that default record IDs have dashes whereas the default bucket ID doesn't.
Does it make sense to try to be consistent here?
```
$ http GET http://localhost:8888/v1/buckets/e93a0bb5b7d16d4f9bfd81b6d737271c -v --auth 'mary:marypassword'
{
"data": {
"id": "e93a0bb5b7d16d4f9bfd81b6d737271c",
"last_modified": 1436191171386
},
[...]
}
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/views/buckets.py`
Content:
```
1 from pyramid.httpexceptions import HTTPForbidden, HTTPPreconditionFailed
2 from pyramid.security import NO_PERMISSION_REQUIRED
3 from pyramid.view import view_config
4
5 from cliquet import resource
6 from cliquet.utils import hmac_digest, build_request
7
8 from kinto.views import NameGenerator
9
10
11 def create_bucket(request, bucket_id):
12 """Create a bucket if it doesn't exists."""
13 bucket_put = (request.method.lower() == 'put' and
14 request.path.endswith('buckets/default'))
15
16 if not bucket_put:
17 subrequest = build_request(request, {
18 'method': 'PUT',
19 'path': '/buckets/%s' % bucket_id,
20 'body': {"data": {}},
21 'headers': {'If-None-Match': '*'.encode('utf-8')}
22 })
23
24 try:
25 request.invoke_subrequest(subrequest)
26 except HTTPPreconditionFailed:
27 # The bucket already exists
28 pass
29
30
31 def create_collection(request, bucket_id):
32 subpath = request.matchdict['subpath']
33 if subpath.startswith('/collections/'):
34 collection_id = subpath.split('/')[2]
35 collection_put = (request.method.lower() == 'put' and
36 request.path.endswith(collection_id))
37 if not collection_put:
38 subrequest = build_request(request, {
39 'method': 'PUT',
40 'path': '/buckets/%s/collections/%s' % (
41 bucket_id, collection_id),
42 'body': {"data": {}},
43 'headers': {'If-None-Match': '*'.encode('utf-8')}
44 })
45 try:
46 request.invoke_subrequest(subrequest)
47 except HTTPPreconditionFailed:
48 # The collection already exists
49 pass
50
51
52 @view_config(route_name='default_bucket', permission=NO_PERMISSION_REQUIRED)
53 def default_bucket(request):
54 if getattr(request, 'prefixed_userid', None) is None:
55 raise HTTPForbidden # Pass through the forbidden_view_config
56
57 settings = request.registry.settings
58 hmac_secret = settings['cliquet.userid_hmac_secret']
59 # Build the user unguessable bucket_id UUID from its user_id
60 bucket_id = hmac_digest(hmac_secret, request.prefixed_userid)[:32]
61 path = request.path.replace('default', bucket_id)
62 querystring = request.url[(request.url.index(request.path) +
63 len(request.path)):]
64
65 # Make sure bucket exists
66 create_bucket(request, bucket_id)
67
68 # Make sure the collection exists
69 create_collection(request, bucket_id)
70
71 subrequest = build_request(request, {
72 'method': request.method,
73 'path': path + querystring,
74 'body': request.body
75 })
76
77 return request.invoke_subrequest(subrequest)
78
79
80 @resource.register(name='bucket',
81 collection_methods=('GET',),
82 collection_path='/buckets',
83 record_path='/buckets/{{id}}')
84 class Bucket(resource.ProtectedResource):
85 permissions = ('read', 'write', 'collection:create', 'group:create')
86
87 def __init__(self, *args, **kwargs):
88 super(Bucket, self).__init__(*args, **kwargs)
89 self.collection.id_generator = NameGenerator()
90
91 def get_parent_id(self, request):
92 # Buckets are not isolated by user, unlike Cliquet resources.
93 return ''
94
95 def delete(self):
96 result = super(Bucket, self).delete()
97
98 # Delete groups.
99 storage = self.collection.storage
100 parent_id = '/buckets/%s' % self.record_id
101 storage.delete_all(collection_id='group', parent_id=parent_id)
102
103 # Delete collections.
104 deleted = storage.delete_all(collection_id='collection',
105 parent_id=parent_id)
106
107 # Delete records.
108 id_field = self.collection.id_field
109 for collection in deleted:
110 parent_id = '/buckets/%s/collections/%s' % (self.record_id,
111 collection[id_field])
112 storage.delete_all(collection_id='record', parent_id=parent_id)
113
114 return result
115
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kinto/views/buckets.py b/kinto/views/buckets.py
--- a/kinto/views/buckets.py
+++ b/kinto/views/buckets.py
@@ -1,3 +1,6 @@
+from six import text_type
+from uuid import UUID
+
from pyramid.httpexceptions import HTTPForbidden, HTTPPreconditionFailed
from pyramid.security import NO_PERMISSION_REQUIRED
from pyramid.view import view_config
@@ -57,7 +60,8 @@
settings = request.registry.settings
hmac_secret = settings['cliquet.userid_hmac_secret']
# Build the user unguessable bucket_id UUID from its user_id
- bucket_id = hmac_digest(hmac_secret, request.prefixed_userid)[:32]
+ digest = hmac_digest(hmac_secret, request.prefixed_userid)
+ bucket_id = text_type(UUID(digest[:32]))
path = request.path.replace('default', bucket_id)
querystring = request.url[(request.url.index(request.path) +
len(request.path)):]
| {"golden_diff": "diff --git a/kinto/views/buckets.py b/kinto/views/buckets.py\n--- a/kinto/views/buckets.py\n+++ b/kinto/views/buckets.py\n@@ -1,3 +1,6 @@\n+from six import text_type\n+from uuid import UUID\n+\n from pyramid.httpexceptions import HTTPForbidden, HTTPPreconditionFailed\n from pyramid.security import NO_PERMISSION_REQUIRED\n from pyramid.view import view_config\n@@ -57,7 +60,8 @@\n settings = request.registry.settings\n hmac_secret = settings['cliquet.userid_hmac_secret']\n # Build the user unguessable bucket_id UUID from its user_id\n- bucket_id = hmac_digest(hmac_secret, request.prefixed_userid)[:32]\n+ digest = hmac_digest(hmac_secret, request.prefixed_userid)\n+ bucket_id = text_type(UUID(digest[:32]))\n path = request.path.replace('default', bucket_id)\n querystring = request.url[(request.url.index(request.path) +\n len(request.path)):]\n", "issue": "Default bucket UUID doesn't have dashes\nI've seen that default record ID's have got dashes whereas default bucket id doesn't.\n\nDoes it makes sense to try to be consistent here?\n\n```\n$ http GET http://localhost:8888/v1/buckets/e93a0bb5b7d16d4f9bfd81b6d737271c -v --auth 'mary:marypassword'\n{\n \"data\": {\n \"id\": \"e93a0bb5b7d16d4f9bfd81b6d737271c\", \n \"last_modified\": 1436191171386\n }, \n [...]\n}\n```\n\n", "before_files": [{"content": "from pyramid.httpexceptions import HTTPForbidden, HTTPPreconditionFailed\nfrom pyramid.security import NO_PERMISSION_REQUIRED\nfrom pyramid.view import view_config\n\nfrom cliquet import resource\nfrom cliquet.utils import hmac_digest, build_request\n\nfrom kinto.views import NameGenerator\n\n\ndef create_bucket(request, bucket_id):\n \"\"\"Create a bucket if it doesn't exists.\"\"\"\n bucket_put = (request.method.lower() == 'put' and\n request.path.endswith('buckets/default'))\n\n if not bucket_put:\n subrequest = build_request(request, {\n 'method': 'PUT',\n 'path': '/buckets/%s' % bucket_id,\n 'body': {\"data\": {}},\n 'headers': {'If-None-Match': '*'.encode('utf-8')}\n })\n\n try:\n request.invoke_subrequest(subrequest)\n except HTTPPreconditionFailed:\n # The bucket already exists\n pass\n\n\ndef create_collection(request, bucket_id):\n subpath = request.matchdict['subpath']\n if subpath.startswith('/collections/'):\n collection_id = subpath.split('/')[2]\n collection_put = (request.method.lower() == 'put' and\n request.path.endswith(collection_id))\n if not collection_put:\n subrequest = build_request(request, {\n 'method': 'PUT',\n 'path': '/buckets/%s/collections/%s' % (\n bucket_id, collection_id),\n 'body': {\"data\": {}},\n 'headers': {'If-None-Match': '*'.encode('utf-8')}\n })\n try:\n request.invoke_subrequest(subrequest)\n except HTTPPreconditionFailed:\n # The collection already exists\n pass\n\n\n@view_config(route_name='default_bucket', permission=NO_PERMISSION_REQUIRED)\ndef default_bucket(request):\n if getattr(request, 'prefixed_userid', None) is None:\n raise HTTPForbidden # Pass through the forbidden_view_config\n\n settings = request.registry.settings\n hmac_secret = settings['cliquet.userid_hmac_secret']\n # Build the user unguessable bucket_id UUID from its user_id\n bucket_id = hmac_digest(hmac_secret, request.prefixed_userid)[:32]\n path = request.path.replace('default', bucket_id)\n querystring = request.url[(request.url.index(request.path) +\n len(request.path)):]\n\n # Make sure bucket exists\n create_bucket(request, bucket_id)\n\n # Make sure the collection exists\n create_collection(request, bucket_id)\n\n subrequest = build_request(request, {\n 
'method': request.method,\n 'path': path + querystring,\n 'body': request.body\n })\n\n return request.invoke_subrequest(subrequest)\n\n\[email protected](name='bucket',\n collection_methods=('GET',),\n collection_path='/buckets',\n record_path='/buckets/{{id}}')\nclass Bucket(resource.ProtectedResource):\n permissions = ('read', 'write', 'collection:create', 'group:create')\n\n def __init__(self, *args, **kwargs):\n super(Bucket, self).__init__(*args, **kwargs)\n self.collection.id_generator = NameGenerator()\n\n def get_parent_id(self, request):\n # Buckets are not isolated by user, unlike Cliquet resources.\n return ''\n\n def delete(self):\n result = super(Bucket, self).delete()\n\n # Delete groups.\n storage = self.collection.storage\n parent_id = '/buckets/%s' % self.record_id\n storage.delete_all(collection_id='group', parent_id=parent_id)\n\n # Delete collections.\n deleted = storage.delete_all(collection_id='collection',\n parent_id=parent_id)\n\n # Delete records.\n id_field = self.collection.id_field\n for collection in deleted:\n parent_id = '/buckets/%s/collections/%s' % (self.record_id,\n collection[id_field])\n storage.delete_all(collection_id='record', parent_id=parent_id)\n\n return result\n", "path": "kinto/views/buckets.py"}], "after_files": [{"content": "from six import text_type\nfrom uuid import UUID\n\nfrom pyramid.httpexceptions import HTTPForbidden, HTTPPreconditionFailed\nfrom pyramid.security import NO_PERMISSION_REQUIRED\nfrom pyramid.view import view_config\n\nfrom cliquet import resource\nfrom cliquet.utils import hmac_digest, build_request\n\nfrom kinto.views import NameGenerator\n\n\ndef create_bucket(request, bucket_id):\n \"\"\"Create a bucket if it doesn't exists.\"\"\"\n bucket_put = (request.method.lower() == 'put' and\n request.path.endswith('buckets/default'))\n\n if not bucket_put:\n subrequest = build_request(request, {\n 'method': 'PUT',\n 'path': '/buckets/%s' % bucket_id,\n 'body': {\"data\": {}},\n 'headers': {'If-None-Match': '*'.encode('utf-8')}\n })\n\n try:\n request.invoke_subrequest(subrequest)\n except HTTPPreconditionFailed:\n # The bucket already exists\n pass\n\n\ndef create_collection(request, bucket_id):\n subpath = request.matchdict['subpath']\n if subpath.startswith('/collections/'):\n collection_id = subpath.split('/')[2]\n collection_put = (request.method.lower() == 'put' and\n request.path.endswith(collection_id))\n if not collection_put:\n subrequest = build_request(request, {\n 'method': 'PUT',\n 'path': '/buckets/%s/collections/%s' % (\n bucket_id, collection_id),\n 'body': {\"data\": {}},\n 'headers': {'If-None-Match': '*'.encode('utf-8')}\n })\n try:\n request.invoke_subrequest(subrequest)\n except HTTPPreconditionFailed:\n # The collection already exists\n pass\n\n\n@view_config(route_name='default_bucket', permission=NO_PERMISSION_REQUIRED)\ndef default_bucket(request):\n if getattr(request, 'prefixed_userid', None) is None:\n raise HTTPForbidden # Pass through the forbidden_view_config\n\n settings = request.registry.settings\n hmac_secret = settings['cliquet.userid_hmac_secret']\n # Build the user unguessable bucket_id UUID from its user_id\n digest = hmac_digest(hmac_secret, request.prefixed_userid)\n bucket_id = text_type(UUID(digest[:32]))\n path = request.path.replace('default', bucket_id)\n querystring = request.url[(request.url.index(request.path) +\n len(request.path)):]\n\n # Make sure bucket exists\n create_bucket(request, bucket_id)\n\n # Make sure the collection exists\n create_collection(request, 
bucket_id)\n\n subrequest = build_request(request, {\n 'method': request.method,\n 'path': path + querystring,\n 'body': request.body\n })\n\n return request.invoke_subrequest(subrequest)\n\n\[email protected](name='bucket',\n collection_methods=('GET',),\n collection_path='/buckets',\n record_path='/buckets/{{id}}')\nclass Bucket(resource.ProtectedResource):\n permissions = ('read', 'write', 'collection:create', 'group:create')\n\n def __init__(self, *args, **kwargs):\n super(Bucket, self).__init__(*args, **kwargs)\n self.collection.id_generator = NameGenerator()\n\n def get_parent_id(self, request):\n # Buckets are not isolated by user, unlike Cliquet resources.\n return ''\n\n def delete(self):\n result = super(Bucket, self).delete()\n\n # Delete groups.\n storage = self.collection.storage\n parent_id = '/buckets/%s' % self.record_id\n storage.delete_all(collection_id='group', parent_id=parent_id)\n\n # Delete collections.\n deleted = storage.delete_all(collection_id='collection',\n parent_id=parent_id)\n\n # Delete records.\n id_field = self.collection.id_field\n for collection in deleted:\n parent_id = '/buckets/%s/collections/%s' % (self.record_id,\n collection[id_field])\n storage.delete_all(collection_id='record', parent_id=parent_id)\n\n return result\n", "path": "kinto/views/buckets.py"}]} | 1,503 | 217 |
gh_patches_debug_16866 | rasdani/github-patches | git_diff | plone__Products.CMFPlone-1528 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CSS bundle generation breaks relative URLs in background images
This is a bug related to PR #1300.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Products/CMFPlone/resources/browser/combine.py`
Content:
```
1 import re
2 from zExceptions import NotFound
3 from Acquisition import aq_base
4 from datetime import datetime
5 from plone.registry.interfaces import IRegistry
6 from plone.resource.file import FilesystemFile
7 from plone.resource.interfaces import IResourceDirectory
8 from Products.CMFPlone.interfaces import IBundleRegistry
9 from Products.CMFPlone.interfaces.resources import (
10 OVERRIDE_RESOURCE_DIRECTORY_NAME,
11 )
12 from StringIO import StringIO
13 from zope.component import getUtility
14 from zope.component import queryUtility
15
16 PRODUCTION_RESOURCE_DIRECTORY = "production"
17
18
19 def get_production_resource_directory():
20 persistent_directory = queryUtility(IResourceDirectory, name="persistent")
21 if persistent_directory is None:
22 return ''
23 container = persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME]
24 try:
25 production_folder = container[PRODUCTION_RESOURCE_DIRECTORY]
26 except NotFound:
27 return "%s/++unique++1" % PRODUCTION_RESOURCE_DIRECTORY
28 timestamp = production_folder.readFile('timestamp.txt')
29 return "%s/++unique++%s" % (
30 PRODUCTION_RESOURCE_DIRECTORY, timestamp)
31
32
33 def get_resource(context, path):
34 if path.startswith('++plone++'):
35 # ++plone++ resources can be customized, we return their override
36 # value if any
37 overrides = get_override_directory(context)
38 filepath = path[9:]
39 if overrides.isFile(filepath):
40 return overrides.readFile(filepath)
41
42 resource = context.unrestrictedTraverse(path)
43 if isinstance(resource, FilesystemFile):
44 (directory, sep, filename) = path.rpartition('/')
45 return context.unrestrictedTraverse(directory).readFile(filename)
46 else:
47 if hasattr(aq_base(resource), 'GET'):
48 # for FileResource
49 return resource.GET()
50 else:
51 # any BrowserView
52 return resource()
53
54
55 def write_js(context, folder, meta_bundle):
56 registry = getUtility(IRegistry)
57 resources = []
58
59 # default resources
60 if meta_bundle == 'default' and registry.records.get(
61 'plone.resources/jquery.js'
62 ):
63 resources.append(get_resource(context,
64 registry.records['plone.resources/jquery.js'].value))
65 resources.append(get_resource(context,
66 registry.records['plone.resources.requirejs'].value))
67 resources.append(get_resource(context,
68 registry.records['plone.resources.configjs'].value))
69
70 # bundles
71 bundles = registry.collectionOfInterface(
72 IBundleRegistry, prefix="plone.bundles", check=False)
73 for bundle in bundles.values():
74 if bundle.merge_with == meta_bundle and bundle.jscompilation:
75 resources.append(get_resource(context, bundle.jscompilation))
76
77 fi = StringIO()
78 for script in resources:
79 fi.write(script + '\n')
80 folder.writeFile(meta_bundle + ".js", fi)
81
82
83 def write_css(context, folder, meta_bundle):
84 registry = getUtility(IRegistry)
85 resources = []
86
87 bundles = registry.collectionOfInterface(
88 IBundleRegistry, prefix="plone.bundles", check=False)
89 for bundle in bundles.values():
90 if bundle.merge_with == meta_bundle and bundle.csscompilation:
91 css = get_resource(context, bundle.csscompilation)
92 # Preserve relative urls:
93 # we prefix with '../'' any url not starting with '/'
94 # or http: or data:
95 css = re.sub(
96 r"""(url\(['"]?(?!['"]?([a-z]+:|\/)))""",
97 r'\1../',
98 css)
99 resources.append(css)
100
101 fi = StringIO()
102 for script in resources:
103 fi.write(script + '\n')
104 folder.writeFile(meta_bundle + ".css", fi)
105
106
107 def get_override_directory(context):
108 persistent_directory = queryUtility(IResourceDirectory, name="persistent")
109 if persistent_directory is None:
110 return
111 if OVERRIDE_RESOURCE_DIRECTORY_NAME not in persistent_directory:
112 persistent_directory.makeDirectory(OVERRIDE_RESOURCE_DIRECTORY_NAME)
113 return persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME]
114
115
116 def combine_bundles(context):
117 container = get_override_directory(context)
118 if PRODUCTION_RESOURCE_DIRECTORY not in container:
119 container.makeDirectory(PRODUCTION_RESOURCE_DIRECTORY)
120 production_folder = container[PRODUCTION_RESOURCE_DIRECTORY]
121
122 # store timestamp
123 fi = StringIO()
124 fi.write(datetime.now().isoformat())
125 production_folder.writeFile("timestamp.txt", fi)
126
127 # generate new combined bundles
128 write_js(context, production_folder, 'default')
129 write_js(context, production_folder, 'logged-in')
130 write_css(context, production_folder, 'default')
131 write_css(context, production_folder, 'logged-in')
132
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/Products/CMFPlone/resources/browser/combine.py b/Products/CMFPlone/resources/browser/combine.py
--- a/Products/CMFPlone/resources/browser/combine.py
+++ b/Products/CMFPlone/resources/browser/combine.py
@@ -89,12 +89,13 @@
for bundle in bundles.values():
if bundle.merge_with == meta_bundle and bundle.csscompilation:
css = get_resource(context, bundle.csscompilation)
- # Preserve relative urls:
- # we prefix with '../'' any url not starting with '/'
- # or http: or data:
+ (path, sep, filename) = bundle.csscompilation.rpartition('/')
+ # Process relative urls:
+ # we prefix with current resource path any url not starting with
+ # '/' or http: or data:
css = re.sub(
r"""(url\(['"]?(?!['"]?([a-z]+:|\/)))""",
- r'\1../',
+ r'\1%s/' % path,
css)
resources.append(css)
| {"golden_diff": "diff --git a/Products/CMFPlone/resources/browser/combine.py b/Products/CMFPlone/resources/browser/combine.py\n--- a/Products/CMFPlone/resources/browser/combine.py\n+++ b/Products/CMFPlone/resources/browser/combine.py\n@@ -89,12 +89,13 @@\n for bundle in bundles.values():\n if bundle.merge_with == meta_bundle and bundle.csscompilation:\n css = get_resource(context, bundle.csscompilation)\n- # Preserve relative urls:\n- # we prefix with '../'' any url not starting with '/'\n- # or http: or data:\n+ (path, sep, filename) = bundle.csscompilation.rpartition('/')\n+ # Process relative urls:\n+ # we prefix with current resource path any url not starting with\n+ # '/' or http: or data:\n css = re.sub(\n r\"\"\"(url\\(['\"]?(?!['\"]?([a-z]+:|\\/)))\"\"\",\n- r'\\1../',\n+ r'\\1%s/' % path,\n css)\n resources.append(css)\n", "issue": "CSS bundles generation breaks background images relative urls\nThis is a bug related to PR #1300.\n\n", "before_files": [{"content": "import re\nfrom zExceptions import NotFound\nfrom Acquisition import aq_base\nfrom datetime import datetime\nfrom plone.registry.interfaces import IRegistry\nfrom plone.resource.file import FilesystemFile\nfrom plone.resource.interfaces import IResourceDirectory\nfrom Products.CMFPlone.interfaces import IBundleRegistry\nfrom Products.CMFPlone.interfaces.resources import (\n OVERRIDE_RESOURCE_DIRECTORY_NAME,\n)\nfrom StringIO import StringIO\nfrom zope.component import getUtility\nfrom zope.component import queryUtility\n\nPRODUCTION_RESOURCE_DIRECTORY = \"production\"\n\n\ndef get_production_resource_directory():\n persistent_directory = queryUtility(IResourceDirectory, name=\"persistent\")\n if persistent_directory is None:\n return ''\n container = persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME]\n try:\n production_folder = container[PRODUCTION_RESOURCE_DIRECTORY]\n except NotFound:\n return \"%s/++unique++1\" % PRODUCTION_RESOURCE_DIRECTORY\n timestamp = production_folder.readFile('timestamp.txt')\n return \"%s/++unique++%s\" % (\n PRODUCTION_RESOURCE_DIRECTORY, timestamp)\n\n\ndef get_resource(context, path):\n if path.startswith('++plone++'):\n # ++plone++ resources can be customized, we return their override\n # value if any\n overrides = get_override_directory(context)\n filepath = path[9:]\n if overrides.isFile(filepath):\n return overrides.readFile(filepath)\n\n resource = context.unrestrictedTraverse(path)\n if isinstance(resource, FilesystemFile):\n (directory, sep, filename) = path.rpartition('/')\n return context.unrestrictedTraverse(directory).readFile(filename)\n else:\n if hasattr(aq_base(resource), 'GET'):\n # for FileResource\n return resource.GET()\n else:\n # any BrowserView\n return resource()\n\n\ndef write_js(context, folder, meta_bundle):\n registry = getUtility(IRegistry)\n resources = []\n\n # default resources\n if meta_bundle == 'default' and registry.records.get(\n 'plone.resources/jquery.js'\n ):\n resources.append(get_resource(context,\n registry.records['plone.resources/jquery.js'].value))\n resources.append(get_resource(context,\n registry.records['plone.resources.requirejs'].value))\n resources.append(get_resource(context,\n registry.records['plone.resources.configjs'].value))\n\n # bundles\n bundles = registry.collectionOfInterface(\n IBundleRegistry, prefix=\"plone.bundles\", check=False)\n for bundle in bundles.values():\n if bundle.merge_with == meta_bundle and bundle.jscompilation:\n resources.append(get_resource(context, bundle.jscompilation))\n\n fi = StringIO()\n for 
script in resources:\n fi.write(script + '\\n')\n folder.writeFile(meta_bundle + \".js\", fi)\n\n\ndef write_css(context, folder, meta_bundle):\n registry = getUtility(IRegistry)\n resources = []\n\n bundles = registry.collectionOfInterface(\n IBundleRegistry, prefix=\"plone.bundles\", check=False)\n for bundle in bundles.values():\n if bundle.merge_with == meta_bundle and bundle.csscompilation:\n css = get_resource(context, bundle.csscompilation)\n # Preserve relative urls:\n # we prefix with '../'' any url not starting with '/'\n # or http: or data:\n css = re.sub(\n r\"\"\"(url\\(['\"]?(?!['\"]?([a-z]+:|\\/)))\"\"\",\n r'\\1../',\n css)\n resources.append(css)\n\n fi = StringIO()\n for script in resources:\n fi.write(script + '\\n')\n folder.writeFile(meta_bundle + \".css\", fi)\n\n\ndef get_override_directory(context):\n persistent_directory = queryUtility(IResourceDirectory, name=\"persistent\")\n if persistent_directory is None:\n return\n if OVERRIDE_RESOURCE_DIRECTORY_NAME not in persistent_directory:\n persistent_directory.makeDirectory(OVERRIDE_RESOURCE_DIRECTORY_NAME)\n return persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME]\n\n\ndef combine_bundles(context):\n container = get_override_directory(context)\n if PRODUCTION_RESOURCE_DIRECTORY not in container:\n container.makeDirectory(PRODUCTION_RESOURCE_DIRECTORY)\n production_folder = container[PRODUCTION_RESOURCE_DIRECTORY]\n\n # store timestamp\n fi = StringIO()\n fi.write(datetime.now().isoformat())\n production_folder.writeFile(\"timestamp.txt\", fi)\n\n # generate new combined bundles\n write_js(context, production_folder, 'default')\n write_js(context, production_folder, 'logged-in')\n write_css(context, production_folder, 'default')\n write_css(context, production_folder, 'logged-in')\n", "path": "Products/CMFPlone/resources/browser/combine.py"}], "after_files": [{"content": "import re\nfrom zExceptions import NotFound\nfrom Acquisition import aq_base\nfrom datetime import datetime\nfrom plone.registry.interfaces import IRegistry\nfrom plone.resource.file import FilesystemFile\nfrom plone.resource.interfaces import IResourceDirectory\nfrom Products.CMFPlone.interfaces import IBundleRegistry\nfrom Products.CMFPlone.interfaces.resources import (\n OVERRIDE_RESOURCE_DIRECTORY_NAME,\n)\nfrom StringIO import StringIO\nfrom zope.component import getUtility\nfrom zope.component import queryUtility\n\nPRODUCTION_RESOURCE_DIRECTORY = \"production\"\n\n\ndef get_production_resource_directory():\n persistent_directory = queryUtility(IResourceDirectory, name=\"persistent\")\n if persistent_directory is None:\n return ''\n container = persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME]\n try:\n production_folder = container[PRODUCTION_RESOURCE_DIRECTORY]\n except NotFound:\n return \"%s/++unique++1\" % PRODUCTION_RESOURCE_DIRECTORY\n timestamp = production_folder.readFile('timestamp.txt')\n return \"%s/++unique++%s\" % (\n PRODUCTION_RESOURCE_DIRECTORY, timestamp)\n\n\ndef get_resource(context, path):\n if path.startswith('++plone++'):\n # ++plone++ resources can be customized, we return their override\n # value if any\n overrides = get_override_directory(context)\n filepath = path[9:]\n if overrides.isFile(filepath):\n return overrides.readFile(filepath)\n\n resource = context.unrestrictedTraverse(path)\n if isinstance(resource, FilesystemFile):\n (directory, sep, filename) = path.rpartition('/')\n return context.unrestrictedTraverse(directory).readFile(filename)\n else:\n if hasattr(aq_base(resource), 'GET'):\n # for 
FileResource\n return resource.GET()\n else:\n # any BrowserView\n return resource()\n\n\ndef write_js(context, folder, meta_bundle):\n registry = getUtility(IRegistry)\n resources = []\n\n # default resources\n if meta_bundle == 'default' and registry.records.get(\n 'plone.resources/jquery.js'\n ):\n resources.append(get_resource(context,\n registry.records['plone.resources/jquery.js'].value))\n resources.append(get_resource(context,\n registry.records['plone.resources.requirejs'].value))\n resources.append(get_resource(context,\n registry.records['plone.resources.configjs'].value))\n\n # bundles\n bundles = registry.collectionOfInterface(\n IBundleRegistry, prefix=\"plone.bundles\", check=False)\n for bundle in bundles.values():\n if bundle.merge_with == meta_bundle and bundle.jscompilation:\n resources.append(get_resource(context, bundle.jscompilation))\n\n fi = StringIO()\n for script in resources:\n fi.write(script + '\\n')\n folder.writeFile(meta_bundle + \".js\", fi)\n\n\ndef write_css(context, folder, meta_bundle):\n registry = getUtility(IRegistry)\n resources = []\n\n bundles = registry.collectionOfInterface(\n IBundleRegistry, prefix=\"plone.bundles\", check=False)\n for bundle in bundles.values():\n if bundle.merge_with == meta_bundle and bundle.csscompilation:\n css = get_resource(context, bundle.csscompilation)\n (path, sep, filename) = bundle.csscompilation.rpartition('/')\n # Process relative urls:\n # we prefix with current resource path any url not starting with\n # '/' or http: or data:\n css = re.sub(\n r\"\"\"(url\\(['\"]?(?!['\"]?([a-z]+:|\\/)))\"\"\",\n r'\\1%s/' % path,\n css)\n resources.append(css)\n\n fi = StringIO()\n for script in resources:\n fi.write(script + '\\n')\n folder.writeFile(meta_bundle + \".css\", fi)\n\n\ndef get_override_directory(context):\n persistent_directory = queryUtility(IResourceDirectory, name=\"persistent\")\n if persistent_directory is None:\n return\n if OVERRIDE_RESOURCE_DIRECTORY_NAME not in persistent_directory:\n persistent_directory.makeDirectory(OVERRIDE_RESOURCE_DIRECTORY_NAME)\n return persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME]\n\n\ndef combine_bundles(context):\n container = get_override_directory(context)\n if PRODUCTION_RESOURCE_DIRECTORY not in container:\n container.makeDirectory(PRODUCTION_RESOURCE_DIRECTORY)\n production_folder = container[PRODUCTION_RESOURCE_DIRECTORY]\n\n # store timestamp\n fi = StringIO()\n fi.write(datetime.now().isoformat())\n production_folder.writeFile(\"timestamp.txt\", fi)\n\n # generate new combined bundles\n write_js(context, production_folder, 'default')\n write_js(context, production_folder, 'logged-in')\n write_css(context, production_folder, 'default')\n write_css(context, production_folder, 'logged-in')\n", "path": "Products/CMFPlone/resources/browser/combine.py"}]} | 1,523 | 239 |
gh_patches_debug_8379 | rasdani/github-patches | git_diff | kedro-org__kedro-3013 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Document the LIFO order in which hooks are executed in `settings.py`
### Description
We mention that hook implementations registered in `settings.py` run in LIFO order and that auto discovered hooks run before hooks in `settings.py`.
- [ ] We need to also document what the order is in which auto-discovered hooks run. Add this to: https://kedro.readthedocs.io/en/stable/hooks/introduction.html To verify the run order, create a project and install several plugins with hooks to test.
- [ ] Add a comment in the `settings.py` template file to explain the run order of hooks
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kedro/templates/project/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/settings.py`
Content:
```
1 """Project settings. There is no need to edit this file unless you want to change values
2 from the Kedro defaults. For further information, including these default values, see
3 https://kedro.readthedocs.io/en/stable/kedro_project_setup/settings.html."""
4
5 # Instantiated project hooks.
6 # For example, after creating a hooks.py and defining a ProjectHooks class there, do
7 # from {{cookiecutter.python_package}}.hooks import ProjectHooks
8 # HOOKS = (ProjectHooks(),)
9
10 # Installed plugins for which to disable hook auto-registration.
11 # DISABLE_HOOKS_FOR_PLUGINS = ("kedro-viz",)
12
13 # Class that manages storing KedroSession data.
14 # from kedro.framework.session.store import BaseSessionStore
15 # SESSION_STORE_CLASS = BaseSessionStore
16 # Keyword arguments to pass to the `SESSION_STORE_CLASS` constructor.
17 # SESSION_STORE_ARGS = {
18 # "path": "./sessions"
19 # }
20
21 # Directory that holds configuration.
22 # CONF_SOURCE = "conf"
23
24 # Class that manages how configuration is loaded.
25 from kedro.config import OmegaConfigLoader # noqa: import-outside-toplevel
26
27 CONFIG_LOADER_CLASS = OmegaConfigLoader
28 # Keyword arguments to pass to the `CONFIG_LOADER_CLASS` constructor.
29 # CONFIG_LOADER_ARGS = {
30 # "config_patterns": {
31 # "spark" : ["spark*/"],
32 # "parameters": ["parameters*", "parameters*/**", "**/parameters*"],
33 # }
34 # }
35
36 # Class that manages Kedro's library components.
37 # from kedro.framework.context import KedroContext
38 # CONTEXT_CLASS = KedroContext
39
40 # Class that manages the Data Catalog.
41 # from kedro.io import DataCatalog
42 # DATA_CATALOG_CLASS = DataCatalog
43
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kedro/templates/project/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/settings.py b/kedro/templates/project/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/settings.py
--- a/kedro/templates/project/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/settings.py
+++ b/kedro/templates/project/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/settings.py
@@ -5,6 +5,7 @@
# Instantiated project hooks.
# For example, after creating a hooks.py and defining a ProjectHooks class there, do
# from {{cookiecutter.python_package}}.hooks import ProjectHooks
+# Hooks are executed in a Last-In-First-Out (LIFO) order.
# HOOKS = (ProjectHooks(),)
# Installed plugins for which to disable hook auto-registration.
| {"golden_diff": "diff --git a/kedro/templates/project/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/settings.py b/kedro/templates/project/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/settings.py\n--- a/kedro/templates/project/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/settings.py\t\n+++ b/kedro/templates/project/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/settings.py\t\n@@ -5,6 +5,7 @@\n # Instantiated project hooks.\n # For example, after creating a hooks.py and defining a ProjectHooks class there, do\n # from {{cookiecutter.python_package}}.hooks import ProjectHooks\n+# Hooks are executed in a Last-In-First-Out (LIFO) order.\n # HOOKS = (ProjectHooks(),)\n \n # Installed plugins for which to disable hook auto-registration.\n", "issue": "Document the LIFO order in which hooks are executed in `settings.py`\n### Description\r\n\r\nWe mention that hook implementations registered in `settings.py` run in LIFO order and that auto discovered hooks run before hooks in `settings.py`. \r\n\r\n- [ ] We need to also document what the order is in which auto-discovered hooks run. Add this to: https://kedro.readthedocs.io/en/stable/hooks/introduction.html To verify the run order, create a project and install several plugins with hooks to test.\r\n- [ ] Add a comment in the `settings.py` template file to explain the run order of hooks\n", "before_files": [{"content": "\"\"\"Project settings. There is no need to edit this file unless you want to change values\nfrom the Kedro defaults. For further information, including these default values, see\nhttps://kedro.readthedocs.io/en/stable/kedro_project_setup/settings.html.\"\"\"\n\n# Instantiated project hooks.\n# For example, after creating a hooks.py and defining a ProjectHooks class there, do\n# from {{cookiecutter.python_package}}.hooks import ProjectHooks\n# HOOKS = (ProjectHooks(),)\n\n# Installed plugins for which to disable hook auto-registration.\n# DISABLE_HOOKS_FOR_PLUGINS = (\"kedro-viz\",)\n\n# Class that manages storing KedroSession data.\n# from kedro.framework.session.store import BaseSessionStore\n# SESSION_STORE_CLASS = BaseSessionStore\n# Keyword arguments to pass to the `SESSION_STORE_CLASS` constructor.\n# SESSION_STORE_ARGS = {\n# \"path\": \"./sessions\"\n# }\n\n# Directory that holds configuration.\n# CONF_SOURCE = \"conf\"\n\n# Class that manages how configuration is loaded.\nfrom kedro.config import OmegaConfigLoader # noqa: import-outside-toplevel\n\nCONFIG_LOADER_CLASS = OmegaConfigLoader\n# Keyword arguments to pass to the `CONFIG_LOADER_CLASS` constructor.\n# CONFIG_LOADER_ARGS = {\n# \"config_patterns\": {\n# \"spark\" : [\"spark*/\"],\n# \"parameters\": [\"parameters*\", \"parameters*/**\", \"**/parameters*\"],\n# }\n# }\n\n# Class that manages Kedro's library components.\n# from kedro.framework.context import KedroContext\n# CONTEXT_CLASS = KedroContext\n\n# Class that manages the Data Catalog.\n# from kedro.io import DataCatalog\n# DATA_CATALOG_CLASS = DataCatalog\n", "path": "kedro/templates/project/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/settings.py"}], "after_files": [{"content": "\"\"\"Project settings. There is no need to edit this file unless you want to change values\nfrom the Kedro defaults. 
For further information, including these default values, see\nhttps://kedro.readthedocs.io/en/stable/kedro_project_setup/settings.html.\"\"\"\n\n# Instantiated project hooks.\n# For example, after creating a hooks.py and defining a ProjectHooks class there, do\n# from {{cookiecutter.python_package}}.hooks import ProjectHooks\n# Hooks are executed in a Last-In-First-Out (LIFO) order.\n# HOOKS = (ProjectHooks(),)\n\n# Installed plugins for which to disable hook auto-registration.\n# DISABLE_HOOKS_FOR_PLUGINS = (\"kedro-viz\",)\n\n# Class that manages storing KedroSession data.\n# from kedro.framework.session.store import BaseSessionStore\n# SESSION_STORE_CLASS = BaseSessionStore\n# Keyword arguments to pass to the `SESSION_STORE_CLASS` constructor.\n# SESSION_STORE_ARGS = {\n# \"path\": \"./sessions\"\n# }\n\n# Directory that holds configuration.\n# CONF_SOURCE = \"conf\"\n\n# Class that manages how configuration is loaded.\nfrom kedro.config import OmegaConfigLoader # noqa: import-outside-toplevel\n\nCONFIG_LOADER_CLASS = OmegaConfigLoader\n# Keyword arguments to pass to the `CONFIG_LOADER_CLASS` constructor.\n# CONFIG_LOADER_ARGS = {\n# \"config_patterns\": {\n# \"spark\" : [\"spark*/\"],\n# \"parameters\": [\"parameters*\", \"parameters*/**\", \"**/parameters*\"],\n# }\n# }\n\n# Class that manages Kedro's library components.\n# from kedro.framework.context import KedroContext\n# CONTEXT_CLASS = KedroContext\n\n# Class that manages the Data Catalog.\n# from kedro.io import DataCatalog\n# DATA_CATALOG_CLASS = DataCatalog\n", "path": "kedro/templates/project/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/settings.py"}]} | 850 | 189 |
gh_patches_debug_35493 | rasdani/github-patches | git_diff | rasterio__rasterio-287 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Reprojection Example/Documentation
I was having some trouble following the [reprojection](https://github.com/mapbox/rasterio/blob/master/examples/reproject.py) example; the Affine parameters for `dst_transform` aren't referenced anywhere before they are applied:
https://github.com/mapbox/rasterio/blob/master/examples/reproject.py#L29
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rasterio/transform.py`
Content:
```
1 import warnings
2
3 from affine import Affine
4
5 IDENTITY = Affine.identity()
6
7
8 def tastes_like_gdal(seq):
9 """Return True if `seq` matches the GDAL geotransform pattern."""
10 return seq[2] == seq[4] == 0.0 and seq[1] > 0 and seq[5] < 0
11
12
13 def guard_transform(transform):
14 """Return an Affine transformation instance"""
15 if not isinstance(transform, Affine):
16 if tastes_like_gdal(transform):
17 warnings.warn(
18 "GDAL-style transforms are deprecated and will not "
19 "be supported in Rasterio 1.0.",
20 FutureWarning,
21 stacklevel=2)
22 transform = Affine.from_gdal(*transform)
23 else:
24 transform = Affine(*transform)
25 return transform
26
```
Path: `examples/reproject.py`
Content:
```
1 import os
2 import shutil
3 import subprocess
4 import tempfile
5
6 import numpy
7 import rasterio
8 from rasterio import Affine as A
9 from rasterio.warp import reproject, RESAMPLING
10
11 tempdir = '/tmp'
12 tiffname = os.path.join(tempdir, 'example.tif')
13
14 with rasterio.drivers():
15
16 # Consider a 512 x 512 raster centered on 0 degrees E and 0 degrees N
17 # with each pixel covering 15".
18 rows, cols = src_shape = (512, 512)
19 dpp = 1.0/240 # decimal degrees per pixel
20 # The following is equivalent to
21 # A(dpp, 0, -cols*dpp/2, 0, -dpp, rows*dpp/2).
22 src_transform = A.translation(-cols*dpp/2, rows*dpp/2) * A.scale(dpp, -dpp)
23 src_crs = {'init': 'EPSG:4326'}
24 source = numpy.ones(src_shape, numpy.uint8)*255
25
26 # Prepare to reproject this rasters to a 1024 x 1024 dataset in
27 # Web Mercator (EPSG:3857) with origin at -8928592, 2999585.
28 dst_shape = (1024, 1024)
29 dst_transform = A.from_gdal(-237481.5, 425.0, 0.0, 237536.4, 0.0, -425.0)
30 dst_transform = dst_transform.to_gdal()
31 dst_crs = {'init': 'EPSG:3857'}
32 destination = numpy.zeros(dst_shape, numpy.uint8)
33
34 reproject(
35 source,
36 destination,
37 src_transform=src_transform,
38 src_crs=src_crs,
39 dst_transform=dst_transform,
40 dst_crs=dst_crs,
41 resampling=RESAMPLING.nearest)
42
43 # Assert that the destination is only partly filled.
44 assert destination.any()
45 assert not destination.all()
46
47 # Write it out to a file.
48 with rasterio.open(
49 tiffname,
50 'w',
51 driver='GTiff',
52 width=dst_shape[1],
53 height=dst_shape[0],
54 count=1,
55 dtype=numpy.uint8,
56 nodata=0,
57 transform=dst_transform,
58 crs=dst_crs) as dst:
59 dst.write_band(1, destination)
60
61 info = subprocess.call(['open', tiffname])
62
63
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/reproject.py b/examples/reproject.py
--- a/examples/reproject.py
+++ b/examples/reproject.py
@@ -5,7 +5,7 @@
import numpy
import rasterio
-from rasterio import Affine as A
+from rasterio import transform
from rasterio.warp import reproject, RESAMPLING
tempdir = '/tmp'
@@ -17,17 +17,15 @@
# with each pixel covering 15".
rows, cols = src_shape = (512, 512)
dpp = 1.0/240 # decimal degrees per pixel
- # The following is equivalent to
- # A(dpp, 0, -cols*dpp/2, 0, -dpp, rows*dpp/2).
- src_transform = A.translation(-cols*dpp/2, rows*dpp/2) * A.scale(dpp, -dpp)
+ west, south, east, north = -cols*dpp/2, -rows*dpp/2, cols*dpp/2, rows*dpp/2
+ src_transform = transform.from_bounds(west, south, east, north, cols, rows)
src_crs = {'init': 'EPSG:4326'}
source = numpy.ones(src_shape, numpy.uint8)*255
# Prepare to reproject this rasters to a 1024 x 1024 dataset in
- # Web Mercator (EPSG:3857) with origin at -8928592, 2999585.
+ # Web Mercator (EPSG:3857) with origin at -237481.5, 237536.4.
dst_shape = (1024, 1024)
- dst_transform = A.from_gdal(-237481.5, 425.0, 0.0, 237536.4, 0.0, -425.0)
- dst_transform = dst_transform.to_gdal()
+ dst_transform = transform.from_origin(-237481.5, 237536.4, 425.0, 425.0)
dst_crs = {'init': 'EPSG:3857'}
destination = numpy.zeros(dst_shape, numpy.uint8)
@@ -59,4 +57,3 @@
dst.write_band(1, destination)
info = subprocess.call(['open', tiffname])
-
diff --git a/rasterio/transform.py b/rasterio/transform.py
--- a/rasterio/transform.py
+++ b/rasterio/transform.py
@@ -23,3 +23,18 @@
else:
transform = Affine(*transform)
return transform
+
+
+def from_origin(west, north, xsize, ysize):
+ """Return an Affine transformation for a georeferenced raster given
+ the coordinates of its upper left corner `west`, `north` and pixel
+ sizes `xsize`, `ysize`."""
+ return Affine.translation(west, north) * Affine.scale(xsize, -ysize)
+
+
+def from_bounds(west, south, east, north, width, height):
+ """Return an Affine transformation for a georeferenced raster given
+ its bounds `west`, `south`, `east`, `north` and its `width` and
+ `height` in number of pixels."""
+ return Affine.translation(west, north) * Affine.scale(
+ (east - west)/width, (south - north)/height)
| {"golden_diff": "diff --git a/examples/reproject.py b/examples/reproject.py\n--- a/examples/reproject.py\n+++ b/examples/reproject.py\n@@ -5,7 +5,7 @@\n \n import numpy\n import rasterio\n-from rasterio import Affine as A\n+from rasterio import transform\n from rasterio.warp import reproject, RESAMPLING\n \n tempdir = '/tmp'\n@@ -17,17 +17,15 @@\n # with each pixel covering 15\".\n rows, cols = src_shape = (512, 512)\n dpp = 1.0/240 # decimal degrees per pixel\n- # The following is equivalent to \n- # A(dpp, 0, -cols*dpp/2, 0, -dpp, rows*dpp/2).\n- src_transform = A.translation(-cols*dpp/2, rows*dpp/2) * A.scale(dpp, -dpp)\n+ west, south, east, north = -cols*dpp/2, -rows*dpp/2, cols*dpp/2, rows*dpp/2\n+ src_transform = transform.from_bounds(west, south, east, north, cols, rows)\n src_crs = {'init': 'EPSG:4326'}\n source = numpy.ones(src_shape, numpy.uint8)*255\n \n # Prepare to reproject this rasters to a 1024 x 1024 dataset in\n- # Web Mercator (EPSG:3857) with origin at -8928592, 2999585.\n+ # Web Mercator (EPSG:3857) with origin at -237481.5, 237536.4.\n dst_shape = (1024, 1024)\n- dst_transform = A.from_gdal(-237481.5, 425.0, 0.0, 237536.4, 0.0, -425.0)\n- dst_transform = dst_transform.to_gdal()\n+ dst_transform = transform.from_origin(-237481.5, 237536.4, 425.0, 425.0)\n dst_crs = {'init': 'EPSG:3857'}\n destination = numpy.zeros(dst_shape, numpy.uint8)\n \n@@ -59,4 +57,3 @@\n dst.write_band(1, destination)\n \n info = subprocess.call(['open', tiffname])\n-\ndiff --git a/rasterio/transform.py b/rasterio/transform.py\n--- a/rasterio/transform.py\n+++ b/rasterio/transform.py\n@@ -23,3 +23,18 @@\n else:\n transform = Affine(*transform)\n return transform\n+\n+\n+def from_origin(west, north, xsize, ysize):\n+ \"\"\"Return an Affine transformation for a georeferenced raster given\n+ the coordinates of its upper left corner `west`, `north` and pixel\n+ sizes `xsize`, `ysize`.\"\"\"\n+ return Affine.translation(west, north) * Affine.scale(xsize, -ysize)\n+\n+\n+def from_bounds(west, south, east, north, width, height):\n+ \"\"\"Return an Affine transformation for a georeferenced raster given\n+ its bounds `west`, `south`, `east`, `north` and its `width` and\n+ `height` in number of pixels.\"\"\"\n+ return Affine.translation(west, north) * Affine.scale(\n+ (east - west)/width, (south - north)/height)\n", "issue": "Reprojection Example/Documentation\nI was having some trouble following the [reprojection](https://github.com/mapbox/rasterio/blob/master/examples/reproject.py) example; the Affine parameters for `dst_transform` aren't referenced anywhere before they are applied:\n\nhttps://github.com/mapbox/rasterio/blob/master/examples/reproject.py#L29\n\n", "before_files": [{"content": "import warnings\n\nfrom affine import Affine\n\nIDENTITY = Affine.identity()\n\n\ndef tastes_like_gdal(seq):\n \"\"\"Return True if `seq` matches the GDAL geotransform pattern.\"\"\"\n return seq[2] == seq[4] == 0.0 and seq[1] > 0 and seq[5] < 0\n\n\ndef guard_transform(transform):\n \"\"\"Return an Affine transformation instance\"\"\"\n if not isinstance(transform, Affine):\n if tastes_like_gdal(transform):\n warnings.warn(\n \"GDAL-style transforms are deprecated and will not \"\n \"be supported in Rasterio 1.0.\",\n FutureWarning,\n stacklevel=2)\n transform = Affine.from_gdal(*transform)\n else:\n transform = Affine(*transform)\n return transform\n", "path": "rasterio/transform.py"}, {"content": "import os\nimport shutil\nimport subprocess\nimport tempfile\n\nimport numpy\nimport rasterio\nfrom rasterio 
import Affine as A\nfrom rasterio.warp import reproject, RESAMPLING\n\ntempdir = '/tmp'\ntiffname = os.path.join(tempdir, 'example.tif')\n\nwith rasterio.drivers():\n\n # Consider a 512 x 512 raster centered on 0 degrees E and 0 degrees N\n # with each pixel covering 15\".\n rows, cols = src_shape = (512, 512)\n dpp = 1.0/240 # decimal degrees per pixel\n # The following is equivalent to \n # A(dpp, 0, -cols*dpp/2, 0, -dpp, rows*dpp/2).\n src_transform = A.translation(-cols*dpp/2, rows*dpp/2) * A.scale(dpp, -dpp)\n src_crs = {'init': 'EPSG:4326'}\n source = numpy.ones(src_shape, numpy.uint8)*255\n\n # Prepare to reproject this rasters to a 1024 x 1024 dataset in\n # Web Mercator (EPSG:3857) with origin at -8928592, 2999585.\n dst_shape = (1024, 1024)\n dst_transform = A.from_gdal(-237481.5, 425.0, 0.0, 237536.4, 0.0, -425.0)\n dst_transform = dst_transform.to_gdal()\n dst_crs = {'init': 'EPSG:3857'}\n destination = numpy.zeros(dst_shape, numpy.uint8)\n\n reproject(\n source, \n destination, \n src_transform=src_transform,\n src_crs=src_crs,\n dst_transform=dst_transform,\n dst_crs=dst_crs,\n resampling=RESAMPLING.nearest)\n\n # Assert that the destination is only partly filled.\n assert destination.any()\n assert not destination.all()\n\n # Write it out to a file.\n with rasterio.open(\n tiffname, \n 'w',\n driver='GTiff',\n width=dst_shape[1],\n height=dst_shape[0],\n count=1,\n dtype=numpy.uint8,\n nodata=0,\n transform=dst_transform,\n crs=dst_crs) as dst:\n dst.write_band(1, destination)\n\ninfo = subprocess.call(['open', tiffname])\n\n", "path": "examples/reproject.py"}], "after_files": [{"content": "import warnings\n\nfrom affine import Affine\n\nIDENTITY = Affine.identity()\n\n\ndef tastes_like_gdal(seq):\n \"\"\"Return True if `seq` matches the GDAL geotransform pattern.\"\"\"\n return seq[2] == seq[4] == 0.0 and seq[1] > 0 and seq[5] < 0\n\n\ndef guard_transform(transform):\n \"\"\"Return an Affine transformation instance\"\"\"\n if not isinstance(transform, Affine):\n if tastes_like_gdal(transform):\n warnings.warn(\n \"GDAL-style transforms are deprecated and will not \"\n \"be supported in Rasterio 1.0.\",\n FutureWarning,\n stacklevel=2)\n transform = Affine.from_gdal(*transform)\n else:\n transform = Affine(*transform)\n return transform\n\n\ndef from_origin(west, north, xsize, ysize):\n \"\"\"Return an Affine transformation for a georeferenced raster given\n the coordinates of its upper left corner `west`, `north` and pixel\n sizes `xsize`, `ysize`.\"\"\"\n return Affine.translation(west, north) * Affine.scale(xsize, -ysize)\n\n\ndef from_bounds(west, south, east, north, width, height):\n \"\"\"Return an Affine transformation for a georeferenced raster given\n its bounds `west`, `south`, `east`, `north` and its `width` and\n `height` in number of pixels.\"\"\"\n return Affine.translation(west, north) * Affine.scale(\n (east - west)/width, (south - north)/height)\n", "path": "rasterio/transform.py"}, {"content": "import os\nimport shutil\nimport subprocess\nimport tempfile\n\nimport numpy\nimport rasterio\nfrom rasterio import transform\nfrom rasterio.warp import reproject, RESAMPLING\n\ntempdir = '/tmp'\ntiffname = os.path.join(tempdir, 'example.tif')\n\nwith rasterio.drivers():\n\n # Consider a 512 x 512 raster centered on 0 degrees E and 0 degrees N\n # with each pixel covering 15\".\n rows, cols = src_shape = (512, 512)\n dpp = 1.0/240 # decimal degrees per pixel\n west, south, east, north = -cols*dpp/2, -rows*dpp/2, cols*dpp/2, rows*dpp/2\n src_transform = 
transform.from_bounds(west, south, east, north, cols, rows)\n src_crs = {'init': 'EPSG:4326'}\n source = numpy.ones(src_shape, numpy.uint8)*255\n\n # Prepare to reproject this rasters to a 1024 x 1024 dataset in\n # Web Mercator (EPSG:3857) with origin at -237481.5, 237536.4.\n dst_shape = (1024, 1024)\n dst_transform = transform.from_origin(-237481.5, 237536.4, 425.0, 425.0)\n dst_crs = {'init': 'EPSG:3857'}\n destination = numpy.zeros(dst_shape, numpy.uint8)\n\n reproject(\n source, \n destination, \n src_transform=src_transform,\n src_crs=src_crs,\n dst_transform=dst_transform,\n dst_crs=dst_crs,\n resampling=RESAMPLING.nearest)\n\n # Assert that the destination is only partly filled.\n assert destination.any()\n assert not destination.all()\n\n # Write it out to a file.\n with rasterio.open(\n tiffname, \n 'w',\n driver='GTiff',\n width=dst_shape[1],\n height=dst_shape[0],\n count=1,\n dtype=numpy.uint8,\n nodata=0,\n transform=dst_transform,\n crs=dst_crs) as dst:\n dst.write_band(1, destination)\n\ninfo = subprocess.call(['open', tiffname])\n", "path": "examples/reproject.py"}]} | 1,277 | 834 |
gh_patches_debug_1604 | rasdani/github-patches | git_diff | swcarpentry__python-novice-inflammation-946 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Code provided for students contains python code not compatible with python 3
At least one file in the `code` directory, e.g., `gen_inflammation.py` fails when running it with python 3. The [problem is the "division" not giving an integer](https://github.com/swcarpentry/python-novice-inflammation/blob/11643f14d31726f2f60873c4ca1230fff0bbf108/code/gen_inflammation.py#L19). It needs to be changed to
```diff
- upper / 4
+ upper // 4
```
This was spotted by a student trying to check their installation and running different files.
Other files may have similar errors. I'd suggest running and testing via CI everything we provide to the students.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `code/gen_inflammation.py`
Content:
```
1 #!/usr/bin/env python
2
3 """
4 Generate pseudo-random patient inflammation data for use in Python lessons.
5 """
6
7 import random
8
9 n_patients = 60
10 n_days = 40
11 n_range = 20
12
13 middle = n_days / 2
14
15 for p in range(n_patients):
16 vals = []
17 for d in range(n_days):
18 upper = max(n_range - abs(d - middle), 0)
19 vals.append(random.randint(upper/4, upper))
20 print(','.join([str(v) for v in vals]))
21
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/code/gen_inflammation.py b/code/gen_inflammation.py
--- a/code/gen_inflammation.py
+++ b/code/gen_inflammation.py
@@ -16,5 +16,5 @@
vals = []
for d in range(n_days):
upper = max(n_range - abs(d - middle), 0)
- vals.append(random.randint(upper/4, upper))
+ vals.append(random.randint(upper//4, upper))
print(','.join([str(v) for v in vals]))
| {"golden_diff": "diff --git a/code/gen_inflammation.py b/code/gen_inflammation.py\n--- a/code/gen_inflammation.py\n+++ b/code/gen_inflammation.py\n@@ -16,5 +16,5 @@\n vals = []\n for d in range(n_days):\n upper = max(n_range - abs(d - middle), 0)\n- vals.append(random.randint(upper/4, upper))\n+ vals.append(random.randint(upper//4, upper))\n print(','.join([str(v) for v in vals]))\n", "issue": "Code provided for students contain python code not compatible with python 3\nAt least one file in the `code` directory, e.g., `gen_inflammation.py` fails when running it with python 3. The [problem is the \"division\" not giving an integer](https://github.com/swcarpentry/python-novice-inflammation/blob/11643f14d31726f2f60873c4ca1230fff0bbf108/code/gen_inflammation.py#L19). It needs to be changed to\r\n```diff\r\n- upper / 4\r\n+ upper // 4\r\n```\r\n\r\nThis was spotted by a student trying to check their installation and running different files.\r\nOther files may have similar errors. I'd suggest running and testing via CI everything we provide to the students.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\nGenerate pseudo-random patient inflammation data for use in Python lessons.\n\"\"\"\n\nimport random\n\nn_patients = 60\nn_days = 40\nn_range = 20\n\nmiddle = n_days / 2\n\nfor p in range(n_patients):\n vals = []\n for d in range(n_days):\n upper = max(n_range - abs(d - middle), 0)\n vals.append(random.randint(upper/4, upper))\n print(','.join([str(v) for v in vals]))\n", "path": "code/gen_inflammation.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\nGenerate pseudo-random patient inflammation data for use in Python lessons.\n\"\"\"\n\nimport random\n\nn_patients = 60\nn_days = 40\nn_range = 20\n\nmiddle = n_days / 2\n\nfor p in range(n_patients):\n vals = []\n for d in range(n_days):\n upper = max(n_range - abs(d - middle), 0)\n vals.append(random.randint(upper//4, upper))\n print(','.join([str(v) for v in vals]))\n", "path": "code/gen_inflammation.py"}]} | 598 | 115 |
gh_patches_debug_8998 | rasdani/github-patches | git_diff | Gallopsled__pwntools-1706
--- BEGIN ISSUE ---
socket.socket doesnt have unrecv. bug in udp server
https://github.com/Gallopsled/pwntools/blob/5e279e7993f1f57cba2ba128f6bd8a27c19ea25f/pwnlib/tubes/server.py#L120
as mentioned above this is a bug.
line should be
```python
self.unrecv(data)
```
if its necessary at all
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwnlib/tubes/server.py`
Content:
```
1 from __future__ import absolute_import
2 from __future__ import division
3
4 import errno
5 import socket
6 import threading
7
8 from pwnlib.context import context
9 from pwnlib.log import getLogger
10 from pwnlib.tubes.sock import sock
11 from pwnlib.tubes.remote import remote
12
13 log = getLogger(__name__)
14
15 class server(sock):
16 r"""Creates an TCP or UDP-server to listen for connections. It supports
17 both IPv4 and IPv6.
18
19 Arguments:
20 port(int): The port to connect to.
21 Defaults to a port auto-selected by the operating system.
22 bindaddr(str): The address to bind to.
23 Defaults to ``0.0.0.0`` / `::`.
24 fam: The string "any", "ipv4" or "ipv6" or an integer to pass to :func:`socket.getaddrinfo`.
25 typ: The string "tcp" or "udp" or an integer to pass to :func:`socket.getaddrinfo`.
26 callback: A function to be started on incoming connections. It should take a :class:`pwnlib.tubes.remote` as its only argument.
27
28 Examples:
29
30 >>> s = server(8888)
31 >>> client_conn = remote('localhost', s.lport)
32 >>> server_conn = s.next_connection()
33 >>> client_conn.sendline(b'Hello')
34 >>> server_conn.recvline()
35 b'Hello\n'
36 >>> def cb(r):
37 ... client_input = r.readline()
38 ... r.send(client_input[::-1])
39 ...
40 >>> t = server(8889, callback=cb)
41 >>> client_conn = remote('localhost', t.lport)
42 >>> client_conn.sendline(b'callback')
43 >>> client_conn.recv()
44 b'\nkcabllac'
45 """
46
47 #: Local port
48 lport = 0
49
50 #: Local host
51 lhost = None
52
53 #: Socket type (e.g. socket.SOCK_STREAM)
54 type = None
55
56 #: Socket family
57 family = None
58
59 #: Socket protocol
60 protocol = None
61
62 #: Canonical name of the listening interface
63 canonname = None
64
65 #: Sockaddr structure that is being listened on
66 sockaddr = None
67
68 _accepter = None
69
70 def __init__(self, port=0, bindaddr = "0.0.0.0", fam = "any", typ = "tcp",
71 callback = None, blocking = False, *args, **kwargs):
72 super(server, self).__init__(*args, **kwargs)
73
74 port = int(port)
75 fam = {socket.AF_INET: 'ipv4',
76 socket.AF_INET6: 'ipv6'}.get(fam, fam)
77
78 fam = self._get_family(fam)
79 typ = self._get_type(typ)
80
81 if fam == socket.AF_INET6 and bindaddr == '0.0.0.0':
82 bindaddr = '::'
83
84 h = self.waitfor('Trying to bind to %s on port %d' % (bindaddr, port))
85
86 for res in socket.getaddrinfo(bindaddr, port, fam, typ, 0, socket.AI_PASSIVE):
87 self.family, self.type, self.proto, self.canonname, self.sockaddr = res
88
89 if self.type not in [socket.SOCK_STREAM, socket.SOCK_DGRAM]:
90 continue
91
92 h.status("Trying %s" % self.sockaddr[0])
93 listen_sock = socket.socket(self.family, self.type, self.proto)
94 listen_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
95 listen_sock.bind(self.sockaddr)
96 self.lhost, self.lport = listen_sock.getsockname()[:2]
97 if self.type == socket.SOCK_STREAM:
98 listen_sock.listen(1)
99 break
100 else:
101 h.failure()
102 self.error("Could not bind to %s on port %d" % (bindaddr, port))
103
104 h.success()
105
106 self.sock = listen_sock
107 self.connections_waiting = threading.Event()
108 self.connections = []
109 def accepter():
110 while True:
111 h = self.waitfor('Waiting for connections on %s:%s' % (self.lhost, self.lport))
112 while True:
113 try:
114 if self.type == socket.SOCK_STREAM:
115 sock, rhost = listen_sock.accept()
116 else:
117 data, rhost = listen_sock.recvfrom(4096)
118 listen_sock.connect(rhost)
119 sock = listen_sock
120 sock.unrecv(data)
121 sock.settimeout(self.timeout)
122 break
123 except socket.error as e:
124 if e.errno == errno.EINTR:
125 continue
126 h.failure()
127 self.exception("Socket failure while waiting for connection")
128 sock = None
129 return
130
131 self.rhost, self.rport = rhost[:2]
132 r = remote(self.rhost, self.rport, sock = sock)
133 h.success('Got connection from %s on port %d' % (self.rhost, self.rport))
134 if callback:
135 if not blocking:
136 t = context.Thread(target = callback, args = (r,))
137 t.daemon = True
138 t.start()
139 else:
140 callback(r)
141 else:
142 self.connections.append(r)
143 if not self.connections_waiting.is_set():
144 self.connections_waiting.set()
145
146 self._accepter = context.Thread(target = accepter)
147 self._accepter.daemon = True
148 self._accepter.start()
149
150 def next_connection(self):
151 if not self.connections_waiting.is_set():
152 self.connections_waiting.wait()
153 conn = self.connections.pop(0)
154 if not self.connections:
155 self.connections_waiting.clear()
156 return conn
157
158 def close(self):
159 # since `close` is scheduled to run on exit we must check that we got
160 # a connection or the program will hang in the `join` call above
161 if self._accepter and self._accepter.is_alive():
162 return
163 super(server, self).close()
164
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pwnlib/tubes/server.py b/pwnlib/tubes/server.py
--- a/pwnlib/tubes/server.py
+++ b/pwnlib/tubes/server.py
@@ -117,7 +117,7 @@
data, rhost = listen_sock.recvfrom(4096)
listen_sock.connect(rhost)
sock = listen_sock
- sock.unrecv(data)
+ self.unrecv(data)
sock.settimeout(self.timeout)
break
except socket.error as e:
| {"golden_diff": "diff --git a/pwnlib/tubes/server.py b/pwnlib/tubes/server.py\n--- a/pwnlib/tubes/server.py\n+++ b/pwnlib/tubes/server.py\n@@ -117,7 +117,7 @@\n data, rhost = listen_sock.recvfrom(4096)\n listen_sock.connect(rhost)\n sock = listen_sock\n- sock.unrecv(data)\n+ self.unrecv(data)\n sock.settimeout(self.timeout)\n break\n except socket.error as e:\n", "issue": "socket.socket doesnt have unrecv. bug in udp server\nhttps://github.com/Gallopsled/pwntools/blob/5e279e7993f1f57cba2ba128f6bd8a27c19ea25f/pwnlib/tubes/server.py#L120\r\n\r\nas mentioned above this is a bug.\r\n\r\nline should be\r\n```python\r\nself.unrecv(data)\r\n```\r\nif its necessary at all\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\n\nimport errno\nimport socket\nimport threading\n\nfrom pwnlib.context import context\nfrom pwnlib.log import getLogger\nfrom pwnlib.tubes.sock import sock\nfrom pwnlib.tubes.remote import remote\n\nlog = getLogger(__name__)\n\nclass server(sock):\n r\"\"\"Creates an TCP or UDP-server to listen for connections. It supports\n both IPv4 and IPv6.\n\n Arguments:\n port(int): The port to connect to.\n Defaults to a port auto-selected by the operating system.\n bindaddr(str): The address to bind to.\n Defaults to ``0.0.0.0`` / `::`.\n fam: The string \"any\", \"ipv4\" or \"ipv6\" or an integer to pass to :func:`socket.getaddrinfo`.\n typ: The string \"tcp\" or \"udp\" or an integer to pass to :func:`socket.getaddrinfo`.\n callback: A function to be started on incoming connections. It should take a :class:`pwnlib.tubes.remote` as its only argument.\n\n Examples:\n\n >>> s = server(8888)\n >>> client_conn = remote('localhost', s.lport)\n >>> server_conn = s.next_connection()\n >>> client_conn.sendline(b'Hello')\n >>> server_conn.recvline()\n b'Hello\\n'\n >>> def cb(r):\n ... client_input = r.readline()\n ... r.send(client_input[::-1])\n ...\n >>> t = server(8889, callback=cb)\n >>> client_conn = remote('localhost', t.lport)\n >>> client_conn.sendline(b'callback')\n >>> client_conn.recv()\n b'\\nkcabllac'\n \"\"\"\n\n #: Local port\n lport = 0\n\n #: Local host\n lhost = None\n\n #: Socket type (e.g. 
socket.SOCK_STREAM)\n type = None\n\n #: Socket family\n family = None\n\n #: Socket protocol\n protocol = None\n\n #: Canonical name of the listening interface\n canonname = None\n\n #: Sockaddr structure that is being listened on\n sockaddr = None\n\n _accepter = None\n\n def __init__(self, port=0, bindaddr = \"0.0.0.0\", fam = \"any\", typ = \"tcp\",\n callback = None, blocking = False, *args, **kwargs):\n super(server, self).__init__(*args, **kwargs)\n\n port = int(port)\n fam = {socket.AF_INET: 'ipv4',\n socket.AF_INET6: 'ipv6'}.get(fam, fam)\n\n fam = self._get_family(fam)\n typ = self._get_type(typ)\n\n if fam == socket.AF_INET6 and bindaddr == '0.0.0.0':\n bindaddr = '::'\n\n h = self.waitfor('Trying to bind to %s on port %d' % (bindaddr, port))\n\n for res in socket.getaddrinfo(bindaddr, port, fam, typ, 0, socket.AI_PASSIVE):\n self.family, self.type, self.proto, self.canonname, self.sockaddr = res\n\n if self.type not in [socket.SOCK_STREAM, socket.SOCK_DGRAM]:\n continue\n\n h.status(\"Trying %s\" % self.sockaddr[0])\n listen_sock = socket.socket(self.family, self.type, self.proto)\n listen_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n listen_sock.bind(self.sockaddr)\n self.lhost, self.lport = listen_sock.getsockname()[:2]\n if self.type == socket.SOCK_STREAM:\n listen_sock.listen(1)\n break\n else:\n h.failure()\n self.error(\"Could not bind to %s on port %d\" % (bindaddr, port))\n\n h.success()\n\n self.sock = listen_sock\n self.connections_waiting = threading.Event()\n self.connections = []\n def accepter():\n while True:\n h = self.waitfor('Waiting for connections on %s:%s' % (self.lhost, self.lport))\n while True:\n try:\n if self.type == socket.SOCK_STREAM:\n sock, rhost = listen_sock.accept()\n else:\n data, rhost = listen_sock.recvfrom(4096)\n listen_sock.connect(rhost)\n sock = listen_sock\n sock.unrecv(data)\n sock.settimeout(self.timeout)\n break\n except socket.error as e:\n if e.errno == errno.EINTR:\n continue\n h.failure()\n self.exception(\"Socket failure while waiting for connection\")\n sock = None\n return\n\n self.rhost, self.rport = rhost[:2]\n r = remote(self.rhost, self.rport, sock = sock)\n h.success('Got connection from %s on port %d' % (self.rhost, self.rport))\n if callback:\n if not blocking:\n t = context.Thread(target = callback, args = (r,))\n t.daemon = True\n t.start()\n else:\n callback(r)\n else:\n self.connections.append(r)\n if not self.connections_waiting.is_set():\n self.connections_waiting.set()\n\n self._accepter = context.Thread(target = accepter)\n self._accepter.daemon = True\n self._accepter.start()\n\n def next_connection(self):\n if not self.connections_waiting.is_set():\n self.connections_waiting.wait()\n conn = self.connections.pop(0)\n if not self.connections:\n self.connections_waiting.clear()\n return conn\n\n def close(self):\n # since `close` is scheduled to run on exit we must check that we got\n # a connection or the program will hang in the `join` call above\n if self._accepter and self._accepter.is_alive():\n return\n super(server, self).close()\n", "path": "pwnlib/tubes/server.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\n\nimport errno\nimport socket\nimport threading\n\nfrom pwnlib.context import context\nfrom pwnlib.log import getLogger\nfrom pwnlib.tubes.sock import sock\nfrom pwnlib.tubes.remote import remote\n\nlog = getLogger(__name__)\n\nclass server(sock):\n r\"\"\"Creates an TCP or UDP-server to listen for connections. 
It supports\n both IPv4 and IPv6.\n\n Arguments:\n port(int): The port to connect to.\n Defaults to a port auto-selected by the operating system.\n bindaddr(str): The address to bind to.\n Defaults to ``0.0.0.0`` / `::`.\n fam: The string \"any\", \"ipv4\" or \"ipv6\" or an integer to pass to :func:`socket.getaddrinfo`.\n typ: The string \"tcp\" or \"udp\" or an integer to pass to :func:`socket.getaddrinfo`.\n callback: A function to be started on incoming connections. It should take a :class:`pwnlib.tubes.remote` as its only argument.\n\n Examples:\n\n >>> s = server(8888)\n >>> client_conn = remote('localhost', s.lport)\n >>> server_conn = s.next_connection()\n >>> client_conn.sendline(b'Hello')\n >>> server_conn.recvline()\n b'Hello\\n'\n >>> def cb(r):\n ... client_input = r.readline()\n ... r.send(client_input[::-1])\n ...\n >>> t = server(8889, callback=cb)\n >>> client_conn = remote('localhost', t.lport)\n >>> client_conn.sendline(b'callback')\n >>> client_conn.recv()\n b'\\nkcabllac'\n \"\"\"\n\n #: Local port\n lport = 0\n\n #: Local host\n lhost = None\n\n #: Socket type (e.g. socket.SOCK_STREAM)\n type = None\n\n #: Socket family\n family = None\n\n #: Socket protocol\n protocol = None\n\n #: Canonical name of the listening interface\n canonname = None\n\n #: Sockaddr structure that is being listened on\n sockaddr = None\n\n _accepter = None\n\n def __init__(self, port=0, bindaddr = \"0.0.0.0\", fam = \"any\", typ = \"tcp\",\n callback = None, blocking = False, *args, **kwargs):\n super(server, self).__init__(*args, **kwargs)\n\n port = int(port)\n fam = {socket.AF_INET: 'ipv4',\n socket.AF_INET6: 'ipv6'}.get(fam, fam)\n\n fam = self._get_family(fam)\n typ = self._get_type(typ)\n\n if fam == socket.AF_INET6 and bindaddr == '0.0.0.0':\n bindaddr = '::'\n\n h = self.waitfor('Trying to bind to %s on port %d' % (bindaddr, port))\n\n for res in socket.getaddrinfo(bindaddr, port, fam, typ, 0, socket.AI_PASSIVE):\n self.family, self.type, self.proto, self.canonname, self.sockaddr = res\n\n if self.type not in [socket.SOCK_STREAM, socket.SOCK_DGRAM]:\n continue\n\n h.status(\"Trying %s\" % self.sockaddr[0])\n listen_sock = socket.socket(self.family, self.type, self.proto)\n listen_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n listen_sock.bind(self.sockaddr)\n self.lhost, self.lport = listen_sock.getsockname()[:2]\n if self.type == socket.SOCK_STREAM:\n listen_sock.listen(1)\n break\n else:\n h.failure()\n self.error(\"Could not bind to %s on port %d\" % (bindaddr, port))\n\n h.success()\n\n self.sock = listen_sock\n self.connections_waiting = threading.Event()\n self.connections = []\n def accepter():\n while True:\n h = self.waitfor('Waiting for connections on %s:%s' % (self.lhost, self.lport))\n while True:\n try:\n if self.type == socket.SOCK_STREAM:\n sock, rhost = listen_sock.accept()\n else:\n data, rhost = listen_sock.recvfrom(4096)\n listen_sock.connect(rhost)\n sock = listen_sock\n self.unrecv(data)\n sock.settimeout(self.timeout)\n break\n except socket.error as e:\n if e.errno == errno.EINTR:\n continue\n h.failure()\n self.exception(\"Socket failure while waiting for connection\")\n sock = None\n return\n\n self.rhost, self.rport = rhost[:2]\n r = remote(self.rhost, self.rport, sock = sock)\n h.success('Got connection from %s on port %d' % (self.rhost, self.rport))\n if callback:\n if not blocking:\n t = context.Thread(target = callback, args = (r,))\n t.daemon = True\n t.start()\n else:\n callback(r)\n else:\n self.connections.append(r)\n if not 
self.connections_waiting.is_set():\n self.connections_waiting.set()\n\n self._accepter = context.Thread(target = accepter)\n self._accepter.daemon = True\n self._accepter.start()\n\n def next_connection(self):\n if not self.connections_waiting.is_set():\n self.connections_waiting.wait()\n conn = self.connections.pop(0)\n if not self.connections:\n self.connections_waiting.clear()\n return conn\n\n def close(self):\n # since `close` is scheduled to run on exit we must check that we got\n # a connection or the program will hang in the `join` call above\n if self._accepter and self._accepter.is_alive():\n return\n super(server, self).close()\n", "path": "pwnlib/tubes/server.py"}]} | 2,042 | 114 |
gh_patches_debug_24699 | rasdani/github-patches | git_diff | HypothesisWorks__hypothesis-285
--- BEGIN ISSUE ---
django_polymorphic breaks model generation
`django_polymorphic` adds mandatory fields (named `*_ptr`) to models, but gives them values when the model is created. Hypothesis sees these as normal non-nullable fields, which trigger the relevant health check. However, explicitly providing a value for one of these fields causes an exception to be thrown in the model's constructor.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/hypothesis/extra/django/models.py`
Content:
```
1 # coding=utf-8
2 #
3 # This file is part of Hypothesis (https://github.com/DRMacIver/hypothesis)
4 #
5 # Most of this work is copyright (C) 2013-2015 David R. MacIver
6 # ([email protected]), but it contains contributions by others. See
7 # https://github.com/DRMacIver/hypothesis/blob/master/CONTRIBUTING.rst for a
8 # full list of people who may hold copyright, and consult the git log if you
9 # need to determine who owns an individual contribution.
10 #
11 # This Source Code Form is subject to the terms of the Mozilla Public License,
12 # v. 2.0. If a copy of the MPL was not distributed with this file, You can
13 # obtain one at http://mozilla.org/MPL/2.0/.
14 #
15 # END HEADER
16
17 from __future__ import division, print_function, absolute_import
18
19 import django.db.models as dm
20 from django.db import IntegrityError
21
22 import hypothesis.strategies as st
23 import hypothesis.extra.fakefactory as ff
24 from hypothesis.errors import InvalidArgument
25 from hypothesis.extra.datetime import datetimes
26 from hypothesis.searchstrategy.strategies import SearchStrategy
27
28
29 class ModelNotSupported(Exception):
30 pass
31
32
33 def referenced_models(model, seen=None):
34 if seen is None:
35 seen = set()
36 for f in model._meta.concrete_fields:
37 if isinstance(f, dm.ForeignKey):
38 t = f.rel.to
39 if t not in seen:
40 seen.add(t)
41 referenced_models(t, seen)
42 return seen
43
44
45 __default_field_mappings = None
46
47
48 def field_mappings():
49 global __default_field_mappings
50
51 if __default_field_mappings is None:
52 __default_field_mappings = {
53 dm.SmallIntegerField: st.integers(-32768, 32767),
54 dm.IntegerField: st.integers(-2147483648, 2147483647),
55 dm.BigIntegerField:
56 st.integers(-9223372036854775808, 9223372036854775807),
57 dm.PositiveIntegerField: st.integers(0, 2147483647),
58 dm.PositiveSmallIntegerField: st.integers(0, 32767),
59 dm.BinaryField: st.binary(),
60 dm.BooleanField: st.booleans(),
61 dm.CharField: st.text(),
62 dm.TextField: st.text(),
63 dm.DateTimeField: datetimes(allow_naive=False),
64 dm.EmailField: ff.fake_factory(u'email'),
65 dm.FloatField: st.floats(),
66 dm.NullBooleanField: st.one_of(st.none(), st.booleans()),
67 }
68 return __default_field_mappings
69
70
71 def add_default_field_mapping(field_type, strategy):
72 field_mappings()[field_type] = strategy
73
74
75 def models(model, **extra):
76 result = {}
77 mappings = field_mappings()
78 mandatory = set()
79 for f in model._meta.concrete_fields:
80 if isinstance(f, dm.AutoField):
81 continue
82 try:
83 mapped = mappings[type(f)]
84 except KeyError:
85 if not f.null:
86 mandatory.add(f.name)
87 continue
88 if f.null:
89 mapped = st.one_of(st.none(), mapped)
90 result[f.name] = mapped
91 missed = {x for x in mandatory if x not in extra}
92 if missed:
93 raise InvalidArgument((
94 u'Missing arguments for mandatory field%s %s for model %s' % (
95 u's' if len(missed) > 1 else u'',
96 u', '.join(missed),
97 model.__name__,
98 )))
99 for k, v in extra.items():
100 if isinstance(v, SearchStrategy):
101 result[k] = v
102 else:
103 result[k] = st.just(v)
104 result.update(extra)
105 return ModelStrategy(model, result)
106
107
108 class ModelStrategy(SearchStrategy):
109
110 def __init__(self, model, mappings):
111 super(ModelStrategy, self).__init__()
112 self.model = model
113 self.arg_strategy = st.fixed_dictionaries(mappings)
114
115 def __repr__(self):
116 return u'ModelStrategy(%s)' % (self.model.__name__,)
117
118 def do_draw(self, data):
119 try:
120 result, _ = self.model.objects.get_or_create(
121 **self.arg_strategy.do_draw(data)
122 )
123 return result
124 except IntegrityError:
125 data.mark_invalid()
126
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/hypothesis/extra/django/models.py b/src/hypothesis/extra/django/models.py
--- a/src/hypothesis/extra/django/models.py
+++ b/src/hypothesis/extra/django/models.py
@@ -23,6 +23,7 @@
import hypothesis.extra.fakefactory as ff
from hypothesis.errors import InvalidArgument
from hypothesis.extra.datetime import datetimes
+from hypothesis.utils.conventions import UniqueIdentifier
from hypothesis.searchstrategy.strategies import SearchStrategy
@@ -72,6 +73,9 @@
field_mappings()[field_type] = strategy
+default_value = UniqueIdentifier(u'default_value')
+
+
def models(model, **extra):
result = {}
mappings = field_mappings()
@@ -96,12 +100,9 @@
u', '.join(missed),
model.__name__,
)))
- for k, v in extra.items():
- if isinstance(v, SearchStrategy):
- result[k] = v
- else:
- result[k] = st.just(v)
result.update(extra)
+ # Remove default_values so we don't try to generate anything for those.
+ result = {k: v for k, v in result.items() if v is not default_value}
return ModelStrategy(model, result)
| {"golden_diff": "diff --git a/src/hypothesis/extra/django/models.py b/src/hypothesis/extra/django/models.py\n--- a/src/hypothesis/extra/django/models.py\n+++ b/src/hypothesis/extra/django/models.py\n@@ -23,6 +23,7 @@\n import hypothesis.extra.fakefactory as ff\n from hypothesis.errors import InvalidArgument\n from hypothesis.extra.datetime import datetimes\n+from hypothesis.utils.conventions import UniqueIdentifier\n from hypothesis.searchstrategy.strategies import SearchStrategy\n \n \n@@ -72,6 +73,9 @@\n field_mappings()[field_type] = strategy\n \n \n+default_value = UniqueIdentifier(u'default_value')\n+\n+\n def models(model, **extra):\n result = {}\n mappings = field_mappings()\n@@ -96,12 +100,9 @@\n u', '.join(missed),\n model.__name__,\n )))\n- for k, v in extra.items():\n- if isinstance(v, SearchStrategy):\n- result[k] = v\n- else:\n- result[k] = st.just(v)\n result.update(extra)\n+ # Remove default_values so we don't try to generate anything for those.\n+ result = {k: v for k, v in result.items() if v is not default_value}\n return ModelStrategy(model, result)\n", "issue": "django_polymorphic breaks model generation\n`django_polymorphic` adds mandatory fields (named `*_ptr`) to models, but gives them values when the model is created. Hypothesis sees these as normal non-nullable fields, which trigger the relevant health check. However, explicitly providing a value for one of these fields causes an exception to be thrown in the model's constructor.\n\n", "before_files": [{"content": "# coding=utf-8\n#\n# This file is part of Hypothesis (https://github.com/DRMacIver/hypothesis)\n#\n# Most of this work is copyright (C) 2013-2015 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# https://github.com/DRMacIver/hypothesis/blob/master/CONTRIBUTING.rst for a\n# full list of people who may hold copyright, and consult the git log if you\n# need to determine who owns an individual contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. 
If a copy of the MPL was not distributed with this file, You can\n# obtain one at http://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\nfrom __future__ import division, print_function, absolute_import\n\nimport django.db.models as dm\nfrom django.db import IntegrityError\n\nimport hypothesis.strategies as st\nimport hypothesis.extra.fakefactory as ff\nfrom hypothesis.errors import InvalidArgument\nfrom hypothesis.extra.datetime import datetimes\nfrom hypothesis.searchstrategy.strategies import SearchStrategy\n\n\nclass ModelNotSupported(Exception):\n pass\n\n\ndef referenced_models(model, seen=None):\n if seen is None:\n seen = set()\n for f in model._meta.concrete_fields:\n if isinstance(f, dm.ForeignKey):\n t = f.rel.to\n if t not in seen:\n seen.add(t)\n referenced_models(t, seen)\n return seen\n\n\n__default_field_mappings = None\n\n\ndef field_mappings():\n global __default_field_mappings\n\n if __default_field_mappings is None:\n __default_field_mappings = {\n dm.SmallIntegerField: st.integers(-32768, 32767),\n dm.IntegerField: st.integers(-2147483648, 2147483647),\n dm.BigIntegerField:\n st.integers(-9223372036854775808, 9223372036854775807),\n dm.PositiveIntegerField: st.integers(0, 2147483647),\n dm.PositiveSmallIntegerField: st.integers(0, 32767),\n dm.BinaryField: st.binary(),\n dm.BooleanField: st.booleans(),\n dm.CharField: st.text(),\n dm.TextField: st.text(),\n dm.DateTimeField: datetimes(allow_naive=False),\n dm.EmailField: ff.fake_factory(u'email'),\n dm.FloatField: st.floats(),\n dm.NullBooleanField: st.one_of(st.none(), st.booleans()),\n }\n return __default_field_mappings\n\n\ndef add_default_field_mapping(field_type, strategy):\n field_mappings()[field_type] = strategy\n\n\ndef models(model, **extra):\n result = {}\n mappings = field_mappings()\n mandatory = set()\n for f in model._meta.concrete_fields:\n if isinstance(f, dm.AutoField):\n continue\n try:\n mapped = mappings[type(f)]\n except KeyError:\n if not f.null:\n mandatory.add(f.name)\n continue\n if f.null:\n mapped = st.one_of(st.none(), mapped)\n result[f.name] = mapped\n missed = {x for x in mandatory if x not in extra}\n if missed:\n raise InvalidArgument((\n u'Missing arguments for mandatory field%s %s for model %s' % (\n u's' if len(missed) > 1 else u'',\n u', '.join(missed),\n model.__name__,\n )))\n for k, v in extra.items():\n if isinstance(v, SearchStrategy):\n result[k] = v\n else:\n result[k] = st.just(v)\n result.update(extra)\n return ModelStrategy(model, result)\n\n\nclass ModelStrategy(SearchStrategy):\n\n def __init__(self, model, mappings):\n super(ModelStrategy, self).__init__()\n self.model = model\n self.arg_strategy = st.fixed_dictionaries(mappings)\n\n def __repr__(self):\n return u'ModelStrategy(%s)' % (self.model.__name__,)\n\n def do_draw(self, data):\n try:\n result, _ = self.model.objects.get_or_create(\n **self.arg_strategy.do_draw(data)\n )\n return result\n except IntegrityError:\n data.mark_invalid()\n", "path": "src/hypothesis/extra/django/models.py"}], "after_files": [{"content": "# coding=utf-8\n#\n# This file is part of Hypothesis (https://github.com/DRMacIver/hypothesis)\n#\n# Most of this work is copyright (C) 2013-2015 David R. MacIver\n# ([email protected]), but it contains contributions by others. 
See\n# https://github.com/DRMacIver/hypothesis/blob/master/CONTRIBUTING.rst for a\n# full list of people who may hold copyright, and consult the git log if you\n# need to determine who owns an individual contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. If a copy of the MPL was not distributed with this file, You can\n# obtain one at http://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\nfrom __future__ import division, print_function, absolute_import\n\nimport django.db.models as dm\nfrom django.db import IntegrityError\n\nimport hypothesis.strategies as st\nimport hypothesis.extra.fakefactory as ff\nfrom hypothesis.errors import InvalidArgument\nfrom hypothesis.extra.datetime import datetimes\nfrom hypothesis.utils.conventions import UniqueIdentifier\nfrom hypothesis.searchstrategy.strategies import SearchStrategy\n\n\nclass ModelNotSupported(Exception):\n pass\n\n\ndef referenced_models(model, seen=None):\n if seen is None:\n seen = set()\n for f in model._meta.concrete_fields:\n if isinstance(f, dm.ForeignKey):\n t = f.rel.to\n if t not in seen:\n seen.add(t)\n referenced_models(t, seen)\n return seen\n\n\n__default_field_mappings = None\n\n\ndef field_mappings():\n global __default_field_mappings\n\n if __default_field_mappings is None:\n __default_field_mappings = {\n dm.SmallIntegerField: st.integers(-32768, 32767),\n dm.IntegerField: st.integers(-2147483648, 2147483647),\n dm.BigIntegerField:\n st.integers(-9223372036854775808, 9223372036854775807),\n dm.PositiveIntegerField: st.integers(0, 2147483647),\n dm.PositiveSmallIntegerField: st.integers(0, 32767),\n dm.BinaryField: st.binary(),\n dm.BooleanField: st.booleans(),\n dm.CharField: st.text(),\n dm.TextField: st.text(),\n dm.DateTimeField: datetimes(allow_naive=False),\n dm.EmailField: ff.fake_factory(u'email'),\n dm.FloatField: st.floats(),\n dm.NullBooleanField: st.one_of(st.none(), st.booleans()),\n }\n return __default_field_mappings\n\n\ndef add_default_field_mapping(field_type, strategy):\n field_mappings()[field_type] = strategy\n\n\ndefault_value = UniqueIdentifier(u'default_value')\n\n\ndef models(model, **extra):\n result = {}\n mappings = field_mappings()\n mandatory = set()\n for f in model._meta.concrete_fields:\n if isinstance(f, dm.AutoField):\n continue\n try:\n mapped = mappings[type(f)]\n except KeyError:\n if not f.null:\n mandatory.add(f.name)\n continue\n if f.null:\n mapped = st.one_of(st.none(), mapped)\n result[f.name] = mapped\n missed = {x for x in mandatory if x not in extra}\n if missed:\n raise InvalidArgument((\n u'Missing arguments for mandatory field%s %s for model %s' % (\n u's' if len(missed) > 1 else u'',\n u', '.join(missed),\n model.__name__,\n )))\n result.update(extra)\n # Remove default_values so we don't try to generate anything for those.\n result = {k: v for k, v in result.items() if v is not default_value}\n return ModelStrategy(model, result)\n\n\nclass ModelStrategy(SearchStrategy):\n\n def __init__(self, model, mappings):\n super(ModelStrategy, self).__init__()\n self.model = model\n self.arg_strategy = st.fixed_dictionaries(mappings)\n\n def __repr__(self):\n return u'ModelStrategy(%s)' % (self.model.__name__,)\n\n def do_draw(self, data):\n try:\n result, _ = self.model.objects.get_or_create(\n **self.arg_strategy.do_draw(data)\n )\n return result\n except IntegrityError:\n data.mark_invalid()\n", "path": "src/hypothesis/extra/django/models.py"}]} | 1,591 | 286 |
gh_patches_debug_1180 | rasdani/github-patches | git_diff | encode__httpx-1054
--- BEGIN ISSUE ---
Type-checking our tests
I know this is not a standard thing to do across Encode projects, but I've been wondering if it would be worth starting to type-hint our tests.
I've seen at least two instances of this recently:
- In HTTPX: https://github.com/encode/httpx/pull/648#discussion_r359862603
- In Starlette: https://github.com/encode/starlette/issues/722
My rationale is based on two aspects:
- It improves our upfront knowledge about how users will actually use HTTPX — currently their usage of type hints in the wild is not reflected anywhere.
- It helps us catch type hint inconsistencies we wouldn't see in the core package.
The main counter-argument, I suppose, is that type hinting tests is tedious. I think that's fair, but I believe the two pro's above make it compelling.
Thoughts?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `httpx/_types.py`
Content:
```
1 """
2 Type definitions for type checking purposes.
3 """
4
5 import ssl
6 from http.cookiejar import CookieJar
7 from typing import (
8 IO,
9 TYPE_CHECKING,
10 AsyncIterator,
11 Callable,
12 Dict,
13 Iterator,
14 List,
15 Mapping,
16 Optional,
17 Sequence,
18 Tuple,
19 Union,
20 )
21
22 if TYPE_CHECKING: # pragma: no cover
23 from ._auth import Auth # noqa: F401
24 from ._config import Proxy, Timeout # noqa: F401
25 from ._models import URL, Cookies, Headers, QueryParams, Request # noqa: F401
26
27
28 PrimitiveData = Optional[Union[str, int, float, bool]]
29
30 URLTypes = Union["URL", str]
31
32 QueryParamTypes = Union[
33 "QueryParams",
34 Mapping[str, Union[PrimitiveData, Sequence[PrimitiveData]]],
35 List[Tuple[str, PrimitiveData]],
36 str,
37 ]
38
39 HeaderTypes = Union[
40 "Headers",
41 Dict[str, str],
42 Dict[bytes, bytes],
43 Sequence[Tuple[str, str]],
44 Sequence[Tuple[bytes, bytes]],
45 ]
46
47 CookieTypes = Union["Cookies", CookieJar, Dict[str, str]]
48
49 CertTypes = Union[str, Tuple[str, str], Tuple[str, str, str]]
50 VerifyTypes = Union[str, bool, ssl.SSLContext]
51 TimeoutTypes = Union[
52 Optional[float],
53 Tuple[Optional[float], Optional[float], Optional[float], Optional[float]],
54 "Timeout",
55 ]
56 ProxiesTypes = Union[URLTypes, "Proxy", Dict[URLTypes, Union[URLTypes, "Proxy"]]]
57
58 AuthTypes = Union[
59 Tuple[Union[str, bytes], Union[str, bytes]],
60 Callable[["Request"], "Request"],
61 "Auth",
62 ]
63
64 RequestData = Union[dict, str, bytes, Iterator[bytes], AsyncIterator[bytes]]
65
66 FileContent = Union[IO[str], IO[bytes], str, bytes]
67 FileTypes = Union[
68 # file (or text)
69 FileContent,
70 # (filename, file (or text))
71 Tuple[Optional[str], FileContent],
72 # (filename, file (or text), content_type)
73 Tuple[Optional[str], FileContent, Optional[str]],
74 ]
75 RequestFiles = Union[Mapping[str, FileTypes], List[Tuple[str, FileTypes]]]
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/httpx/_types.py b/httpx/_types.py
--- a/httpx/_types.py
+++ b/httpx/_types.py
@@ -72,4 +72,4 @@
# (filename, file (or text), content_type)
Tuple[Optional[str], FileContent, Optional[str]],
]
-RequestFiles = Union[Mapping[str, FileTypes], List[Tuple[str, FileTypes]]]
+RequestFiles = Union[Mapping[str, FileTypes], Sequence[Tuple[str, FileTypes]]]
| {"golden_diff": "diff --git a/httpx/_types.py b/httpx/_types.py\n--- a/httpx/_types.py\n+++ b/httpx/_types.py\n@@ -72,4 +72,4 @@\n # (filename, file (or text), content_type)\n Tuple[Optional[str], FileContent, Optional[str]],\n ]\n-RequestFiles = Union[Mapping[str, FileTypes], List[Tuple[str, FileTypes]]]\n+RequestFiles = Union[Mapping[str, FileTypes], Sequence[Tuple[str, FileTypes]]]\n", "issue": "Type-checking our tests\nI know this is not a standard thing to do across Encode projects, but I've been wondering if it would be worth starting to type-hint our tests.\r\n\r\nI've seen at least two instances of this recently:\r\n\r\n- In HTTPX: https://github.com/encode/httpx/pull/648#discussion_r359862603\r\n- In Starlette: https://github.com/encode/starlette/issues/722\r\n\r\nMy rationale is based on two aspects:\r\n\r\n- It improves our upfront knowledge about how users will actually use HTTPX \u2014 currently their usage of type hints in the wild is not reflected anywhere.\r\n- It helps us catch type hint inconsistencies we wouldn't see in the core package.\r\n\r\nThe main counter-argument, I suppose, is that type hinting tests is tedious. I think that's fair, but I believe the two pro's above make it compelling.\r\n\r\nThoughts?\n", "before_files": [{"content": "\"\"\"\nType definitions for type checking purposes.\n\"\"\"\n\nimport ssl\nfrom http.cookiejar import CookieJar\nfrom typing import (\n IO,\n TYPE_CHECKING,\n AsyncIterator,\n Callable,\n Dict,\n Iterator,\n List,\n Mapping,\n Optional,\n Sequence,\n Tuple,\n Union,\n)\n\nif TYPE_CHECKING: # pragma: no cover\n from ._auth import Auth # noqa: F401\n from ._config import Proxy, Timeout # noqa: F401\n from ._models import URL, Cookies, Headers, QueryParams, Request # noqa: F401\n\n\nPrimitiveData = Optional[Union[str, int, float, bool]]\n\nURLTypes = Union[\"URL\", str]\n\nQueryParamTypes = Union[\n \"QueryParams\",\n Mapping[str, Union[PrimitiveData, Sequence[PrimitiveData]]],\n List[Tuple[str, PrimitiveData]],\n str,\n]\n\nHeaderTypes = Union[\n \"Headers\",\n Dict[str, str],\n Dict[bytes, bytes],\n Sequence[Tuple[str, str]],\n Sequence[Tuple[bytes, bytes]],\n]\n\nCookieTypes = Union[\"Cookies\", CookieJar, Dict[str, str]]\n\nCertTypes = Union[str, Tuple[str, str], Tuple[str, str, str]]\nVerifyTypes = Union[str, bool, ssl.SSLContext]\nTimeoutTypes = Union[\n Optional[float],\n Tuple[Optional[float], Optional[float], Optional[float], Optional[float]],\n \"Timeout\",\n]\nProxiesTypes = Union[URLTypes, \"Proxy\", Dict[URLTypes, Union[URLTypes, \"Proxy\"]]]\n\nAuthTypes = Union[\n Tuple[Union[str, bytes], Union[str, bytes]],\n Callable[[\"Request\"], \"Request\"],\n \"Auth\",\n]\n\nRequestData = Union[dict, str, bytes, Iterator[bytes], AsyncIterator[bytes]]\n\nFileContent = Union[IO[str], IO[bytes], str, bytes]\nFileTypes = Union[\n # file (or text)\n FileContent,\n # (filename, file (or text))\n Tuple[Optional[str], FileContent],\n # (filename, file (or text), content_type)\n Tuple[Optional[str], FileContent, Optional[str]],\n]\nRequestFiles = Union[Mapping[str, FileTypes], List[Tuple[str, FileTypes]]]\n", "path": "httpx/_types.py"}], "after_files": [{"content": "\"\"\"\nType definitions for type checking purposes.\n\"\"\"\n\nimport ssl\nfrom http.cookiejar import CookieJar\nfrom typing import (\n IO,\n TYPE_CHECKING,\n AsyncIterator,\n Callable,\n Dict,\n Iterator,\n List,\n Mapping,\n Optional,\n Sequence,\n Tuple,\n Union,\n)\n\nif TYPE_CHECKING: # pragma: no cover\n from ._auth import Auth # noqa: F401\n from ._config import 
Proxy, Timeout # noqa: F401\n from ._models import URL, Cookies, Headers, QueryParams, Request # noqa: F401\n\n\nPrimitiveData = Optional[Union[str, int, float, bool]]\n\nURLTypes = Union[\"URL\", str]\n\nQueryParamTypes = Union[\n \"QueryParams\",\n Mapping[str, Union[PrimitiveData, Sequence[PrimitiveData]]],\n List[Tuple[str, PrimitiveData]],\n str,\n]\n\nHeaderTypes = Union[\n \"Headers\",\n Dict[str, str],\n Dict[bytes, bytes],\n Sequence[Tuple[str, str]],\n Sequence[Tuple[bytes, bytes]],\n]\n\nCookieTypes = Union[\"Cookies\", CookieJar, Dict[str, str]]\n\nCertTypes = Union[str, Tuple[str, str], Tuple[str, str, str]]\nVerifyTypes = Union[str, bool, ssl.SSLContext]\nTimeoutTypes = Union[\n Optional[float],\n Tuple[Optional[float], Optional[float], Optional[float], Optional[float]],\n \"Timeout\",\n]\nProxiesTypes = Union[URLTypes, \"Proxy\", Dict[URLTypes, Union[URLTypes, \"Proxy\"]]]\n\nAuthTypes = Union[\n Tuple[Union[str, bytes], Union[str, bytes]],\n Callable[[\"Request\"], \"Request\"],\n \"Auth\",\n]\n\nRequestData = Union[dict, str, bytes, Iterator[bytes], AsyncIterator[bytes]]\n\nFileContent = Union[IO[str], IO[bytes], str, bytes]\nFileTypes = Union[\n # file (or text)\n FileContent,\n # (filename, file (or text))\n Tuple[Optional[str], FileContent],\n # (filename, file (or text), content_type)\n Tuple[Optional[str], FileContent, Optional[str]],\n]\nRequestFiles = Union[Mapping[str, FileTypes], Sequence[Tuple[str, FileTypes]]]\n", "path": "httpx/_types.py"}]} | 1,091 | 112 |
gh_patches_debug_10160 | rasdani/github-patches | git_diff | comic__grand-challenge.org-581
--- BEGIN ISSUE ---
CKEditor Image upload makes the GUI undismissable
The browser keeps the changed fields' state after the image is uploaded, and for some reason we're unable to dismiss the GUI.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/config/urls.py`
Content:
```
1 from django.conf import settings
2 from django.conf.urls import include
3 from django.contrib import admin
4 from django.template.response import TemplateResponse
5 from django.urls import re_path, path
6 from django.views.generic import TemplateView, RedirectView
7
8 from grandchallenge.core.views import comicmain
9 from grandchallenge.pages.views import FaviconView
10
11 admin.autodiscover()
12
13
14 def handler500(request):
15 context = {"request": request}
16 template_name = "500.html"
17 return TemplateResponse(request, template_name, context, status=500)
18
19
20 urlpatterns = [
21 path("", comicmain, name="home"),
22 path(
23 "robots.txt/",
24 TemplateView.as_view(
25 template_name="robots.txt", content_type="text/plain"
26 ),
27 ),
28 # Favicons
29 path(
30 "favicon.ico/",
31 FaviconView.as_view(rel="shortcut icon"),
32 name="favicon",
33 ),
34 path(
35 "apple-touch-icon.png/",
36 FaviconView.as_view(rel="apple-touch-icon"),
37 name="apple-touch-icon",
38 ),
39 path(
40 "apple-touch-icon-precomposed.png/",
41 FaviconView.as_view(rel="apple-touch-icon-precomposed"),
42 name="apple-touch-icon-precomposed",
43 ),
44 path(
45 "apple-touch-icon-<int:size>x<int>.png/",
46 FaviconView.as_view(rel="apple-touch-icon"),
47 name="apple-touch-icon-sized",
48 ),
49 path(
50 "apple-touch-icon-<int:size>x<int>-precomposed.png/",
51 FaviconView.as_view(rel="apple-touch-icon-precomposed"),
52 name="apple-touch-icon-precomposed-sized",
53 ),
54 path(settings.ADMIN_URL, admin.site.urls),
55 path(
56 "site/<slug:challenge_short_name>/",
57 include("grandchallenge.core.urls"),
58 name="site",
59 ),
60 path(
61 "stats/",
62 include("grandchallenge.statistics.urls", namespace="statistics"),
63 ),
64 # Do not change the api namespace without updating the view names in
65 # all of the serializers
66 path("api/", include("grandchallenge.api.urls", namespace="api")),
67 # Used for logging in and managing grandchallenge.profiles. This is done on
68 # the framework level because it is too hard to get this all under each
69 # project
70 path("accounts/", include("grandchallenge.profiles.urls")),
71 path("socialauth/", include("social_django.urls", namespace="social")),
72 path(
73 "challenges/",
74 include("grandchallenge.challenges.urls", namespace="challenges"),
75 ),
76 re_path(
77 r"^(?i)all_challenges/$",
78 RedirectView.as_view(pattern_name="challenges:list", permanent=False),
79 ),
80 path("cases/", include("grandchallenge.cases.urls", namespace="cases")),
81 path(
82 "algorithms/",
83 include("grandchallenge.algorithms.urls", namespace="algorithms"),
84 ),
85 # ========== catch all ====================
86 # when all other urls have been checked, try to load page from main project
87 # keep this url at the bottom of this list, because urls are checked in
88 # order
89 path("<slug:page_title>/", comicmain, name="mainproject-home"),
90 path(
91 "media/", include("grandchallenge.serving.urls", namespace="serving")
92 ),
93 ]
94 if settings.DEBUG and settings.ENABLE_DEBUG_TOOLBAR:
95 import debug_toolbar
96
97 urlpatterns = [
98 path("__debug__/", include(debug_toolbar.urls))
99 ] + urlpatterns
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/config/urls.py b/app/config/urls.py
--- a/app/config/urls.py
+++ b/app/config/urls.py
@@ -73,8 +73,12 @@
"challenges/",
include("grandchallenge.challenges.urls", namespace="challenges"),
),
- re_path(
- r"^(?i)all_challenges/$",
+ path(
+ "all_challenges/",
+ RedirectView.as_view(pattern_name="challenges:list", permanent=False),
+ ),
+ path(
+ "All_Challenges/",
RedirectView.as_view(pattern_name="challenges:list", permanent=False),
),
path("cases/", include("grandchallenge.cases.urls", namespace="cases")),
| {"golden_diff": "diff --git a/app/config/urls.py b/app/config/urls.py\n--- a/app/config/urls.py\n+++ b/app/config/urls.py\n@@ -73,8 +73,12 @@\n \"challenges/\",\n include(\"grandchallenge.challenges.urls\", namespace=\"challenges\"),\n ),\n- re_path(\n- r\"^(?i)all_challenges/$\",\n+ path(\n+ \"all_challenges/\",\n+ RedirectView.as_view(pattern_name=\"challenges:list\", permanent=False),\n+ ),\n+ path(\n+ \"All_Challenges/\",\n RedirectView.as_view(pattern_name=\"challenges:list\", permanent=False),\n ),\n path(\"cases/\", include(\"grandchallenge.cases.urls\", namespace=\"cases\")),\n", "issue": "CKEditor Image upload makes the GUI undismissable\nThe browser keeps the changed fields state after the image is uploaded, and for some reason we're unable to dismiss the gui.\n", "before_files": [{"content": "from django.conf import settings\nfrom django.conf.urls import include\nfrom django.contrib import admin\nfrom django.template.response import TemplateResponse\nfrom django.urls import re_path, path\nfrom django.views.generic import TemplateView, RedirectView\n\nfrom grandchallenge.core.views import comicmain\nfrom grandchallenge.pages.views import FaviconView\n\nadmin.autodiscover()\n\n\ndef handler500(request):\n context = {\"request\": request}\n template_name = \"500.html\"\n return TemplateResponse(request, template_name, context, status=500)\n\n\nurlpatterns = [\n path(\"\", comicmain, name=\"home\"),\n path(\n \"robots.txt/\",\n TemplateView.as_view(\n template_name=\"robots.txt\", content_type=\"text/plain\"\n ),\n ),\n # Favicons\n path(\n \"favicon.ico/\",\n FaviconView.as_view(rel=\"shortcut icon\"),\n name=\"favicon\",\n ),\n path(\n \"apple-touch-icon.png/\",\n FaviconView.as_view(rel=\"apple-touch-icon\"),\n name=\"apple-touch-icon\",\n ),\n path(\n \"apple-touch-icon-precomposed.png/\",\n FaviconView.as_view(rel=\"apple-touch-icon-precomposed\"),\n name=\"apple-touch-icon-precomposed\",\n ),\n path(\n \"apple-touch-icon-<int:size>x<int>.png/\",\n FaviconView.as_view(rel=\"apple-touch-icon\"),\n name=\"apple-touch-icon-sized\",\n ),\n path(\n \"apple-touch-icon-<int:size>x<int>-precomposed.png/\",\n FaviconView.as_view(rel=\"apple-touch-icon-precomposed\"),\n name=\"apple-touch-icon-precomposed-sized\",\n ),\n path(settings.ADMIN_URL, admin.site.urls),\n path(\n \"site/<slug:challenge_short_name>/\",\n include(\"grandchallenge.core.urls\"),\n name=\"site\",\n ),\n path(\n \"stats/\",\n include(\"grandchallenge.statistics.urls\", namespace=\"statistics\"),\n ),\n # Do not change the api namespace without updating the view names in\n # all of the serializers\n path(\"api/\", include(\"grandchallenge.api.urls\", namespace=\"api\")),\n # Used for logging in and managing grandchallenge.profiles. 
This is done on\n # the framework level because it is too hard to get this all under each\n # project\n path(\"accounts/\", include(\"grandchallenge.profiles.urls\")),\n path(\"socialauth/\", include(\"social_django.urls\", namespace=\"social\")),\n path(\n \"challenges/\",\n include(\"grandchallenge.challenges.urls\", namespace=\"challenges\"),\n ),\n re_path(\n r\"^(?i)all_challenges/$\",\n RedirectView.as_view(pattern_name=\"challenges:list\", permanent=False),\n ),\n path(\"cases/\", include(\"grandchallenge.cases.urls\", namespace=\"cases\")),\n path(\n \"algorithms/\",\n include(\"grandchallenge.algorithms.urls\", namespace=\"algorithms\"),\n ),\n # ========== catch all ====================\n # when all other urls have been checked, try to load page from main project\n # keep this url at the bottom of this list, because urls are checked in\n # order\n path(\"<slug:page_title>/\", comicmain, name=\"mainproject-home\"),\n path(\n \"media/\", include(\"grandchallenge.serving.urls\", namespace=\"serving\")\n ),\n]\nif settings.DEBUG and settings.ENABLE_DEBUG_TOOLBAR:\n import debug_toolbar\n\n urlpatterns = [\n path(\"__debug__/\", include(debug_toolbar.urls))\n ] + urlpatterns\n", "path": "app/config/urls.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django.conf.urls import include\nfrom django.contrib import admin\nfrom django.template.response import TemplateResponse\nfrom django.urls import re_path, path\nfrom django.views.generic import TemplateView, RedirectView\n\nfrom grandchallenge.core.views import comicmain\nfrom grandchallenge.pages.views import FaviconView\n\nadmin.autodiscover()\n\n\ndef handler500(request):\n context = {\"request\": request}\n template_name = \"500.html\"\n return TemplateResponse(request, template_name, context, status=500)\n\n\nurlpatterns = [\n path(\"\", comicmain, name=\"home\"),\n path(\n \"robots.txt/\",\n TemplateView.as_view(\n template_name=\"robots.txt\", content_type=\"text/plain\"\n ),\n ),\n # Favicons\n path(\n \"favicon.ico/\",\n FaviconView.as_view(rel=\"shortcut icon\"),\n name=\"favicon\",\n ),\n path(\n \"apple-touch-icon.png/\",\n FaviconView.as_view(rel=\"apple-touch-icon\"),\n name=\"apple-touch-icon\",\n ),\n path(\n \"apple-touch-icon-precomposed.png/\",\n FaviconView.as_view(rel=\"apple-touch-icon-precomposed\"),\n name=\"apple-touch-icon-precomposed\",\n ),\n path(\n \"apple-touch-icon-<int:size>x<int>.png/\",\n FaviconView.as_view(rel=\"apple-touch-icon\"),\n name=\"apple-touch-icon-sized\",\n ),\n path(\n \"apple-touch-icon-<int:size>x<int>-precomposed.png/\",\n FaviconView.as_view(rel=\"apple-touch-icon-precomposed\"),\n name=\"apple-touch-icon-precomposed-sized\",\n ),\n path(settings.ADMIN_URL, admin.site.urls),\n path(\n \"site/<slug:challenge_short_name>/\",\n include(\"grandchallenge.core.urls\"),\n name=\"site\",\n ),\n path(\n \"stats/\",\n include(\"grandchallenge.statistics.urls\", namespace=\"statistics\"),\n ),\n # Do not change the api namespace without updating the view names in\n # all of the serializers\n path(\"api/\", include(\"grandchallenge.api.urls\", namespace=\"api\")),\n # Used for logging in and managing grandchallenge.profiles. 
This is done on\n # the framework level because it is too hard to get this all under each\n # project\n path(\"accounts/\", include(\"grandchallenge.profiles.urls\")),\n path(\"socialauth/\", include(\"social_django.urls\", namespace=\"social\")),\n path(\n \"challenges/\",\n include(\"grandchallenge.challenges.urls\", namespace=\"challenges\"),\n ),\n path(\n \"all_challenges/\",\n RedirectView.as_view(pattern_name=\"challenges:list\", permanent=False),\n ),\n path(\n \"All_Challenges/\",\n RedirectView.as_view(pattern_name=\"challenges:list\", permanent=False),\n ),\n path(\"cases/\", include(\"grandchallenge.cases.urls\", namespace=\"cases\")),\n path(\n \"algorithms/\",\n include(\"grandchallenge.algorithms.urls\", namespace=\"algorithms\"),\n ),\n # ========== catch all ====================\n # when all other urls have been checked, try to load page from main project\n # keep this url at the bottom of this list, because urls are checked in\n # order\n path(\"<slug:page_title>/\", comicmain, name=\"mainproject-home\"),\n path(\n \"media/\", include(\"grandchallenge.serving.urls\", namespace=\"serving\")\n ),\n]\nif settings.DEBUG and settings.ENABLE_DEBUG_TOOLBAR:\n import debug_toolbar\n\n urlpatterns = [\n path(\"__debug__/\", include(debug_toolbar.urls))\n ] + urlpatterns\n", "path": "app/config/urls.py"}]} | 1,224 | 159 |
gh_patches_debug_3171 | rasdani/github-patches | git_diff | svthalia__concrexit-1743
--- BEGIN ISSUE ---
Cannot enter float number as contribution during benefactor renewal
### Describe the bug
Cannot enter float number as contribution during benefactor renewal
### How to reproduce
Steps to reproduce the behaviour:
1. Go to http://localhost:8000/user/membership/
2. Add a benefactor membership renewal
3. The form will not error when trying to send
### Expected behaviour
Can enter float numbers as contribution. Like 7.69.
### Screenshots
<img width="691" alt="Screenshot 2021-06-20 at 18 48 35" src="https://user-images.githubusercontent.com/1799914/122682192-4edaf880-d1f8-11eb-99e2-26eaf5379ae8.png">
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/registrations/forms.py`
Content:
```
1 """The forms defined by the registrations package."""
2 from django import forms
3 from django.core.exceptions import NON_FIELD_ERRORS, ValidationError
4 from django.forms import TypedChoiceField
5 from django.urls import reverse_lazy
6 from django.utils import timezone
7 from django.utils.safestring import mark_safe
8 from django.utils.text import capfirst
9 from django.utils.translation import gettext_lazy as _
10
11 from members.models import Membership
12 from payments.widgets import SignatureWidget
13 from registrations import services
14 from .models import Registration, Renewal, Reference
15
16
17 class BaseRegistrationForm(forms.ModelForm):
18 """Base form for membership registrations."""
19
20 birthday = forms.DateField(
21 widget=forms.widgets.SelectDateWidget(
22 years=range(timezone.now().year - 50, timezone.now().year - 10)
23 ),
24 label=capfirst(_("birthday")),
25 )
26
27 privacy_policy = forms.BooleanField(required=True,)
28
29 def __init__(self, *args, **kwargs):
30 super().__init__(*args, **kwargs)
31 self.fields["privacy_policy"].label = mark_safe(
32 _('I accept the <a href="{}">privacy policy</a>.').format(
33 reverse_lazy("singlepages:privacy-policy")
34 )
35 )
36
37
38 class RegistrationAdminForm(forms.ModelForm):
39 """Custom admin form for Registration model to add the widget for the signature."""
40
41 class Meta:
42 fields = "__all__"
43 model = Registration
44 widgets = {
45 "signature": SignatureWidget(),
46 }
47
48
49 class MemberRegistrationForm(BaseRegistrationForm):
50 """Form for member registrations."""
51
52 this_year = timezone.now().year
53 years = reversed(
54 [(x, "{} - {}".format(x, x + 1)) for x in range(this_year - 20, this_year + 1)]
55 )
56
57 starting_year = TypedChoiceField(
58 choices=years,
59 coerce=int,
60 empty_value=this_year,
61 required=False,
62 help_text=_("What lecture year did you start studying at Radboud University?"),
63 )
64
65 class Meta:
66 model = Registration
67 widgets = {
68 "signature": SignatureWidget(),
69 }
70 fields = (
71 "length",
72 "first_name",
73 "last_name",
74 "birthday",
75 "email",
76 "phone_number",
77 "student_number",
78 "programme",
79 "starting_year",
80 "address_street",
81 "address_street2",
82 "address_postal_code",
83 "address_city",
84 "address_country",
85 "optin_birthday",
86 "optin_mailinglist",
87 "membership_type",
88 "direct_debit",
89 "initials",
90 "iban",
91 "bic",
92 "signature",
93 )
94
95
96 class BenefactorRegistrationForm(BaseRegistrationForm):
97 """Form for benefactor registrations."""
98
99 icis_employee = forms.BooleanField(
100 required=False, label=_("I am an employee of iCIS")
101 )
102
103 class Meta:
104 model = Registration
105 widgets = {
106 "signature": SignatureWidget(),
107 }
108 fields = (
109 "length",
110 "first_name",
111 "last_name",
112 "birthday",
113 "email",
114 "phone_number",
115 "student_number",
116 "address_street",
117 "address_street2",
118 "address_postal_code",
119 "address_city",
120 "address_country",
121 "optin_birthday",
122 "optin_mailinglist",
123 "contribution",
124 "membership_type",
125 "direct_debit",
126 "initials",
127 "iban",
128 "bic",
129 "signature",
130 )
131
132
133 class RenewalForm(forms.ModelForm):
134 """Form for membership renewals."""
135
136 privacy_policy = forms.BooleanField(required=True,)
137
138 icis_employee = forms.BooleanField(
139 required=False, label=_("I am an employee of iCIS")
140 )
141
142 contribution = forms.IntegerField(required=False,)
143
144 def __init__(self, *args, **kwargs):
145 super().__init__(*args, **kwargs)
146 self.fields["privacy_policy"].label = mark_safe(
147 _('I accept the <a href="{}">privacy policy</a>.').format(
148 reverse_lazy("singlepages:privacy-policy")
149 )
150 )
151
152 class Meta:
153 model = Renewal
154 fields = (
155 "member",
156 "length",
157 "contribution",
158 "membership_type",
159 "no_references",
160 "remarks",
161 )
162
163
164 class ReferenceForm(forms.ModelForm):
165 def clean(self):
166 super().clean()
167 membership = self.cleaned_data["member"].current_membership
168 if membership and membership.type == Membership.BENEFACTOR:
169 raise ValidationError(_("Benefactors cannot give references."))
170
171 membership = self.cleaned_data["member"].latest_membership
172 if (
173 membership
174 and membership.until
175 and membership.until < services.calculate_membership_since()
176 ):
177 raise ValidationError(
178 _(
179 "It's not possible to give references for "
180 "memberships that start after your own "
181 "membership's end."
182 )
183 )
184
185 class Meta:
186 model = Reference
187 fields = "__all__"
188 error_messages = {
189 NON_FIELD_ERRORS: {
190 "unique_together": _(
191 "You've already given a reference for this person."
192 ),
193 }
194 }
195
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/registrations/forms.py b/website/registrations/forms.py
--- a/website/registrations/forms.py
+++ b/website/registrations/forms.py
@@ -139,7 +139,7 @@
required=False, label=_("I am an employee of iCIS")
)
- contribution = forms.IntegerField(required=False,)
+ contribution = forms.DecimalField(required=False, max_digits=5, decimal_places=2,)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
| {"golden_diff": "diff --git a/website/registrations/forms.py b/website/registrations/forms.py\n--- a/website/registrations/forms.py\n+++ b/website/registrations/forms.py\n@@ -139,7 +139,7 @@\n required=False, label=_(\"I am an employee of iCIS\")\n )\n \n- contribution = forms.IntegerField(required=False,)\n+ contribution = forms.DecimalField(required=False, max_digits=5, decimal_places=2,)\n \n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n", "issue": "Cannot enter float number as contribution during benefactor renewal\n### Describe the bug\r\nCannot enter float number as contribution during benefactor renewal\r\n\r\n### How to reproduce\r\nSteps to reproduce the behaviour:\r\n1. Go to http://localhost:8000/user/membership/\r\n2. Add a benefactor membership renewal \r\n3. The form will not error when trying to send\r\n\r\n### Expected behaviour\r\nCan enter float numbers as contribution. Like 7.69.\r\n\r\n### Screenshots\r\n<img width=\"691\" alt=\"Screenshot 2021-06-20 at 18 48 35\" src=\"https://user-images.githubusercontent.com/1799914/122682192-4edaf880-d1f8-11eb-99e2-26eaf5379ae8.png\">\r\n\r\n\n", "before_files": [{"content": "\"\"\"The forms defined by the registrations package.\"\"\"\nfrom django import forms\nfrom django.core.exceptions import NON_FIELD_ERRORS, ValidationError\nfrom django.forms import TypedChoiceField\nfrom django.urls import reverse_lazy\nfrom django.utils import timezone\nfrom django.utils.safestring import mark_safe\nfrom django.utils.text import capfirst\nfrom django.utils.translation import gettext_lazy as _\n\nfrom members.models import Membership\nfrom payments.widgets import SignatureWidget\nfrom registrations import services\nfrom .models import Registration, Renewal, Reference\n\n\nclass BaseRegistrationForm(forms.ModelForm):\n \"\"\"Base form for membership registrations.\"\"\"\n\n birthday = forms.DateField(\n widget=forms.widgets.SelectDateWidget(\n years=range(timezone.now().year - 50, timezone.now().year - 10)\n ),\n label=capfirst(_(\"birthday\")),\n )\n\n privacy_policy = forms.BooleanField(required=True,)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields[\"privacy_policy\"].label = mark_safe(\n _('I accept the <a href=\"{}\">privacy policy</a>.').format(\n reverse_lazy(\"singlepages:privacy-policy\")\n )\n )\n\n\nclass RegistrationAdminForm(forms.ModelForm):\n \"\"\"Custom admin form for Registration model to add the widget for the signature.\"\"\"\n\n class Meta:\n fields = \"__all__\"\n model = Registration\n widgets = {\n \"signature\": SignatureWidget(),\n }\n\n\nclass MemberRegistrationForm(BaseRegistrationForm):\n \"\"\"Form for member registrations.\"\"\"\n\n this_year = timezone.now().year\n years = reversed(\n [(x, \"{} - {}\".format(x, x + 1)) for x in range(this_year - 20, this_year + 1)]\n )\n\n starting_year = TypedChoiceField(\n choices=years,\n coerce=int,\n empty_value=this_year,\n required=False,\n help_text=_(\"What lecture year did you start studying at Radboud University?\"),\n )\n\n class Meta:\n model = Registration\n widgets = {\n \"signature\": SignatureWidget(),\n }\n fields = (\n \"length\",\n \"first_name\",\n \"last_name\",\n \"birthday\",\n \"email\",\n \"phone_number\",\n \"student_number\",\n \"programme\",\n \"starting_year\",\n \"address_street\",\n \"address_street2\",\n \"address_postal_code\",\n \"address_city\",\n \"address_country\",\n \"optin_birthday\",\n \"optin_mailinglist\",\n \"membership_type\",\n \"direct_debit\",\n 
\"initials\",\n \"iban\",\n \"bic\",\n \"signature\",\n )\n\n\nclass BenefactorRegistrationForm(BaseRegistrationForm):\n \"\"\"Form for benefactor registrations.\"\"\"\n\n icis_employee = forms.BooleanField(\n required=False, label=_(\"I am an employee of iCIS\")\n )\n\n class Meta:\n model = Registration\n widgets = {\n \"signature\": SignatureWidget(),\n }\n fields = (\n \"length\",\n \"first_name\",\n \"last_name\",\n \"birthday\",\n \"email\",\n \"phone_number\",\n \"student_number\",\n \"address_street\",\n \"address_street2\",\n \"address_postal_code\",\n \"address_city\",\n \"address_country\",\n \"optin_birthday\",\n \"optin_mailinglist\",\n \"contribution\",\n \"membership_type\",\n \"direct_debit\",\n \"initials\",\n \"iban\",\n \"bic\",\n \"signature\",\n )\n\n\nclass RenewalForm(forms.ModelForm):\n \"\"\"Form for membership renewals.\"\"\"\n\n privacy_policy = forms.BooleanField(required=True,)\n\n icis_employee = forms.BooleanField(\n required=False, label=_(\"I am an employee of iCIS\")\n )\n\n contribution = forms.IntegerField(required=False,)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields[\"privacy_policy\"].label = mark_safe(\n _('I accept the <a href=\"{}\">privacy policy</a>.').format(\n reverse_lazy(\"singlepages:privacy-policy\")\n )\n )\n\n class Meta:\n model = Renewal\n fields = (\n \"member\",\n \"length\",\n \"contribution\",\n \"membership_type\",\n \"no_references\",\n \"remarks\",\n )\n\n\nclass ReferenceForm(forms.ModelForm):\n def clean(self):\n super().clean()\n membership = self.cleaned_data[\"member\"].current_membership\n if membership and membership.type == Membership.BENEFACTOR:\n raise ValidationError(_(\"Benefactors cannot give references.\"))\n\n membership = self.cleaned_data[\"member\"].latest_membership\n if (\n membership\n and membership.until\n and membership.until < services.calculate_membership_since()\n ):\n raise ValidationError(\n _(\n \"It's not possible to give references for \"\n \"memberships that start after your own \"\n \"membership's end.\"\n )\n )\n\n class Meta:\n model = Reference\n fields = \"__all__\"\n error_messages = {\n NON_FIELD_ERRORS: {\n \"unique_together\": _(\n \"You've already given a reference for this person.\"\n ),\n }\n }\n", "path": "website/registrations/forms.py"}], "after_files": [{"content": "\"\"\"The forms defined by the registrations package.\"\"\"\nfrom django import forms\nfrom django.core.exceptions import NON_FIELD_ERRORS, ValidationError\nfrom django.forms import TypedChoiceField\nfrom django.urls import reverse_lazy\nfrom django.utils import timezone\nfrom django.utils.safestring import mark_safe\nfrom django.utils.text import capfirst\nfrom django.utils.translation import gettext_lazy as _\n\nfrom members.models import Membership\nfrom payments.widgets import SignatureWidget\nfrom registrations import services\nfrom .models import Registration, Renewal, Reference\n\n\nclass BaseRegistrationForm(forms.ModelForm):\n \"\"\"Base form for membership registrations.\"\"\"\n\n birthday = forms.DateField(\n widget=forms.widgets.SelectDateWidget(\n years=range(timezone.now().year - 50, timezone.now().year - 10)\n ),\n label=capfirst(_(\"birthday\")),\n )\n\n privacy_policy = forms.BooleanField(required=True,)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields[\"privacy_policy\"].label = mark_safe(\n _('I accept the <a href=\"{}\">privacy policy</a>.').format(\n reverse_lazy(\"singlepages:privacy-policy\")\n )\n )\n\n\nclass 
RegistrationAdminForm(forms.ModelForm):\n \"\"\"Custom admin form for Registration model to add the widget for the signature.\"\"\"\n\n class Meta:\n fields = \"__all__\"\n model = Registration\n widgets = {\n \"signature\": SignatureWidget(),\n }\n\n\nclass MemberRegistrationForm(BaseRegistrationForm):\n \"\"\"Form for member registrations.\"\"\"\n\n this_year = timezone.now().year\n years = reversed(\n [(x, \"{} - {}\".format(x, x + 1)) for x in range(this_year - 20, this_year + 1)]\n )\n\n starting_year = TypedChoiceField(\n choices=years,\n coerce=int,\n empty_value=this_year,\n required=False,\n help_text=_(\"What lecture year did you start studying at Radboud University?\"),\n )\n\n class Meta:\n model = Registration\n widgets = {\n \"signature\": SignatureWidget(),\n }\n fields = (\n \"length\",\n \"first_name\",\n \"last_name\",\n \"birthday\",\n \"email\",\n \"phone_number\",\n \"student_number\",\n \"programme\",\n \"starting_year\",\n \"address_street\",\n \"address_street2\",\n \"address_postal_code\",\n \"address_city\",\n \"address_country\",\n \"optin_birthday\",\n \"optin_mailinglist\",\n \"membership_type\",\n \"direct_debit\",\n \"initials\",\n \"iban\",\n \"bic\",\n \"signature\",\n )\n\n\nclass BenefactorRegistrationForm(BaseRegistrationForm):\n \"\"\"Form for benefactor registrations.\"\"\"\n\n icis_employee = forms.BooleanField(\n required=False, label=_(\"I am an employee of iCIS\")\n )\n\n class Meta:\n model = Registration\n widgets = {\n \"signature\": SignatureWidget(),\n }\n fields = (\n \"length\",\n \"first_name\",\n \"last_name\",\n \"birthday\",\n \"email\",\n \"phone_number\",\n \"student_number\",\n \"address_street\",\n \"address_street2\",\n \"address_postal_code\",\n \"address_city\",\n \"address_country\",\n \"optin_birthday\",\n \"optin_mailinglist\",\n \"contribution\",\n \"membership_type\",\n \"direct_debit\",\n \"initials\",\n \"iban\",\n \"bic\",\n \"signature\",\n )\n\n\nclass RenewalForm(forms.ModelForm):\n \"\"\"Form for membership renewals.\"\"\"\n\n privacy_policy = forms.BooleanField(required=True,)\n\n icis_employee = forms.BooleanField(\n required=False, label=_(\"I am an employee of iCIS\")\n )\n\n contribution = forms.DecimalField(required=False, max_digits=5, decimal_places=2,)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields[\"privacy_policy\"].label = mark_safe(\n _('I accept the <a href=\"{}\">privacy policy</a>.').format(\n reverse_lazy(\"singlepages:privacy-policy\")\n )\n )\n\n class Meta:\n model = Renewal\n fields = (\n \"member\",\n \"length\",\n \"contribution\",\n \"membership_type\",\n \"no_references\",\n \"remarks\",\n )\n\n\nclass ReferenceForm(forms.ModelForm):\n def clean(self):\n super().clean()\n membership = self.cleaned_data[\"member\"].current_membership\n if membership and membership.type == Membership.BENEFACTOR:\n raise ValidationError(_(\"Benefactors cannot give references.\"))\n\n membership = self.cleaned_data[\"member\"].latest_membership\n if (\n membership\n and membership.until\n and membership.until < services.calculate_membership_since()\n ):\n raise ValidationError(\n _(\n \"It's not possible to give references for \"\n \"memberships that start after your own \"\n \"membership's end.\"\n )\n )\n\n class Meta:\n model = Reference\n fields = \"__all__\"\n error_messages = {\n NON_FIELD_ERRORS: {\n \"unique_together\": _(\n \"You've already given a reference for this person.\"\n ),\n }\n }\n", "path": "website/registrations/forms.py"}]} | 2,027 | 124 |
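As a side note on the record above: a minimal, standalone sketch of why an `IntegerField` rejects a value like 7.69 while the `DecimalField` from the golden diff accepts it. This assumes only a plain Django install; the form names and the `settings.configure()` bootstrap are illustrative and are not part of concrexit.

```python
# Standalone illustration (not concrexit code): IntegerField vs DecimalField
# for a "contribution" value such as "7.69".
import django
from django.conf import settings

settings.configure(USE_I18N=False)  # minimal settings so forms validate outside a project
django.setup()

from django import forms


class IntContributionForm(forms.Form):
    contribution = forms.IntegerField(required=False)


class DecimalContributionForm(forms.Form):
    # Mirrors the golden diff: up to 5 digits, 2 of them after the decimal point.
    contribution = forms.DecimalField(required=False, max_digits=5, decimal_places=2)


print(IntContributionForm(data={"contribution": "7.69"}).is_valid())      # expected: False
print(DecimalContributionForm(data={"contribution": "7.69"}).is_valid())  # expected: True
```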
gh_patches_debug_57471 | rasdani/github-patches | git_diff | d2l-ai__d2l-en-2279 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ModuleNotFoundError when running the official pytorch colab notebook
(screenshot: ModuleNotFoundError traceback from the Colab notebook)
I can replicate the error at multiple official pytorch colab notebooks, e.g.
https://colab.research.google.com/github/d2l-ai/d2l-pytorch-colab/blob/master/chapter_linear-classification/image-classification-dataset.ipynb#scrollTo=ee445cce
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import setup, find_packages
2 import d2l
3
4 requirements = [
5 'ipython>=7.23',
6 'jupyter',
7 'numpy',
8 'matplotlib',
9 'requests',
10 'pandas',
11 'gym'
12 ]
13
14 setup(
15 name='d2l',
16 version=d2l.__version__,
17 python_requires='>=3.5',
18 author='D2L Developers',
19 author_email='[email protected]',
20 url='https://d2l.ai',
21 description='Dive into Deep Learning',
22 license='MIT-0',
23 packages=find_packages(),
24 zip_safe=True,
25 install_requires=requirements,
26 )
27
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -2,10 +2,10 @@
import d2l
requirements = [
- 'ipython>=7.23',
'jupyter',
'numpy',
'matplotlib',
+ 'matplotlib-inline',
'requests',
'pandas',
'gym'
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -2,10 +2,10 @@\n import d2l\n \n requirements = [\n- 'ipython>=7.23',\n 'jupyter',\n 'numpy',\n 'matplotlib',\n+ 'matplotlib-inline',\n 'requests',\n 'pandas',\n 'gym'\n", "issue": "ModuleNotFoundError when running the official pytorch colab notebook\n\r\n\r\nI can replicate the error at multiple official pytorch colab notebooks, e.g. \r\n\r\nhttps://colab.research.google.com/github/d2l-ai/d2l-pytorch-colab/blob/master/chapter_linear-classification/image-classification-dataset.ipynb#scrollTo=ee445cce\r\n\r\n\r\n\n", "before_files": [{"content": "from setuptools import setup, find_packages\nimport d2l\n\nrequirements = [\n 'ipython>=7.23',\n 'jupyter',\n 'numpy',\n 'matplotlib',\n 'requests',\n 'pandas',\n 'gym'\n]\n\nsetup(\n name='d2l',\n version=d2l.__version__,\n python_requires='>=3.5',\n author='D2L Developers',\n author_email='[email protected]',\n url='https://d2l.ai',\n description='Dive into Deep Learning',\n license='MIT-0',\n packages=find_packages(),\n zip_safe=True,\n install_requires=requirements,\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup, find_packages\nimport d2l\n\nrequirements = [\n 'jupyter',\n 'numpy',\n 'matplotlib',\n 'matplotlib-inline',\n 'requests',\n 'pandas',\n 'gym'\n]\n\nsetup(\n name='d2l',\n version=d2l.__version__,\n python_requires='>=3.5',\n author='D2L Developers',\n author_email='[email protected]',\n url='https://d2l.ai',\n description='Dive into Deep Learning',\n license='MIT-0',\n packages=find_packages(),\n zip_safe=True,\n install_requires=requirements,\n)\n", "path": "setup.py"}]} | 591 | 84 |
gh_patches_debug_892 | rasdani/github-patches | git_diff | rasterio__rasterio-437 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Check for "ndarray-like" instead of ndarray in _warp; other places
I want to use `rasterio.warp.reproject` on an `xray.Dataset` with `xray.Dataset.apply` (http://xray.readthedocs.org/en/stable/). xray has a feature to turn the dataset into a `np.ndarray`, but that means losing all my metadata.
At https://github.com/mapbox/rasterio/blob/master/rasterio/_warp.pyx#L249, _warp checks that the source is an `np.ndarray` (whereas the source in my case is an `xray.DataArray` - satisfying the same interfaces as `np.ndarray`), so I get an invalid source error.
It could be a good idea to check for something like
```
def is_ndarray_like(source):
return hasattr(source, '__array__')
```
instead of
```
isinstance(source, np.ndarray)
```
so other numpy-like arrays can be used.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rasterio/dtypes.py`
Content:
```
1 # Mapping of GDAL to Numpy data types.
2 #
3 # Since 0.13 we are not importing numpy here and data types are strings.
4 # Happily strings can be used throughout Numpy and so existing code will
5 # break.
6 #
7 # Within Rasterio, to test data types, we use Numpy's dtype() factory to
8 # do something like this:
9 #
10 # if np.dtype(destination.dtype) == np.dtype(rasterio.uint8): ...
11 #
12
13 bool_ = 'bool'
14 ubyte = uint8 = 'uint8'
15 uint16 = 'uint16'
16 int16 = 'int16'
17 uint32 = 'uint32'
18 int32 = 'int32'
19 float32 = 'float32'
20 float64 = 'float64'
21 complex_ = 'complex'
22 complex64 = 'complex64'
23 complex128 = 'complex128'
24
25 # Not supported:
26 # GDT_CInt16 = 8, GDT_CInt32 = 9, GDT_CFloat32 = 10, GDT_CFloat64 = 11
27
28 dtype_fwd = {
29 0: None, # GDT_Unknown
30 1: ubyte, # GDT_Byte
31 2: uint16, # GDT_UInt16
32 3: int16, # GDT_Int16
33 4: uint32, # GDT_UInt32
34 5: int32, # GDT_Int32
35 6: float32, # GDT_Float32
36 7: float64, # GDT_Float64
37 8: complex_, # GDT_CInt16
38 9: complex_, # GDT_CInt32
39 10: complex64, # GDT_CFloat32
40 11: complex128 } # GDT_CFloat64
41
42 dtype_rev = dict((v, k) for k, v in dtype_fwd.items())
43 dtype_rev['uint8'] = 1
44
45 typename_fwd = {
46 0: 'Unknown',
47 1: 'Byte',
48 2: 'UInt16',
49 3: 'Int16',
50 4: 'UInt32',
51 5: 'Int32',
52 6: 'Float32',
53 7: 'Float64',
54 8: 'CInt16',
55 9: 'CInt32',
56 10: 'CFloat32',
57 11: 'CFloat64' }
58
59 typename_rev = dict((v, k) for k, v in typename_fwd.items())
60
61 def _gdal_typename(dt):
62 try:
63 return typename_fwd[dtype_rev[dt]]
64 except KeyError:
65 return typename_fwd[dtype_rev[dt().dtype.name]]
66
67 def check_dtype(dt):
68 if dt not in dtype_rev:
69 try:
70 return dt().dtype.name in dtype_rev
71 except:
72 return False
73 return True
74
75
76 def get_minimum_int_dtype(values):
77 """
78 Uses range checking to determine the minimum integer data type required
79 to represent values.
80
81 :param values: numpy array
82 :return: named data type that can be later used to create a numpy dtype
83 """
84
85 min_value = values.min()
86 max_value = values.max()
87
88 if min_value >= 0:
89 if max_value <= 255:
90 return uint8
91 elif max_value <= 65535:
92 return uint16
93 elif max_value <= 4294967295:
94 return uint32
95 elif min_value >= -32768 and max_value <= 32767:
96 return int16
97 elif min_value >= -2147483648 and max_value <= 2147483647:
98 return int32
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rasterio/dtypes.py b/rasterio/dtypes.py
--- a/rasterio/dtypes.py
+++ b/rasterio/dtypes.py
@@ -96,3 +96,9 @@
return int16
elif min_value >= -2147483648 and max_value <= 2147483647:
return int32
+
+
+def is_ndarray(array):
+ import numpy
+
+ return isinstance(array, numpy.ndarray) or hasattr(array, '__array__')
| {"golden_diff": "diff --git a/rasterio/dtypes.py b/rasterio/dtypes.py\n--- a/rasterio/dtypes.py\n+++ b/rasterio/dtypes.py\n@@ -96,3 +96,9 @@\n return int16\n elif min_value >= -2147483648 and max_value <= 2147483647:\n return int32\n+\n+\n+def is_ndarray(array):\n+ import numpy\n+\n+ return isinstance(array, numpy.ndarray) or hasattr(array, '__array__')\n", "issue": "Check for \"ndarray-like\" instead of ndarray in _warp; other places\nI want to use `rasterio.warp.reproject` on an `xray.Dataset` with `xray.Dataset.apply` (http://xray.readthedocs.org/en/stable/). xray has a feature to turn the dataset into a `np.ndarray`, but that means losing all my metadata.\n\nAt https://github.com/mapbox/rasterio/blob/master/rasterio/_warp.pyx#L249, _warp checks that the source is an `np.ndarray` (whereas the source in my case is an `xray.DataArray` - satisfying the same interfaces as `np.ndarray`), so I get an invalid source error.\n\nIt could be a good idea to check for something like\n\n```\ndef is_ndarray_like(source):\n return hasattr(source, '__array__')\n```\n\ninstead of\n\n```\nisinstance(source, np.ndarray)\n```\n\nso other numpy-like arrays can be used.\n\n", "before_files": [{"content": "# Mapping of GDAL to Numpy data types.\n#\n# Since 0.13 we are not importing numpy here and data types are strings.\n# Happily strings can be used throughout Numpy and so existing code will\n# break.\n#\n# Within Rasterio, to test data types, we use Numpy's dtype() factory to \n# do something like this:\n#\n# if np.dtype(destination.dtype) == np.dtype(rasterio.uint8): ...\n#\n\nbool_ = 'bool'\nubyte = uint8 = 'uint8'\nuint16 = 'uint16'\nint16 = 'int16'\nuint32 = 'uint32'\nint32 = 'int32'\nfloat32 = 'float32'\nfloat64 = 'float64'\ncomplex_ = 'complex'\ncomplex64 = 'complex64'\ncomplex128 = 'complex128'\n\n# Not supported:\n# GDT_CInt16 = 8, GDT_CInt32 = 9, GDT_CFloat32 = 10, GDT_CFloat64 = 11\n\ndtype_fwd = {\n 0: None, # GDT_Unknown\n 1: ubyte, # GDT_Byte\n 2: uint16, # GDT_UInt16\n 3: int16, # GDT_Int16\n 4: uint32, # GDT_UInt32\n 5: int32, # GDT_Int32\n 6: float32, # GDT_Float32\n 7: float64, # GDT_Float64\n 8: complex_, # GDT_CInt16\n 9: complex_, # GDT_CInt32\n 10: complex64, # GDT_CFloat32\n 11: complex128 } # GDT_CFloat64\n\ndtype_rev = dict((v, k) for k, v in dtype_fwd.items())\ndtype_rev['uint8'] = 1\n\ntypename_fwd = {\n 0: 'Unknown',\n 1: 'Byte',\n 2: 'UInt16',\n 3: 'Int16',\n 4: 'UInt32',\n 5: 'Int32',\n 6: 'Float32',\n 7: 'Float64',\n 8: 'CInt16',\n 9: 'CInt32',\n 10: 'CFloat32',\n 11: 'CFloat64' }\n\ntypename_rev = dict((v, k) for k, v in typename_fwd.items())\n\ndef _gdal_typename(dt):\n try:\n return typename_fwd[dtype_rev[dt]]\n except KeyError:\n return typename_fwd[dtype_rev[dt().dtype.name]]\n\ndef check_dtype(dt):\n if dt not in dtype_rev:\n try:\n return dt().dtype.name in dtype_rev\n except:\n return False\n return True\n\n\ndef get_minimum_int_dtype(values):\n \"\"\"\n Uses range checking to determine the minimum integer data type required\n to represent values.\n\n :param values: numpy array\n :return: named data type that can be later used to create a numpy dtype\n \"\"\"\n\n min_value = values.min()\n max_value = values.max()\n \n if min_value >= 0:\n if max_value <= 255:\n return uint8\n elif max_value <= 65535:\n return uint16\n elif max_value <= 4294967295:\n return uint32\n elif min_value >= -32768 and max_value <= 32767:\n return int16\n elif min_value >= -2147483648 and max_value <= 2147483647:\n return int32\n", "path": "rasterio/dtypes.py"}], "after_files": [{"content": "# 
Mapping of GDAL to Numpy data types.\n#\n# Since 0.13 we are not importing numpy here and data types are strings.\n# Happily strings can be used throughout Numpy and so existing code will\n# break.\n#\n# Within Rasterio, to test data types, we use Numpy's dtype() factory to \n# do something like this:\n#\n# if np.dtype(destination.dtype) == np.dtype(rasterio.uint8): ...\n#\n\nbool_ = 'bool'\nubyte = uint8 = 'uint8'\nuint16 = 'uint16'\nint16 = 'int16'\nuint32 = 'uint32'\nint32 = 'int32'\nfloat32 = 'float32'\nfloat64 = 'float64'\ncomplex_ = 'complex'\ncomplex64 = 'complex64'\ncomplex128 = 'complex128'\n\n# Not supported:\n# GDT_CInt16 = 8, GDT_CInt32 = 9, GDT_CFloat32 = 10, GDT_CFloat64 = 11\n\ndtype_fwd = {\n 0: None, # GDT_Unknown\n 1: ubyte, # GDT_Byte\n 2: uint16, # GDT_UInt16\n 3: int16, # GDT_Int16\n 4: uint32, # GDT_UInt32\n 5: int32, # GDT_Int32\n 6: float32, # GDT_Float32\n 7: float64, # GDT_Float64\n 8: complex_, # GDT_CInt16\n 9: complex_, # GDT_CInt32\n 10: complex64, # GDT_CFloat32\n 11: complex128 } # GDT_CFloat64\n\ndtype_rev = dict((v, k) for k, v in dtype_fwd.items())\ndtype_rev['uint8'] = 1\n\ntypename_fwd = {\n 0: 'Unknown',\n 1: 'Byte',\n 2: 'UInt16',\n 3: 'Int16',\n 4: 'UInt32',\n 5: 'Int32',\n 6: 'Float32',\n 7: 'Float64',\n 8: 'CInt16',\n 9: 'CInt32',\n 10: 'CFloat32',\n 11: 'CFloat64' }\n\ntypename_rev = dict((v, k) for k, v in typename_fwd.items())\n\ndef _gdal_typename(dt):\n try:\n return typename_fwd[dtype_rev[dt]]\n except KeyError:\n return typename_fwd[dtype_rev[dt().dtype.name]]\n\ndef check_dtype(dt):\n if dt not in dtype_rev:\n try:\n return dt().dtype.name in dtype_rev\n except:\n return False\n return True\n\n\ndef get_minimum_int_dtype(values):\n \"\"\"\n Uses range checking to determine the minimum integer data type required\n to represent values.\n\n :param values: numpy array\n :return: named data type that can be later used to create a numpy dtype\n \"\"\"\n\n min_value = values.min()\n max_value = values.max()\n \n if min_value >= 0:\n if max_value <= 255:\n return uint8\n elif max_value <= 65535:\n return uint16\n elif max_value <= 4294967295:\n return uint32\n elif min_value >= -32768 and max_value <= 32767:\n return int16\n elif min_value >= -2147483648 and max_value <= 2147483647:\n return int32\n\n\ndef is_ndarray(array):\n import numpy\n\n return isinstance(array, numpy.ndarray) or hasattr(array, '__array__')\n", "path": "rasterio/dtypes.py"}]} | 1,546 | 124 |
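For reference, a standalone sketch of the duck-typing check discussed in the record above. The wrapper class is a stand-in for an `xray`/`xarray` `DataArray`; only `numpy` is required, and the helper name is illustrative rather than rasterio's actual API.

```python
# Illustration of the "ndarray-like" check: accept anything exposing __array__,
# not only numpy.ndarray instances.
import numpy as np


def is_ndarray_like(obj):
    return isinstance(obj, np.ndarray) or hasattr(obj, "__array__")


class WrappedArray:
    """Minimal stand-in for an array-like container (e.g. an xarray DataArray)."""

    def __init__(self, data):
        self._data = np.asarray(data)

    def __array__(self, dtype=None):
        return self._data if dtype is None else self._data.astype(dtype)


print(is_ndarray_like(np.zeros(3)))           # True: a real ndarray
print(is_ndarray_like(WrappedArray([1, 2])))  # True: array-like via __array__
print(is_ndarray_like([1, 2, 3]))             # False: plain lists have no __array__
```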
gh_patches_debug_63370 | rasdani/github-patches | git_diff | mkdocs__mkdocs-130 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update requirements
While working with Markdown extensions (c.f. #74), I noticed that mkdocs' setup.py has its dependencies [pinned to specific patch versions](https://github.com/tomchristie/mkdocs/blob/master/setup.py#L18):
```
install_requires = [
'Jinja2==2.7.1',
'Markdown==2.3.1',
'PyYAML==3.10',
'watchdog==0.7.0',
'ghp-import==0.4.1'
]
```
Since these dependencies are slightly out of date (e.g., [Jinja2 is at 2.7.3](https://pypi.python.org/pypi/Jinja2) and [Markdown is at 2.4.1](https://pypi.python.org/pypi/Markdown)), it's hard to use mkdocs on a system with other software. Perhaps it's a shame that Python doesn't have npm-like dependency management, but that's the way it is: you'll get a setuptools error when trying to run mkdocs if any other package upgrades Jinja to a bugfix release.
How would the developers feel about loosening these version requirements? An idiomatic approach is to [just use `>=`](https://github.com/mitsuhiko/flask/blob/master/setup.py#L99).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 from __future__ import print_function
5 from setuptools import setup
6 import re
7 import os
8 import sys
9
10
11 name = 'mkdocs'
12 package = 'mkdocs'
13 description = 'In progress.'
14 url = 'http://www.mkdocs.org'
15 author = 'Tom Christie'
16 author_email = '[email protected]'
17 license = 'BSD'
18 install_requires = [
19 'Jinja2==2.7.1',
20 'Markdown==2.3.1',
21 'PyYAML==3.10',
22 'watchdog==0.7.0',
23 'ghp-import==0.4.1'
24 ]
25
26 long_description = """Work in progress."""
27
28
29 def get_version(package):
30 """
31 Return package version as listed in `__version__` in `init.py`.
32 """
33 init_py = open(os.path.join(package, '__init__.py')).read()
34 return re.search("^__version__ = ['\"]([^'\"]+)['\"]", init_py, re.MULTILINE).group(1)
35
36
37 def get_packages(package):
38 """
39 Return root package and all sub-packages.
40 """
41 return [dirpath
42 for dirpath, dirnames, filenames in os.walk(package)
43 if os.path.exists(os.path.join(dirpath, '__init__.py'))]
44
45
46 def get_package_data(package):
47 """
48 Return all files under the root package, that are not in a
49 package themselves.
50 """
51 walk = [(dirpath.replace(package + os.sep, '', 1), filenames)
52 for dirpath, dirnames, filenames in os.walk(package)
53 if not os.path.exists(os.path.join(dirpath, '__init__.py'))]
54
55 filepaths = []
56 for base, filenames in walk:
57 filepaths.extend([os.path.join(base, filename)
58 for filename in filenames])
59 return {package: filepaths}
60
61
62 if sys.argv[-1] == 'publish':
63 os.system("python setup.py sdist upload")
64 args = {'version': get_version(package)}
65 print("You probably want to also tag the version now:")
66 print(" git tag -a %(version)s -m 'version %(version)s'" % args)
67 print(" git push --tags")
68 sys.exit()
69
70
71 setup(
72 name=name,
73 version=get_version(package),
74 url=url,
75 license=license,
76 description=description,
77 long_description=long_description,
78 author=author,
79 author_email=author_email,
80 packages=get_packages(package),
81 package_data=get_package_data(package),
82 install_requires=install_requires,
83 entry_points={
84 'console_scripts': [
85 'mkdocs = mkdocs.main:run_main',
86 ],
87 },
88 classifiers=[
89 'Development Status :: 5 - Production/Stable',
90 'Environment :: Console',
91 'Environment :: Web Environment',
92 'Intended Audience :: Developers',
93 'License :: OSI Approved :: BSD License',
94 'Operating System :: OS Independent',
95 'Programming Language :: Python',
96 'Programming Language :: Python :: 2',
97 'Programming Language :: Python :: 2.6',
98 'Programming Language :: Python :: 2.7',
99 'Programming Language :: Python :: 3',
100 'Programming Language :: Python :: 3.3',
101 'Programming Language :: Python :: 3.4',
102 'Topic :: Documentation',
103 'Topic :: Text Processing',
104 ]
105 )
106
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -16,11 +16,11 @@
author_email = '[email protected]'
license = 'BSD'
install_requires = [
- 'Jinja2==2.7.1',
- 'Markdown==2.3.1',
- 'PyYAML==3.10',
- 'watchdog==0.7.0',
- 'ghp-import==0.4.1'
+ 'Jinja2>=2.7.1',
+ 'Markdown>=2.3.1,<2.5',
+ 'PyYAML>=3.10',
+ 'watchdog>=0.7.0',
+ 'ghp-import>=0.4.1'
]
long_description = """Work in progress."""
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -16,11 +16,11 @@\n author_email = '[email protected]'\n license = 'BSD'\n install_requires = [\n- 'Jinja2==2.7.1',\n- 'Markdown==2.3.1',\n- 'PyYAML==3.10',\n- 'watchdog==0.7.0',\n- 'ghp-import==0.4.1'\n+ 'Jinja2>=2.7.1',\n+ 'Markdown>=2.3.1,<2.5',\n+ 'PyYAML>=3.10',\n+ 'watchdog>=0.7.0',\n+ 'ghp-import>=0.4.1'\n ]\n \n long_description = \"\"\"Work in progress.\"\"\"\n", "issue": "Update requirements\nWhile working with Markdown extensions (c.f. #74), I noticed that mkdocs' setup.py has its dependencies [pinned to specific patch versions](https://github.com/tomchristie/mkdocs/blob/master/setup.py#L18):\n\n```\ninstall_requires = [\n 'Jinja2==2.7.1',\n 'Markdown==2.3.1',\n 'PyYAML==3.10',\n 'watchdog==0.7.0',\n 'ghp-import==0.4.1'\n]\n```\n\nSince these dependencies are slightly out of date (e.g., [Jinja2 is at 2.7.3](https://pypi.python.org/pypi/Jinja2) and [Markdown is at 2.4.1](https://pypi.python.org/pypi/Markdown)), it's hard to use mkdocs on a system with other software. Perhaps it's a shame that Python doesn't have npm-like dependency management, but that's the way it is\u2014you'll get a setuptools when trying to run mkdocs error if any other package upgrades Jinja to a bugfix release.\n\nHow would the developers feel about loosening these version requirements? An idiomatic approach is to [just use `>=`](https://github.com/mitsuhiko/flask/blob/master/setup.py#L99).\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\nfrom setuptools import setup\nimport re\nimport os\nimport sys\n\n\nname = 'mkdocs'\npackage = 'mkdocs'\ndescription = 'In progress.'\nurl = 'http://www.mkdocs.org'\nauthor = 'Tom Christie'\nauthor_email = '[email protected]'\nlicense = 'BSD'\ninstall_requires = [\n 'Jinja2==2.7.1',\n 'Markdown==2.3.1',\n 'PyYAML==3.10',\n 'watchdog==0.7.0',\n 'ghp-import==0.4.1'\n]\n\nlong_description = \"\"\"Work in progress.\"\"\"\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n init_py = open(os.path.join(package, '__init__.py')).read()\n return re.search(\"^__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py, re.MULTILINE).group(1)\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n\ndef get_package_data(package):\n \"\"\"\n Return all files under the root package, that are not in a\n package themselves.\n \"\"\"\n walk = [(dirpath.replace(package + os.sep, '', 1), filenames)\n for dirpath, dirnames, filenames in os.walk(package)\n if not os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n filepaths = []\n for base, filenames in walk:\n filepaths.extend([os.path.join(base, filename)\n for filename in filenames])\n return {package: filepaths}\n\n\nif sys.argv[-1] == 'publish':\n os.system(\"python setup.py sdist upload\")\n args = {'version': get_version(package)}\n print(\"You probably want to also tag the version now:\")\n print(\" git tag -a %(version)s -m 'version %(version)s'\" % args)\n print(\" git push --tags\")\n sys.exit()\n\n\nsetup(\n name=name,\n version=get_version(package),\n url=url,\n license=license,\n description=description,\n long_description=long_description,\n author=author,\n author_email=author_email,\n packages=get_packages(package),\n 
package_data=get_package_data(package),\n install_requires=install_requires,\n entry_points={\n 'console_scripts': [\n 'mkdocs = mkdocs.main:run_main',\n ],\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Topic :: Documentation',\n 'Topic :: Text Processing',\n ]\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\nfrom setuptools import setup\nimport re\nimport os\nimport sys\n\n\nname = 'mkdocs'\npackage = 'mkdocs'\ndescription = 'In progress.'\nurl = 'http://www.mkdocs.org'\nauthor = 'Tom Christie'\nauthor_email = '[email protected]'\nlicense = 'BSD'\ninstall_requires = [\n 'Jinja2>=2.7.1',\n 'Markdown>=2.3.1,<2.5',\n 'PyYAML>=3.10',\n 'watchdog>=0.7.0',\n 'ghp-import>=0.4.1'\n]\n\nlong_description = \"\"\"Work in progress.\"\"\"\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n init_py = open(os.path.join(package, '__init__.py')).read()\n return re.search(\"^__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py, re.MULTILINE).group(1)\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n\ndef get_package_data(package):\n \"\"\"\n Return all files under the root package, that are not in a\n package themselves.\n \"\"\"\n walk = [(dirpath.replace(package + os.sep, '', 1), filenames)\n for dirpath, dirnames, filenames in os.walk(package)\n if not os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n filepaths = []\n for base, filenames in walk:\n filepaths.extend([os.path.join(base, filename)\n for filename in filenames])\n return {package: filepaths}\n\n\nif sys.argv[-1] == 'publish':\n os.system(\"python setup.py sdist upload\")\n args = {'version': get_version(package)}\n print(\"You probably want to also tag the version now:\")\n print(\" git tag -a %(version)s -m 'version %(version)s'\" % args)\n print(\" git push --tags\")\n sys.exit()\n\n\nsetup(\n name=name,\n version=get_version(package),\n url=url,\n license=license,\n description=description,\n long_description=long_description,\n author=author,\n author_email=author_email,\n packages=get_packages(package),\n package_data=get_package_data(package),\n install_requires=install_requires,\n entry_points={\n 'console_scripts': [\n 'mkdocs = mkdocs.main:run_main',\n ],\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Topic 
:: Documentation',\n 'Topic :: Text Processing',\n ]\n)\n", "path": "setup.py"}]} | 1,482 | 190 |
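To make the pinning discussion above concrete, a small sketch comparing an exact pin with a range specifier, using the third-party `packaging` library (an assumption; mkdocs itself does not need this code):

```python
# Exact pin vs. range specifier (illustrative only).
from packaging.specifiers import SpecifierSet
from packaging.version import Version

pinned = SpecifierSet("==2.7.1")   # the style the issue complains about
ranged = SpecifierSet(">=2.7.1")   # the style the golden diff moves to

for candidate in ("2.7.1", "2.7.3"):
    version = Version(candidate)
    print(candidate, "satisfies pin:", version in pinned, "| satisfies range:", version in ranged)
```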
gh_patches_debug_49 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-1712 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CI/CD: Verify .pre-commit-config.yaml use latest hooks versions
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 """cookiecutter distutils configuration."""
3 from setuptools import setup
4
5 version = "2.1.2.dev0"
6
7 with open('README.md', encoding='utf-8') as readme_file:
8 readme = readme_file.read()
9
10 requirements = [
11 'binaryornot>=0.4.4',
12 'Jinja2>=2.7,<4.0.0',
13 'click>=7.0,<9.0.0',
14 'pyyaml>=5.3.1',
15 'jinja2-time>=0.2.0',
16 'python-slugify>=4.0.0',
17 'requests>=2.23.0',
18 ]
19
20 setup(
21 name='cookiecutter',
22 version=version,
23 description=(
24 'A command-line utility that creates projects from project '
25 'templates, e.g. creating a Python package project from a '
26 'Python package project template.'
27 ),
28 long_description=readme,
29 long_description_content_type='text/markdown',
30 author='Audrey Feldroy',
31 author_email='[email protected]',
32 url='https://github.com/cookiecutter/cookiecutter',
33 packages=['cookiecutter'],
34 package_dir={'cookiecutter': 'cookiecutter'},
35 entry_points={'console_scripts': ['cookiecutter = cookiecutter.__main__:main']},
36 include_package_data=True,
37 python_requires='>=3.7',
38 install_requires=requirements,
39 license='BSD',
40 zip_safe=False,
41 classifiers=[
42 "Development Status :: 5 - Production/Stable",
43 "Environment :: Console",
44 "Intended Audience :: Developers",
45 "Natural Language :: English",
46 "License :: OSI Approved :: BSD License",
47 "Programming Language :: Python :: 3 :: Only",
48 "Programming Language :: Python :: 3",
49 "Programming Language :: Python :: 3.7",
50 "Programming Language :: Python :: 3.8",
51 "Programming Language :: Python :: 3.9",
52 "Programming Language :: Python :: 3.10",
53 "Programming Language :: Python :: Implementation :: CPython",
54 "Programming Language :: Python :: Implementation :: PyPy",
55 "Programming Language :: Python",
56 "Topic :: Software Development",
57 ],
58 keywords=[
59 "cookiecutter",
60 "Python",
61 "projects",
62 "project templates",
63 "Jinja2",
64 "skeleton",
65 "scaffolding",
66 "project directory",
67 "package",
68 "packaging",
69 ],
70 )
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
"""cookiecutter distutils configuration."""
from setuptools import setup
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,4 +1,3 @@\n-#!/usr/bin/env python\n \"\"\"cookiecutter distutils configuration.\"\"\"\n from setuptools import setup\n", "issue": "CI/CD: Verify .pre-commit-config.yaml use latest hooks versions\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"cookiecutter distutils configuration.\"\"\"\nfrom setuptools import setup\n\nversion = \"2.1.2.dev0\"\n\nwith open('README.md', encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\nrequirements = [\n 'binaryornot>=0.4.4',\n 'Jinja2>=2.7,<4.0.0',\n 'click>=7.0,<9.0.0',\n 'pyyaml>=5.3.1',\n 'jinja2-time>=0.2.0',\n 'python-slugify>=4.0.0',\n 'requests>=2.23.0',\n]\n\nsetup(\n name='cookiecutter',\n version=version,\n description=(\n 'A command-line utility that creates projects from project '\n 'templates, e.g. creating a Python package project from a '\n 'Python package project template.'\n ),\n long_description=readme,\n long_description_content_type='text/markdown',\n author='Audrey Feldroy',\n author_email='[email protected]',\n url='https://github.com/cookiecutter/cookiecutter',\n packages=['cookiecutter'],\n package_dir={'cookiecutter': 'cookiecutter'},\n entry_points={'console_scripts': ['cookiecutter = cookiecutter.__main__:main']},\n include_package_data=True,\n python_requires='>=3.7',\n install_requires=requirements,\n license='BSD',\n zip_safe=False,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Natural Language :: English\",\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Programming Language :: Python\",\n \"Topic :: Software Development\",\n ],\n keywords=[\n \"cookiecutter\",\n \"Python\",\n \"projects\",\n \"project templates\",\n \"Jinja2\",\n \"skeleton\",\n \"scaffolding\",\n \"project directory\",\n \"package\",\n \"packaging\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "\"\"\"cookiecutter distutils configuration.\"\"\"\nfrom setuptools import setup\n\nversion = \"2.1.2.dev0\"\n\nwith open('README.md', encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\nrequirements = [\n 'binaryornot>=0.4.4',\n 'Jinja2>=2.7,<4.0.0',\n 'click>=7.0,<9.0.0',\n 'pyyaml>=5.3.1',\n 'jinja2-time>=0.2.0',\n 'python-slugify>=4.0.0',\n 'requests>=2.23.0',\n]\n\nsetup(\n name='cookiecutter',\n version=version,\n description=(\n 'A command-line utility that creates projects from project '\n 'templates, e.g. 
creating a Python package project from a '\n 'Python package project template.'\n ),\n long_description=readme,\n long_description_content_type='text/markdown',\n author='Audrey Feldroy',\n author_email='[email protected]',\n url='https://github.com/cookiecutter/cookiecutter',\n packages=['cookiecutter'],\n package_dir={'cookiecutter': 'cookiecutter'},\n entry_points={'console_scripts': ['cookiecutter = cookiecutter.__main__:main']},\n include_package_data=True,\n python_requires='>=3.7',\n install_requires=requirements,\n license='BSD',\n zip_safe=False,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Natural Language :: English\",\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Programming Language :: Python\",\n \"Topic :: Software Development\",\n ],\n keywords=[\n \"cookiecutter\",\n \"Python\",\n \"projects\",\n \"project templates\",\n \"Jinja2\",\n \"skeleton\",\n \"scaffolding\",\n \"project directory\",\n \"package\",\n \"packaging\",\n ],\n)\n", "path": "setup.py"}]} | 943 | 50 |
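The issue in the record above is only a one-line request, but a rough sketch of the kind of check it asks for could load `.pre-commit-config.yaml` and report each hook repository with its pinned `rev`, so the values can be compared against upstream releases (or simply refreshed with `pre-commit autoupdate`). The file name and keys follow the standard pre-commit format; this is an assumption, not cookiecutter's actual CI code.

```python
# List pinned pre-commit hook revisions (illustrative sketch; requires PyYAML).
import yaml

with open(".pre-commit-config.yaml", encoding="utf-8") as fh:
    config = yaml.safe_load(fh)

for repo in config.get("repos", []):
    print(f"{repo.get('repo')}: rev {repo.get('rev')}")
```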